[x265] [PATCH] asm: avx2 asm code for 8bpp and 16bpp version of scale1D_128to64 module
murugan at multicorewareinc.com
Tue Oct 21 12:54:23 CEST 2014
# HG changeset patch
# User Murugan Vairavel <murugan at multicorewareinc.com>
# Date 1413888229 -19800
# Tue Oct 21 16:13:49 2014 +0530
# Node ID b1a86d6e9dc3fab7996a5bc2d653513d1120530c
# Parent e66f78a6df4f7fee71437b6645574b66024574f4
asm: avx2 asm code for 8bpp and 16bpp version of scale1D_128to64 module
diff -r e66f78a6df4f -r b1a86d6e9dc3 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Tue Oct 21 11:51:19 2014 +0530
+++ b/source/common/x86/asm-primitives.cpp Tue Oct 21 16:13:49 2014 +0530
@@ -1444,6 +1444,7 @@
p.quant = x265_quant_avx2;
p.nquant = x265_nquant_avx2;
p.dequant_normal = x265_dequant_normal_avx2;
+ p.scale1D_128to64 = x265_scale1D_128to64_avx2;
#if X86_64
p.dct[DCT_8x8] = x265_dct8_avx2;
p.dct[DCT_16x16] = x265_dct16_avx2;
@@ -1788,6 +1789,7 @@
p.chroma[X265_CSP_I422].copy_ss[CHROMA422_16x24] = x265_blockcopy_ss_16x24_avx;
p.chroma[X265_CSP_I422].copy_ss[CHROMA422_16x32] = x265_blockcopy_ss_16x32_avx;
p.chroma[X265_CSP_I422].copy_ss[CHROMA422_16x64] = x265_blockcopy_ss_16x64_avx;
+ p.scale1D_128to64 = x265_scale1D_128to64_avx2;
#if X86_64
p.dct[DCT_8x8] = x265_dct8_avx2;
diff -r e66f78a6df4f -r b1a86d6e9dc3 source/common/x86/pixel-util.h
--- a/source/common/x86/pixel-util.h Tue Oct 21 11:51:19 2014 +0530
+++ b/source/common/x86/pixel-util.h Tue Oct 21 16:13:49 2014 +0530
@@ -70,6 +70,7 @@
float x265_pixel_ssim_end4_avx(int sum0[5][4], int sum1[5][4], int width);
void x265_scale1D_128to64_ssse3(pixel *, pixel *, intptr_t);
+void x265_scale1D_128to64_avx2(pixel *, pixel *, intptr_t);
void x265_scale2D_64to32_ssse3(pixel *, pixel *, intptr_t);
#define SETUP_CHROMA_PIXELSUB_PS_FUNC(W, H, cpu) \
diff -r e66f78a6df4f -r b1a86d6e9dc3 source/common/x86/pixel-util8.asm
--- a/source/common/x86/pixel-util8.asm Tue Oct 21 11:51:19 2014 +0530
+++ b/source/common/x86/pixel-util8.asm Tue Oct 21 16:13:49 2014 +0530
@@ -42,7 +42,7 @@
mask_ff: times 16 db 0xff
times 16 db 0
deinterleave_shuf: db 0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15
-deinterleave_word_shuf: db 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 15, 15
+deinterleave_word_shuf: db 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15
hmul_16p: times 16 db 1
times 8 db 1, -1
hmulw_16p: times 8 dw 1
@@ -53,6 +53,7 @@
SECTION .text
cextern pw_1
+cextern pb_1
cextern pw_00ff
cextern pw_2000
cextern pw_pixel_max
@@ -3439,6 +3440,69 @@
%endif
RET
+%if HIGH_BIT_DEPTH == 1
+INIT_YMM avx2
+cglobal scale1D_128to64, 2, 2, 4
+ pxor m2, m2
+ vbroadcasti128 m3, [deinterleave_word_shuf]
+
+ movu m0, [r1]
+ movu m1, [r1 + 32]
+ phaddw m0, m1
+ pavgw m0, m2
+ vpermq m0, m0, 0xD8
+ movu [r0], m0
+
+ movu m0, [r1 + 64]
+ movu m1, [r1 + 96]
+ phaddw m0, m1
+ pavgw m0, m2
+ vpermq m0, m0, 0xD8
+ movu [r0 + 32], m0
+
+ movu m0, [r1 + 128]
+ movu m1, [r1 + 160]
+ phaddw m0, m1
+ pavgw m0, m2
+ vpermq m0, m0, 0xD8
+ movu [r0 + 64], m0
+
+ movu m0, [r1 + 192]
+ movu m1, [r1 + 224]
+ phaddw m0, m1
+ pavgw m0, m2
+ vpermq m0, m0, 0xD8
+ movu [r0 + 96], m0
+ RET
+%else ; HIGH_BIT_DEPTH == 0
+INIT_YMM avx2
+cglobal scale1D_128to64, 2, 2, 5
+ pxor m2, m2
+ vbroadcasti128 m3, [deinterleave_shuf]
+ mova m4, [pb_1]
+
+ movu m0, [r1]
+ pmaddubsw m0, m0, m4
+ pavgw m0, m2
+ movu m1, [r1 + 32]
+ pmaddubsw m1, m1, m4
+ pavgw m1, m2
+ packuswb m0, m1
+ vpermq m0, m0, 0xD8
+ movu [r0], m0
+
+ movu m0, [r1 + 64]
+ pmaddubsw m0, m0, m4
+ pavgw m0, m2
+ movu m1, [r1 + 96]
+ pmaddubsw m1, m1, m4
+ pavgw m1, m2
+ packuswb m0, m1
+ vpermq m0, m0, 0xD8
+ movu [r0 + 32], m0
+ RET
+%endif
+
;-----------------------------------------------------------------
; void scale2D_64to32(pixel *dst, pixel *src, intptr_t stride)
;-----------------------------------------------------------------
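For reference, a minimal scalar sketch of what scale1D_128to64 computes, assuming the prototype declared in pixel-util.h above (the stride argument is not used by these kernels). Both new code paths produce the rounded pairwise average dst[i] = (src[2*i] + src[2*i + 1] + 1) >> 1 for 64 outputs: the 16bpp path via phaddw followed by pavgw against zero, the 8bpp path via pmaddubsw with pb_1, pavgw against zero and packuswb. The function name below is illustrative only, not the name of the x265 C reference primitive.

#include <cstdint>

#if HIGH_BIT_DEPTH
typedef uint16_t pixel;   // 16bpp build
#else
typedef uint8_t  pixel;   // 8bpp build
#endif

// Illustrative scalar equivalent of the AVX2 kernels above: 128 input
// samples are reduced to 64 by averaging each adjacent pair with
// rounding, matching pavgw's (a + b + 1) >> 1 behaviour.
static void scale1D_128to64_ref(pixel* dst, const pixel* src, intptr_t /*stride*/)
{
    for (int i = 0; i < 64; i++)
        dst[i] = (pixel)((src[2 * i] + src[2 * i + 1] + 1) >> 1);
}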