[x265] [PATCH] asm: avx2 code for sad[32x16], [32x24], [32x32] for 10 bpp
sumalatha at multicorewareinc.com
Mon May 11 08:08:02 CEST 2015
# HG changeset patch
# User Sumalatha Polureddy
# Date 1431324476 -19800
# Mon May 11 11:37:56 2015 +0530
# Branch stable
# Node ID 86c5d0e638099e0fe2a23af5ff8edf827da8d609
# Parent e66d0c67572d0eb1bb7f6bfbe9a26c8c97ba8eab
asm: avx2 code for sad[32x16],[32x24],[32x32] for 10 bpp
Performance relative to the C reference (speedup, optimized primitive, C primitive):

sse2:
sad[32x16]  3.01x  1595.11  4794.98
sad[32x24]  2.98x  2362.68  7051.16
sad[32x32]  2.97x  3128.34  9278.31
avx2:
sad[32x16]  7.66x   603.61  4621.37
sad[32x24]  6.98x  1003.75  7006.95
sad[32x32]  7.05x  1340.97  9452.61
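
For reference, each of these primitives computes a plain sum of absolute differences over a 32-pixel-wide block of 16-bit (10 bpp) samples; the strides are passed in pixel units, which is why the asm doubles r1/r3 into byte strides up front. A minimal C++ sketch of the operation (illustrative only, not the exact x265 C primitive; the name and the height parameter are hypothetical, since the real primitives are instantiated per block size):

#include <cstdint>
#include <cstdlib>

// Sum of absolute differences between two 32xN blocks of 16-bit pixels,
// with independent per-row strides given in pixels.
static int sad_32xN_ref(const uint16_t* pix1, intptr_t stride1,
                        const uint16_t* pix2, intptr_t stride2, int height)
{
    int sum = 0;
    for (int y = 0; y < height; y++)
    {
        for (int x = 0; x < 32; x++)
            sum += std::abs(pix1[x] - pix2[x]);
        pix1 += stride1;
        pix2 += stride2;
    }
    return sum;
}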
diff -r e66d0c67572d -r 86c5d0e63809 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Mon May 11 10:49:02 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp Mon May 11 11:37:56 2015 +0530
@@ -1266,6 +1266,9 @@
p.pu[LUMA_16x32].sad = x265_pixel_sad_16x32_avx2;
p.pu[LUMA_16x64].sad = x265_pixel_sad_16x64_avx2;
p.pu[LUMA_32x8].sad = x265_pixel_sad_32x8_avx2;
+ p.pu[LUMA_32x16].sad = x265_pixel_sad_32x16_avx2;
+ p.pu[LUMA_32x24].sad = x265_pixel_sad_32x24_avx2;
+ p.pu[LUMA_32x32].sad = x265_pixel_sad_32x32_avx2;
p.pu[LUMA_16x4].convert_p2s = x265_filterPixelToShort_16x4_avx2;
p.pu[LUMA_16x8].convert_p2s = x265_filterPixelToShort_16x8_avx2;
diff -r e66d0c67572d -r 86c5d0e63809 source/common/x86/sad16-a.asm
--- a/source/common/x86/sad16-a.asm Mon May 11 10:49:02 2015 +0530
+++ b/source/common/x86/sad16-a.asm Mon May 11 11:37:56 2015 +0530
@@ -507,7 +507,204 @@
HADDW m0, m1
movd eax, xm0
RET
-
+
+INIT_YMM avx2
+cglobal pixel_sad_32x16, 4,7,5
+ pxor m0, m0
+ mov r4d, 16/8
+ add r3d, r3d
+ add r1d, r1d
+ lea r5, [r1 * 3]
+ lea r6, [r3 * 3]
+.loop:
+ movu m1, [r2]
+ movu m2, [r2 + 32]
+ movu m3, [r2 + r3]
+ movu m4, [r2 + r3 + 32]
+ psubw m1, [r0]
+ psubw m2, [r0 + 32]
+ psubw m3, [r0 + r1]
+ psubw m4, [r0 + r1 + 32]
+ pabsw m1, m1
+ pabsw m2, m2
+ pabsw m3, m3
+ pabsw m4, m4
+ paddw m1, m2
+ paddw m3, m4
+ paddw m0, m1
+ paddw m0, m3
+
+ movu m1, [r2 + 2 * r3]
+ movu m2, [r2 + 2 * r3 + 32]
+ movu m3, [r2 + r6]
+ movu m4, [r2 + r6 + 32]
+ psubw m1, [r0 + 2 * r1]
+ psubw m2, [r0 + 2 * r1 + 32]
+ psubw m3, [r0 + r5]
+ psubw m4, [r0 + r5 + 32]
+ pabsw m1, m1
+ pabsw m2, m2
+ lea r0, [r0 + 4 * r1]
+ lea r2, [r2 + 4 * r3]
+ pabsw m3, m3
+ pabsw m4, m4
+ paddw m1, m2
+ paddw m3, m4
+ paddw m0, m1
+ paddw m0, m3
+
+ movu m1, [r2]
+ movu m2, [r2 + 32]
+ movu m3, [r2 + r3]
+ movu m4, [r2 + r3 + 32]
+ psubw m1, [r0]
+ psubw m2, [r0 + 32]
+ psubw m3, [r0 + r1]
+ psubw m4, [r0 + r1 + 32]
+ pabsw m1, m1
+ pabsw m2, m2
+ pabsw m3, m3
+ pabsw m4, m4
+ paddw m1, m2
+ paddw m3, m4
+ paddw m0, m1
+ paddw m0, m3
+
+ movu m1, [r2 + 2 * r3]
+ movu m2, [r2 + 2 * r3 + 32]
+ movu m3, [r2 + r6]
+ movu m4, [r2 + r6 + 32]
+ psubw m1, [r0 + 2 * r1]
+ psubw m2, [r0 + 2 * r1 + 32]
+ psubw m3, [r0 + r5]
+ psubw m4, [r0 + r5 + 32]
+ pabsw m1, m1
+ pabsw m2, m2
+ lea r0, [r0 + 4 * r1]
+ lea r2, [r2 + 4 * r3]
+ pabsw m3, m3
+ pabsw m4, m4
+ paddw m1, m2
+ paddw m3, m4
+ paddw m0, m1
+ paddw m0, m3
+
+ dec r4d
+ jg .loop
+
+ HADDW m0, m1
+ movd eax, xm0
+ RET
+
+INIT_YMM avx2
+cglobal pixel_sad_32x24, 4,7,5
+ pxor m0, m0
+ mov r4d, 24/4
+ add r3d, r3d
+ add r1d, r1d
+ lea r5, [r1 * 3]
+ lea r6, [r3 * 3]
+.loop:
+ movu m1, [r2]
+ movu m2, [r2 + 32]
+ movu m3, [r2 + r3]
+ movu m4, [r2 + r3 + 32]
+ psubw m1, [r0]
+ psubw m2, [r0 + 32]
+ psubw m3, [r0 + r1]
+ psubw m4, [r0 + r1 + 32]
+ pabsw m1, m1
+ pabsw m2, m2
+ pabsw m3, m3
+ pabsw m4, m4
+ paddw m1, m2
+ paddw m3, m4
+ paddw m0, m1
+ paddw m0, m3
+
+ movu m1, [r2 + 2 * r3]
+ movu m2, [r2 + 2 * r3 + 32]
+ movu m3, [r2 + r6]
+ movu m4, [r2 + r6 + 32]
+ psubw m1, [r0 + 2 * r1]
+ psubw m2, [r0 + 2 * r1 + 32]
+ psubw m3, [r0 + r5]
+ psubw m4, [r0 + r5 + 32]
+ pabsw m1, m1
+ pabsw m2, m2
+ pabsw m3, m3
+ pabsw m4, m4
+ paddw m1, m2
+ paddw m3, m4
+ paddw m0, m1
+ paddw m0, m3
+
+ lea r0, [r0 + 4 * r1]
+ lea r2, [r2 + 4 * r3]
+
+ dec r4d
+ jg .loop
+
+ HADDUWD m0, m1
+ HADDD m0, m1
+ movd eax, xm0
+ RET
+
+
+INIT_YMM avx2
+cglobal pixel_sad_32x32, 4,7,5
+ pxor m0, m0
+ mov r4d, 32/4
+ add r3d, r3d
+ add r1d, r1d
+ lea r5, [r1 * 3]
+ lea r6, [r3 * 3]
+.loop:
+ movu m1, [r2]
+ movu m2, [r2 + 32]
+ movu m3, [r2 + r3]
+ movu m4, [r2 + r3 + 32]
+ psubw m1, [r0]
+ psubw m2, [r0 + 32]
+ psubw m3, [r0 + r1]
+ psubw m4, [r0 + r1 + 32]
+ pabsw m1, m1
+ pabsw m2, m2
+ pabsw m3, m3
+ pabsw m4, m4
+ paddw m1, m2
+ paddw m3, m4
+ paddw m0, m1
+ paddw m0, m3
+
+ movu m1, [r2 + 2 * r3]
+ movu m2, [r2 + 2 * r3 + 32]
+ movu m3, [r2 + r6]
+ movu m4, [r2 + r6 + 32]
+ psubw m1, [r0 + 2 * r1]
+ psubw m2, [r0 + 2 * r1 + 32]
+ psubw m3, [r0 + r5]
+ psubw m4, [r0 + r5 + 32]
+ pabsw m1, m1
+ pabsw m2, m2
+ pabsw m3, m3
+ pabsw m4, m4
+ paddw m1, m2
+ paddw m3, m4
+ paddw m0, m1
+ paddw m0, m3
+
+ lea r0, [r0 + 4 * r1]
+ lea r2, [r2 + 4 * r3]
+
+ dec r4d
+ jg .loop
+
+ HADDUWD m0, m1
+ HADDD m0, m1
+ movd eax, xm0
+ RET
+
;------------------------------------------------------------------
; int pixel_sad_32xN( uint16_t *, intptr_t, uint16_t *, intptr_t )
;------------------------------------------------------------------
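
One note on the horizontal reductions in the new kernels: pixel_sad_32x16 keeps its per-lane sums as 16-bit words and finishes with HADDW, while pixel_sad_32x24 and pixel_sad_32x32 first widen to dwords with HADDUWD before HADDD. Each word lane accumulates two absolute differences per row, each at most 1023 for 10 bpp input, so the split lines up with the worst-case arithmetic, assuming the usual x86util.asm behavior of these macros (HADDW sums signed words, HADDUWD zero-extends unsigned words to dwords). A small compile-time sketch of that arithmetic:

// Worst-case per-lane word accumulation for 10 bpp input:
// two absolute differences per row, each at most 1023.
enum { MAX_DIFF_10BPP = 1023 };

static_assert(16 * 2 * MAX_DIFF_10BPP <= 32767,
              "32x16: word sums stay in signed 16-bit range, HADDW is enough");
static_assert(24 * 2 * MAX_DIFF_10BPP > 32767 && 24 * 2 * MAX_DIFF_10BPP <= 65535,
              "32x24: too big for signed words, still fits unsigned -> HADDUWD + HADDD");
static_assert(32 * 2 * MAX_DIFF_10BPP > 32767 && 32 * 2 * MAX_DIFF_10BPP <= 65535,
              "32x32: too big for signed words, still fits unsigned -> HADDUWD + HADDD");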