[x265] [PATCH 067 of 307] [x265-avx512]x86: AVX512 pixel_sad_x4_32xN for high bit depth
mythreyi at multicorewareinc.com
Sat Apr 7 04:31:05 CEST 2018
# HG changeset patch
# User Gopi Satykrishna Akisetty <gopi.satykrishna at multicorewareinc.com>
# Date 1501837071 -19800
# Fri Aug 04 14:27:51 2017 +0530
# Node ID c3a2abd8e46f8db3ba7c276f39fe41ed002ce295
# Parent 241f318be574498b7bb77939937a907e4721dc32
[x265-avx512]x86: AVX512 pixel_sad_x4_32xN for high bit depth
Size | AVX2 performance | AVX512 performance
------------------------------------------------
32x8 | 16.73x | 25.16x
32x16 | 18.36x | 29.04x
32x24 | 19.52x | 31.03x
32x32 | 18.78x | 31.95x
32x64 | 19.01x | 34.20x
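
For reference, sad_x4 measures one 32xN fenc block (fixed FENC_STRIDE)
against four reference candidates that share a stride, returning all four
SADs through res. Below is a minimal scalar sketch of the semantics at
high bit depth (names are illustrative; the shipped C reference is x265's
sad_x4 primitive template, not this function):

    #include <stdint.h>
    #include <stdlib.h>

    #define FENC_STRIDE 64  /* x265's fixed encoder-block stride, in pixels */

    /* Illustrative scalar model of pixel_sad_x4_32xN; strides are in
     * pixel units, pixels are 16-bit at high bit depth. */
    static void sad_x4_32xN_ref(int N, const uint16_t *fenc,
                                const uint16_t *fref0, const uint16_t *fref1,
                                const uint16_t *fref2, const uint16_t *fref3,
                                intptr_t frefstride, int32_t *res)
    {
        res[0] = res[1] = res[2] = res[3] = 0;
        for (int y = 0; y < N; y++)
        {
            for (int x = 0; x < 32; x++)
            {
                res[0] += abs(fenc[x] - fref0[x]);
                res[1] += abs(fenc[x] - fref1[x]);
                res[2] += abs(fenc[x] - fref2[x]);
                res[3] += abs(fenc[x] - fref3[x]);
            }
            fenc  += FENC_STRIDE;
            fref0 += frefstride;
            fref1 += frefstride;
            fref2 += frefstride;
            fref3 += frefstride;
        }
    }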
diff -r 241f318be574 -r c3a2abd8e46f source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Thu Aug 03 18:30:51 2017 +0530
+++ b/source/common/x86/asm-primitives.cpp Fri Aug 04 14:27:51 2017 +0530
@@ -2319,6 +2319,12 @@
p.pu[LUMA_32x32].sad_x3 = PFX(pixel_sad_x3_32x32_avx512);
p.pu[LUMA_32x64].sad_x3 = PFX(pixel_sad_x3_32x64_avx512);
+ p.pu[LUMA_32x8].sad_x4 = PFX(pixel_sad_x4_32x8_avx512);
+ p.pu[LUMA_32x16].sad_x4 = PFX(pixel_sad_x4_32x16_avx512);
+ p.pu[LUMA_32x24].sad_x4 = PFX(pixel_sad_x4_32x24_avx512);
+ p.pu[LUMA_32x32].sad_x4 = PFX(pixel_sad_x4_32x32_avx512);
+ p.pu[LUMA_32x64].sad_x4 = PFX(pixel_sad_x4_32x64_avx512);
}
}
#else // if HIGH_BIT_DEPTH
diff -r 241f318be574 -r c3a2abd8e46f source/common/x86/sad16-a.asm
--- a/source/common/x86/sad16-a.asm Thu Aug 03 18:30:51 2017 +0530
+++ b/source/common/x86/sad16-a.asm Fri Aug 04 14:27:51 2017 +0530
@@ -2501,6 +2501,160 @@
; SAD x3/x4 avx512 code start
;============================
+%macro PROCESS_SAD_X4_32x4_AVX512 0
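+    ; Accumulate SADs for four rows of a 32-pixel-wide block: m8 holds
+    ; the fenc row, m4-m7 the corresponding rows of the four reference
+    ; candidates; per-candidate row sums build up in m0-m3.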
+ movu m8, [r0]
+ movu m4, [r1]
+ movu m5, [r2]
+ movu m6, [r3]
+ movu m7, [r4]
+
+ psubw m4, m8
+ psubw m5, m8
+ psubw m6, m8
+ psubw m7, m8
+ pabsw m4, m4
+ pabsw m5, m5
+ pabsw m6, m6
+ pabsw m7, m7
+
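+    ; pmaddwd against pw_1 (m9) widens adjacent word differences into
+    ; dword pair sums, so the running totals in m0-m3 cannot overflow
+    ; 16 bits before the paddd accumulate.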
+ pmaddwd m4, m9
+ paddd m0, m4
+ pmaddwd m5, m9
+ paddd m1, m5
+ pmaddwd m6, m9
+ paddd m2, m6
+ pmaddwd m7, m9
+ paddd m3, m7
+
+ movu m8, [r0 + 2 * FENC_STRIDE]
+ movu m4, [r1 + r5]
+ movu m5, [r2 + r5]
+ movu m6, [r3 + r5]
+ movu m7, [r4 + r5]
+
+ psubw m4, m8
+ psubw m5, m8
+ psubw m6, m8
+ psubw m7, m8
+ pabsw m4, m4
+ pabsw m5, m5
+ pabsw m6, m6
+ pabsw m7, m7
+
+ pmaddwd m4, m9
+ paddd m0, m4
+ pmaddwd m5, m9
+ paddd m1, m5
+ pmaddwd m6, m9
+ paddd m2, m6
+ pmaddwd m7, m9
+ paddd m3, m7
+
+ movu m8, [r0 + 4 * FENC_STRIDE]
+ movu m4, [r1 + 2 * r5]
+ movu m5, [r2 + 2 * r5]
+ movu m6, [r3 + 2 * r5]
+ movu m7, [r4 + 2 * r5]
+
+ psubw m4, m8
+ psubw m5, m8
+ psubw m6, m8
+ psubw m7, m8
+ pabsw m4, m4
+ pabsw m5, m5
+ pabsw m6, m6
+ pabsw m7, m7
+
+ pmaddwd m4, m9
+ paddd m0, m4
+ pmaddwd m5, m9
+ paddd m1, m5
+ pmaddwd m6, m9
+ paddd m2, m6
+ pmaddwd m7, m9
+ paddd m3, m7
+
+ movu m8, [r0 + 6 * FENC_STRIDE]
+ movu m4, [r1 + r7]
+ movu m5, [r2 + r7]
+ movu m6, [r3 + r7]
+ movu m7, [r4 + r7]
+
+ psubw m4, m8
+ psubw m5, m8
+ psubw m6, m8
+ psubw m7, m8
+ pabsw m4, m4
+ pabsw m5, m5
+ pabsw m6, m6
+ pabsw m7, m7
+
+ pmaddwd m4, m9
+ paddd m0, m4
+ pmaddwd m5, m9
+ paddd m1, m5
+ pmaddwd m6, m9
+ paddd m2, m6
+ pmaddwd m7, m9
+ paddd m3, m7
+%endmacro
+
+
+%macro PROCESS_SAD_X4_END_AVX512 0
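+    ; Reduce each 512-bit accumulator to one dword SAD: fold the upper
+    ; 256 bits into the ymm, the upper 128 bits into the xmm, then use
+    ; pshufd to fold the remaining dword lanes; the four results are
+    ; stored to the res pointer loaded from the stack (r6mp).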
+ vextracti32x8 ym4, m0, 1
+ vextracti32x8 ym5, m1, 1
+ vextracti32x8 ym6, m2, 1
+ vextracti32x8 ym7, m3, 1
+
+ paddd ym0, ym4
+ paddd ym1, ym5
+ paddd ym2, ym6
+ paddd ym3, ym7
+
+ vextracti64x2 xm4, m0, 1
+ vextracti64x2 xm5, m1, 1
+ vextracti64x2 xm6, m2, 1
+ vextracti64x2 xm7, m3, 1
+
+ paddd xm0, xm4
+ paddd xm1, xm5
+ paddd xm2, xm6
+ paddd xm3, xm7
+
+ pshufd xm4, xm0, 00001110b
+ pshufd xm5, xm1, 00001110b
+ pshufd xm6, xm2, 00001110b
+ pshufd xm7, xm3, 00001110b
+
+ paddd xm0, xm4
+ paddd xm1, xm5
+ paddd xm2, xm6
+ paddd xm3, xm7
+
+ pshufd xm4, xm0, 00000001b
+ pshufd xm5, xm1, 00000001b
+ pshufd xm6, xm2, 00000001b
+ pshufd xm7, xm3, 00000001b
+
+ paddd xm0, xm4
+ paddd xm1, xm5
+ paddd xm2, xm6
+ paddd xm3, xm7
+
+ mov r0, r6mp
+ movd [r0 + 0], xm0
+ movd [r0 + 4], xm1
+ movd [r0 + 8], xm2
+ movd [r0 + 12], xm3
+%endmacro
+
+
%macro PROCESS_SAD_X3_32x4_AVX512 0
movu m6, [r0]
movu m3, [r1]
@@ -2856,6 +3010,278 @@
PROCESS_SAD_X3_END_AVX512
RET
+
+;------------------------------------------------------------------------------------------------------------------------------------------------------------
+; void pixel_sad_x4_32x%1( const pixel* pix1, const pixel* pix2, const pixel* pix3, const pixel* pix4, const pixel* pix5, intptr_t frefstride, int32_t* res )
+;------------------------------------------------------------------------------------------------------------------------------------------------------------
+
+INIT_ZMM avx512
+cglobal pixel_sad_x4_32x8, 6,8,10
+ pxor m0, m0
+ pxor m1, m1
+ pxor m2, m2
+ pxor m3, m3
+
+ vbroadcasti32x8 m9, [pw_1]
+
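+    ; Pixels are 16-bit: double the reference stride to convert it to
+    ; bytes, and keep 3 * stride in r7 to address the fourth row of
+    ; each 4-row batch.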
+ add r5d, r5d
+ lea r7d, [r5 * 3]
+
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ PROCESS_SAD_X4_END_AVX512
+ RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_x4_32x16, 6,8,10
+ pxor m0, m0
+ pxor m1, m1
+ pxor m2, m2
+ pxor m3, m3
+
+ vbroadcasti32x8 m9, [pw_1]
+
+ add r5d, r5d
+ lea r7d, [r5 * 3]
+
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ PROCESS_SAD_X4_END_AVX512
+ RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_x4_32x24, 6,8,10
+ pxor m0, m0
+ pxor m1, m1
+ pxor m2, m2
+ pxor m3, m3
+
+ vbroadcasti32x8 m9, [pw_1]
+
+ add r5d, r5d
+ lea r7d, [r5 * 3]
+
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ PROCESS_SAD_X4_END_AVX512
+ RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_x4_32x32, 6,8,10
+ pxor m0, m0
+ pxor m1, m1
+ pxor m2, m2
+ pxor m3, m3
+
+ vbroadcasti32x8 m9, [pw_1]
+
+ add r5d, r5d
+ lea r7d, [r5 * 3]
+
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ PROCESS_SAD_X4_END_AVX512
+ RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_x4_32x64, 6,8,10
+ pxor m0, m0
+ pxor m1, m1
+ pxor m2, m2
+ pxor m3, m3
+
+ vbroadcasti32x8 m9, [pw_1]
+
+ add r5d, r5d
+ lea r7d, [r5 * 3]
+
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ add r0, FENC_STRIDE * 8
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ PROCESS_SAD_X4_32x4_AVX512
+ PROCESS_SAD_X4_END_AVX512
+ RET
+
;============================
; SAD x3/x4 avx512 code end
;============================