[x265] [PATCH 029 of 307] x86: AVX512 pixel_sad_x4_48x64
mythreyi at multicorewareinc.com
Sat Apr 7 04:30:27 CEST 2018
# HG changeset patch
# User Vignesh Vijayakumar
# Date 1500263597 -19800
# Mon Jul 17 09:23:17 2017 +0530
# Node ID 576a93cba7d189fddba3466a21188f0ece3ed278
# Parent 229c13a0d7e4a1dafad7b0a2e9eef041ecccdb77
x86: AVX512 pixel_sad_x4_48x64
AVX2 performance : 59.49x
AVX512 performance: 62.29x
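For reference, the sad_x4 primitive computes four SAD values in one call: the same 48x64 encode block (fenc, with the fixed FENC_STRIDE) is compared against four reference candidates that share a single stride, and one sum per candidate is written to res[0..3]. A minimal scalar sketch of that behaviour follows, assuming the usual x265 pixel_sad_x4_t signature, 8-bit pixels and FENC_STRIDE = 64; it is illustrative only, not the upstream C reference.

#include <cstdint>
#include <cstdlib>

static const int FENC_STRIDE = 64;   // fixed encoder-block stride in x265

// Scalar model of pixel_sad_x4 for a 48x64 partition (8-bit pixels).
void sad_x4_48x64_c(const uint8_t* fenc,
                    const uint8_t* fref0, const uint8_t* fref1,
                    const uint8_t* fref2, const uint8_t* fref3,
                    intptr_t frefstride, int32_t* res)
{
    res[0] = res[1] = res[2] = res[3] = 0;
    for (int y = 0; y < 64; y++)
        for (int x = 0; x < 48; x++)
        {
            int e = fenc[y * FENC_STRIDE + x];
            res[0] += std::abs(e - fref0[y * frefstride + x]);
            res[1] += std::abs(e - fref1[y * frefstride + x]);
            res[2] += std::abs(e - fref2[y * frefstride + x]);
            res[3] += std::abs(e - fref3[y * frefstride + x]);
        }
}

The AVX512 kernel added below produces the same four sums while keeping all four candidates in flight per load of the encode block.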
diff -r 229c13a0d7e4 -r 576a93cba7d1 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Mon Jul 17 08:27:14 2017 +0530
+++ b/source/common/x86/asm-primitives.cpp Mon Jul 17 09:23:17 2017 +0530
@@ -3756,6 +3756,7 @@
p.pu[LUMA_64x32].sad_x4 = PFX(pixel_sad_x4_64x32_avx512);
p.pu[LUMA_64x48].sad_x4 = PFX(pixel_sad_x4_64x48_avx512);
p.pu[LUMA_64x64].sad_x4 = PFX(pixel_sad_x4_64x64_avx512);
+ p.pu[LUMA_48x64].sad_x4 = PFX(pixel_sad_x4_48x64_avx512);
p.pu[LUMA_4x4].satd = PFX(pixel_satd_4x4_avx512);
p.pu[LUMA_4x8].satd = PFX(pixel_satd_4x8_avx512);
diff -r 229c13a0d7e4 -r 576a93cba7d1 source/common/x86/sad-a.asm
--- a/source/common/x86/sad-a.asm Mon Jul 17 08:27:14 2017 +0530
+++ b/source/common/x86/sad-a.asm Mon Jul 17 09:23:17 2017 +0530
@@ -4348,6 +4348,154 @@
paddd m3, m4
%endmacro
+%macro SAD_X4_48x8_AVX512 0
+ movu ym4, [r0]
+ vinserti32x8 m4, [r0 + FENC_STRIDE], 1
+ movu ym5, [r1]
+ vinserti32x8 m5, [r1 + r5], 1
+ movu ym6, [r2]
+ vinserti32x8 m6, [r2 + r5], 1
+ movu ym7, [r3]
+ vinserti32x8 m7, [r3 + r5], 1
+ movu ym8, [r4]
+ vinserti32x8 m8, [r4 + r5], 1
+
+ psadbw m9, m4, m5
+ paddd m0, m9
+ psadbw m5, m4, m6
+ paddd m1, m5
+ psadbw m6, m4, m7
+ paddd m2, m6
+ psadbw m4, m8
+ paddd m3, m4
+
+ movu ym4, [r0 + FENC_STRIDE * 2]
+ vinserti32x8 m4, [r0 + FENC_STRIDE * 3], 1
+ movu ym5, [r1 + r5 * 2]
+ vinserti32x8 m5, [r1 + r7], 1
+ movu ym6, [r2 + r5 * 2]
+ vinserti32x8 m6, [r2 + r7], 1
+ movu ym7, [r3 + r5 * 2]
+ vinserti32x8 m7, [r3 + r7], 1
+ movu ym8, [r4 + r5 * 2]
+ vinserti32x8 m8, [r4 + r7], 1
+
+ psadbw m9, m4, m5
+ paddd m0, m9
+ psadbw m5, m4, m6
+ paddd m1, m5
+ psadbw m6, m4, m7
+ paddd m2, m6
+ psadbw m4, m8
+ paddd m3, m4
+
+ movu xm4, [r0 + 32]
+ vinserti32x4 m4, [r0 + FENC_STRIDE + 32], 1
+ vinserti32x4 m4, [r0 + FENC_STRIDE * 2 + 32], 2
+ vinserti32x4 m4, [r0 + FENC_STRIDE * 3 + 32], 3
+ movu xm5, [r1 + 32]
+ vinserti32x4 m5, [r1 + r5 + 32], 1
+ vinserti32x4 m5, [r1 + r5 * 2 + 32], 2
+ vinserti32x4 m5, [r1 + r7 + 32], 3
+ movu xm6, [r2 + 32]
+ vinserti32x4 m6, [r2 + r5 + 32], 1
+ vinserti32x4 m6, [r2 + r5 * 2 + 32], 2
+ vinserti32x4 m6, [r2 + r7 + 32], 3
+ movu xm7, [r3 + 32]
+ vinserti32x4 m7, [r3 + r5 + 32], 1
+ vinserti32x4 m7, [r3 + r5 * 2 + 32], 2
+ vinserti32x4 m7, [r3 + r7 + 32], 3
+ movu xm8, [r4 + 32]
+ vinserti32x4 m8, [r4 + r5 + 32], 1
+ vinserti32x4 m8, [r4 + r5 * 2 + 32], 2
+ vinserti32x4 m8, [r4 + r7 + 32], 3
+
+ psadbw m9, m4, m5
+ paddd m0, m9
+ psadbw m5, m4, m6
+ paddd m1, m5
+ psadbw m6, m4, m7
+ paddd m2, m6
+ psadbw m4, m8
+ paddd m3, m4
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+
+ movu ym4, [r0]
+ vinserti32x8 m4, [r0 + FENC_STRIDE], 1
+ movu ym5, [r1]
+ vinserti32x8 m5, [r1 + r5], 1
+ movu ym6, [r2]
+ vinserti32x8 m6, [r2 + r5], 1
+ movu ym7, [r3]
+ vinserti32x8 m7, [r3 + r5], 1
+ movu ym8, [r4]
+ vinserti32x8 m8, [r4 + r5], 1
+
+ psadbw m9, m4, m5
+ paddd m0, m9
+ psadbw m5, m4, m6
+ paddd m1, m5
+ psadbw m6, m4, m7
+ paddd m2, m6
+ psadbw m4, m8
+ paddd m3, m4
+
+ movu ym4, [r0 + FENC_STRIDE * 2]
+ vinserti32x8 m4, [r0 + FENC_STRIDE * 3], 1
+ movu ym5, [r1 + r5 * 2]
+ vinserti32x8 m5, [r1 + r7], 1
+ movu ym6, [r2 + r5 * 2]
+ vinserti32x8 m6, [r2 + r7], 1
+ movu ym7, [r3 + r5 * 2]
+ vinserti32x8 m7, [r3 + r7], 1
+ movu ym8, [r4 + r5 * 2]
+ vinserti32x8 m8, [r4 + r7], 1
+
+ psadbw m9, m4, m5
+ paddd m0, m9
+ psadbw m5, m4, m6
+ paddd m1, m5
+ psadbw m6, m4, m7
+ paddd m2, m6
+ psadbw m4, m8
+ paddd m3, m4
+
+ movu xm4, [r0 + 32]
+ vinserti32x4 m4, [r0 + FENC_STRIDE + 32], 1
+ vinserti32x4 m4, [r0 + FENC_STRIDE * 2 + 32], 2
+ vinserti32x4 m4, [r0 + FENC_STRIDE * 3 + 32], 3
+ movu xm5, [r1 + 32]
+ vinserti32x4 m5, [r1 + r5 + 32], 1
+ vinserti32x4 m5, [r1 + r5 * 2 + 32], 2
+ vinserti32x4 m5, [r1 + r7 + 32], 3
+ movu xm6, [r2 + 32]
+ vinserti32x4 m6, [r2 + r5 + 32], 1
+ vinserti32x4 m6, [r2 + r5 * 2 + 32], 2
+ vinserti32x4 m6, [r2 + r7 + 32], 3
+ movu xm7, [r3 + 32]
+ vinserti32x4 m7, [r3 + r5 + 32], 1
+ vinserti32x4 m7, [r3 + r5 * 2 + 32], 2
+ vinserti32x4 m7, [r3 + r7 + 32], 3
+ movu xm8, [r4 + 32]
+ vinserti32x4 m8, [r4 + r5 + 32], 1
+ vinserti32x4 m8, [r4 + r5 * 2 + 32], 2
+ vinserti32x4 m8, [r4 + r7 + 32], 3
+
+ psadbw m9, m4, m5
+ paddd m0, m9
+ psadbw m5, m4, m6
+ paddd m1, m5
+ psadbw m6, m4, m7
+ paddd m2, m6
+ psadbw m4, m8
+ paddd m3, m4
+%endmacro
+
%macro PIXEL_SAD_X4_END_AVX512 0
vextracti32x8 ym4, m0, 1
vextracti32x8 ym5, m1, 1
@@ -4660,6 +4808,60 @@
SAD_X4_32x8_AVX512
PIXEL_SAD_X4_END_AVX512
RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_x4_48x64, 7,8,10
+ pxor m0, m0
+ pxor m1, m1
+ pxor m2, m2
+ pxor m3, m3
+ lea r7, [r5 * 3]
+
+ SAD_X4_48x8_AVX512
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ SAD_X4_48x8_AVX512
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ SAD_X4_48x8_AVX512
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ SAD_X4_48x8_AVX512
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ SAD_X4_48x8_AVX512
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ SAD_X4_48x8_AVX512
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ SAD_X4_48x8_AVX512
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r5 * 4]
+ lea r2, [r2 + r5 * 4]
+ lea r3, [r3 + r5 * 4]
+ lea r4, [r4 + r5 * 4]
+ SAD_X4_48x8_AVX512
+ PIXEL_SAD_X4_END_AVX512
+ RET
;------------------------------------------------------------
;sad_x4 avx512 code end
;------------------------------------------------------------
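A note on how the new macro maps a 48-pixel-wide block onto 512-bit registers: each 48-byte row is split into a 32-byte part and a 16-byte tail. The 32-byte parts of two consecutive rows are packed into one zmm with vinserti32x8, and the 16-byte tails of four consecutive rows are packed into one zmm with vinserti32x4, so every psadbw runs on a full register, and PIXEL_SAD_X4_END_AVX512 then folds the per-lane partial sums accumulated in m0-m3 down to the four results. The sketch below mirrors one 2-row, 32-byte-wide step with intrinsics for a single reference; the helper names and the 64-bit accumulator adds (versus the paddd used above) are illustrative assumptions, not the patch's code.

#include <immintrin.h>
#include <cstdint>

// Pack the 32 leading bytes of two consecutive rows into one zmm register,
// matching the movu ym / vinserti32x8 pattern in SAD_X4_48x8_AVX512.
static inline __m512i load_2x32(const uint8_t* p, intptr_t stride)
{
    __m512i v = _mm512_castsi256_si512(_mm256_loadu_si256((const __m256i*)p));
    return _mm512_inserti32x8(v, _mm256_loadu_si256((const __m256i*)(p + stride)), 1);
}

// acc += psadbw(fenc rows, fref rows): eight 64-bit partial sums per register.
static inline __m512i sad_2x32(__m512i acc, const uint8_t* fenc,
                               const uint8_t* fref, intptr_t frefstride)
{
    __m512i e = load_2x32(fenc, 64 /* FENC_STRIDE */);
    __m512i r = load_2x32(fref, frefstride);
    return _mm512_add_epi64(acc, _mm512_sad_epu8(e, r));
}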