[x265] [PATCH 191 of 307] x86: AVX512 interp_8tap_vert_sp_64xN and interp_8tap_vert_ss_64xN
mythreyi at multicorewareinc.com
Sat Apr 7 04:33:09 CEST 2018
# HG changeset patch
# User Vignesh Vijayakumar <vignesh at multicorewareinc.com>
# Date 1511425344 -19800
# Thu Nov 23 13:52:24 2017 +0530
# Node ID b7ebc01ecbfe2510fbe6a2ce305bdb479267decf
# Parent 4714e877aaec710502f81f383734247ef8c4aea4
x86: AVX512 interp_8tap_vert_sp_64xN and interp_8tap_vert_ss_64xN
luma_vss
Size  | AVX2 speedup | AVX512 speedup
--------------------------------------
64x16 | 10.75x | 19.11x
64x32 | 10.48x | 18.95x
64x48 | 10.55x | 18.98x
64x64 | 10.43x | 18.92x
luma_vsp
Size  | AVX2 speedup | AVX512 speedup
--------------------------------------
64x16 | 12.23x | 21.54x
64x32 | 12.24x | 21.61x
64x48 | 12.28x | 21.61x
64x64 | 12.26x | 21.70x
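
For context, a scalar sketch of what these kernels compute (an illustration,
not the actual x265 C primitive): the ss variant keeps the signed 16-bit
residual after a fixed >>6, while the sp variant rounds, shifts, and clips
back to pixel range. The shift/offset/clip values below assume a 10-bit
build, mirroring the INTERP_SHIFT_SP, INTERP_OFFSET_SP, and pw_pixel_max
constants used in the assembly; vert_8tap_ref is a hypothetical name.

    #include <stdint.h>

    #define NTAPS     8
    #define SHIFT_SS  6                      /* matches "psrad m0, 6" in the ss path */
    #define SHIFT_SP  10                     /* assumed INTERP_SHIFT_SP for 10-bit */
    #define OFFSET_SP (1 << (SHIFT_SP - 1))  /* assumed INTERP_OFFSET_SP */
    #define PIXEL_MAX 1023                   /* assumed pw_pixel_max for 10-bit */

    /* src points at the top filter row; the asm rewinds there via "sub r0, r7"
     * with r7 = 3 * srcStride.  Strides are in int16_t elements. */
    static void vert_8tap_ref(const int16_t *src, intptr_t srcStride,
                              int16_t *dst, intptr_t dstStride,
                              const int16_t coeff[NTAPS],
                              int width, int height, int isSP)
    {
        for (int y = 0; y < height; y++)
        {
            for (int x = 0; x < width; x++)
            {
                int32_t sum = 0;
                for (int k = 0; k < NTAPS; k++)
                    sum += coeff[k] * src[x + k * srcStride];

                if (isSP)
                {
                    /* sp: round, shift, clip to [0, PIXEL_MAX] (CLIPW2) */
                    int32_t v = (sum + OFFSET_SP) >> SHIFT_SP;
                    dst[x] = (int16_t)(v < 0 ? 0 : (v > PIXEL_MAX ? PIXEL_MAX : v));
                }
                else
                {
                    /* ss: shift only; packssdw saturates to signed 16-bit */
                    int32_t v = sum >> SHIFT_SS;
                    dst[x] = (int16_t)(v < -32768 ? -32768 : (v > 32767 ? 32767 : v));
                }
            }
            src += srcStride;
            dst += dstStride;
        }
    }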
diff -r 4714e877aaec -r b7ebc01ecbfe source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Thu Nov 23 10:38:37 2017 +0530
+++ b/source/common/x86/asm-primitives.cpp Thu Nov 23 13:52:24 2017 +0530
@@ -2840,12 +2840,20 @@
p.pu[LUMA_32x32].luma_vss = PFX(interp_8tap_vert_ss_32x32_avx512);
p.pu[LUMA_32x24].luma_vss = PFX(interp_8tap_vert_ss_32x24_avx512);
p.pu[LUMA_32x64].luma_vss = PFX(interp_8tap_vert_ss_32x64_avx512);
+ p.pu[LUMA_64x16].luma_vss = PFX(interp_8tap_vert_ss_64x16_avx512);
+ p.pu[LUMA_64x32].luma_vss = PFX(interp_8tap_vert_ss_64x32_avx512);
+ p.pu[LUMA_64x48].luma_vss = PFX(interp_8tap_vert_ss_64x48_avx512);
+ p.pu[LUMA_64x64].luma_vss = PFX(interp_8tap_vert_ss_64x64_avx512);
p.pu[LUMA_32x8].luma_vsp = PFX(interp_8tap_vert_sp_32x8_avx512);
p.pu[LUMA_32x16].luma_vsp = PFX(interp_8tap_vert_sp_32x16_avx512);
p.pu[LUMA_32x32].luma_vsp = PFX(interp_8tap_vert_sp_32x32_avx512);
p.pu[LUMA_32x24].luma_vsp = PFX(interp_8tap_vert_sp_32x24_avx512);
p.pu[LUMA_32x64].luma_vsp = PFX(interp_8tap_vert_sp_32x64_avx512);
+ p.pu[LUMA_64x16].luma_vsp = PFX(interp_8tap_vert_sp_64x16_avx512);
+ p.pu[LUMA_64x32].luma_vsp = PFX(interp_8tap_vert_sp_64x32_avx512);
+ p.pu[LUMA_64x48].luma_vsp = PFX(interp_8tap_vert_sp_64x48_avx512);
+ p.pu[LUMA_64x64].luma_vsp = PFX(interp_8tap_vert_sp_64x64_avx512);
p.cu[BLOCK_8x8].dct = PFX(dct8_avx512);
p.cu[BLOCK_8x8].idct = PFX(idct8_avx512);
diff -r 4714e877aaec -r b7ebc01ecbfe source/common/x86/ipfilter16.asm
--- a/source/common/x86/ipfilter16.asm Thu Nov 23 10:38:37 2017 +0530
+++ b/source/common/x86/ipfilter16.asm Thu Nov 23 13:52:24 2017 +0530
@@ -10941,6 +10941,243 @@
FILTER_VER_S_LUMA_32xN_AVX512 sp, 24
FILTER_VER_S_LUMA_32xN_AVX512 sp, 64
%endif
+
+%macro PROCESS_LUMA_VERT_S_64x2_AVX512 1
+ movu m1, [r0] ;0 row
+ movu m3, [r0 + r1] ;1 row
+ punpcklwd m0, m1, m3
+ pmaddwd m0, m15
+ punpckhwd m1, m3
+ pmaddwd m1, m15
+
+ movu m4, [r0 + 2 * r1] ;2 row
+ punpcklwd m2, m3, m4
+ pmaddwd m2, m15
+ punpckhwd m3, m4
+ pmaddwd m3, m15
+
+ movu m5, [r0 + r7] ;3 row
+ punpcklwd m6, m4, m5
+ pmaddwd m6, m16
+ punpckhwd m4, m5
+ pmaddwd m4, m16
+
+ paddd m0, m6
+ paddd m1, m4
+
+ movu m4, [r0 + 4 * r1] ;4 row
+ punpcklwd m6, m5, m4
+ pmaddwd m6, m16
+ punpckhwd m5, m4
+ pmaddwd m5, m16
+
+ paddd m2, m6
+ paddd m3, m5
+
+ lea r6, [r0 + 4 * r1]
+
+ movu m11, [r6 + r1] ;5 row
+ punpcklwd m8, m4, m11
+ pmaddwd m8, m17
+ punpckhwd m4, m11
+ pmaddwd m4, m17
+
+ movu m12, [r6 + 2 * r1] ;6 row
+ punpcklwd m10, m11, m12
+ pmaddwd m10, m17
+ punpckhwd m11, m12
+ pmaddwd m11, m17
+
+ movu m13, [r6 + r7] ;7 row
+ punpcklwd m14, m12, m13
+ pmaddwd m14, m18
+ punpckhwd m12, m13
+ pmaddwd m12, m18
+
+ paddd m8, m14
+ paddd m4, m12
+ paddd m0, m8
+ paddd m1, m4
+
+ movu m12, [r6 + 4 * r1] ; 8 row
+ punpcklwd m14, m13, m12
+ pmaddwd m14, m18
+ punpckhwd m13, m12
+ pmaddwd m13, m18
+
+ paddd m10, m14
+ paddd m11, m13
+ paddd m2, m10
+ paddd m3, m11
+
+%ifidn %1, sp
+ paddd m0, m19
+ paddd m1, m19
+ paddd m2, m19
+ paddd m3, m19
+
+ psrad m0, INTERP_SHIFT_SP
+ psrad m1, INTERP_SHIFT_SP
+ psrad m2, INTERP_SHIFT_SP
+ psrad m3, INTERP_SHIFT_SP
+
+ packssdw m0, m1
+ packssdw m2, m3
+ CLIPW2 m0, m2, m20, m21
+%else
+ psrad m0, 6
+ psrad m1, 6
+ psrad m2, 6
+ psrad m3, 6
+
+ packssdw m0, m1
+ packssdw m2, m3
+%endif
+
+ movu [r2], m0
+ movu [r2 + r3], m2
+
+ movu m1, [r0 + mmsize] ;0 row
+ movu m3, [r0 + r1 + mmsize] ;1 row
+ punpcklwd m0, m1, m3
+ pmaddwd m0, m15
+ punpckhwd m1, m3
+ pmaddwd m1, m15
+
+ movu m4, [r0 + 2 * r1 + mmsize] ;2 row
+ punpcklwd m2, m3, m4
+ pmaddwd m2, m15
+ punpckhwd m3, m4
+ pmaddwd m3, m15
+
+ movu m5, [r0 + r7 + mmsize] ;3 row
+ punpcklwd m6, m4, m5
+ pmaddwd m6, m16
+ punpckhwd m4, m5
+ pmaddwd m4, m16
+
+ paddd m0, m6
+ paddd m1, m4
+
+ movu m4, [r0 + 4 * r1 + mmsize] ;4 row
+ punpcklwd m6, m5, m4
+ pmaddwd m6, m16
+ punpckhwd m5, m4
+ pmaddwd m5, m16
+
+ paddd m2, m6
+ paddd m3, m5
+
+ movu m11, [r6 + r1 + mmsize] ;5 row
+ punpcklwd m8, m4, m11
+ pmaddwd m8, m17
+ punpckhwd m4, m11
+ pmaddwd m4, m17
+
+ movu m12, [r6 + 2 * r1 + mmsize] ;6 row
+ punpcklwd m10, m11, m12
+ pmaddwd m10, m17
+ punpckhwd m11, m12
+ pmaddwd m11, m17
+
+ movu m13, [r6 + r7 + mmsize] ;7 row
+ punpcklwd m14, m12, m13
+ pmaddwd m14, m18
+ punpckhwd m12, m13
+ pmaddwd m12, m18
+
+ paddd m8, m14
+ paddd m4, m12
+ paddd m0, m8
+ paddd m1, m4
+
+ movu m12, [r6 + 4 * r1 + mmsize] ; 8 row
+ punpcklwd m14, m13, m12
+ pmaddwd m14, m18
+ punpckhwd m13, m12
+ pmaddwd m13, m18
+
+ paddd m10, m14
+ paddd m11, m13
+ paddd m2, m10
+ paddd m3, m11
+
+%ifidn %1, sp
+ paddd m0, m19
+ paddd m1, m19
+ paddd m2, m19
+ paddd m3, m19
+
+ psrad m0, INTERP_SHIFT_SP
+ psrad m1, INTERP_SHIFT_SP
+ psrad m2, INTERP_SHIFT_SP
+ psrad m3, INTERP_SHIFT_SP
+
+ packssdw m0, m1
+ packssdw m2, m3
+ CLIPW2 m0, m2, m20, m21
+%else
+ psrad m0, 6
+ psrad m1, 6
+ psrad m2, 6
+ psrad m3, 6
+
+ packssdw m0, m1
+ packssdw m2, m3
+%endif
+
+ movu [r2 + mmsize], m0
+ movu [r2 + r3 + mmsize], m2
+%endmacro
+;-----------------------------------------------------------------------------------------------------------------
+; void interp_8tap_vert(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_S_LUMA_64xN_AVX512 2
+INIT_ZMM avx512
+cglobal interp_8tap_vert_%1_64x%2, 5, 8, 22
+ add r1d, r1d
+ add r3d, r3d
+ lea r7, [3 * r1]
+ sub r0, r7
+ shl r4d, 8
+%ifdef PIC
+ lea r5, [tab_LumaCoeffVer_avx512]
+ mova m15, [r5 + r4]
+ mova m16, [r5 + r4 + 1 * mmsize]
+ mova m17, [r5 + r4 + 2 * mmsize]
+ mova m18, [r5 + r4 + 3 * mmsize]
+%else
+ lea r5, [tab_LumaCoeffVer_avx512 + r4]
+ mova m15, [r5]
+ mova m16, [r5 + 1 * mmsize]
+ mova m17, [r5 + 2 * mmsize]
+ mova m18, [r5 + 3 * mmsize]
+%endif
+%ifidn %1, sp
+ vbroadcasti32x4 m19, [INTERP_OFFSET_SP]
+ pxor m20, m20
+ vbroadcasti32x8 m21, [pw_pixel_max]
+%endif
+
+%rep %2/2 - 1
+ PROCESS_LUMA_VERT_S_64x2_AVX512 %1
+ lea r0, [r0 + 2 * r1]
+ lea r2, [r2 + 2 * r3]
+%endrep
+ PROCESS_LUMA_VERT_S_64x2_AVX512 %1
+ RET
+%endmacro
+
+%if ARCH_X86_64
+ FILTER_VER_S_LUMA_64xN_AVX512 ss, 16
+ FILTER_VER_S_LUMA_64xN_AVX512 ss, 32
+ FILTER_VER_S_LUMA_64xN_AVX512 ss, 48
+ FILTER_VER_S_LUMA_64xN_AVX512 ss, 64
+ FILTER_VER_S_LUMA_64xN_AVX512 sp, 16
+ FILTER_VER_S_LUMA_64xN_AVX512 sp, 32
+ FILTER_VER_S_LUMA_64xN_AVX512 sp, 48
+ FILTER_VER_S_LUMA_64xN_AVX512 sp, 64
+%endif
;-------------------------------------------------------------------------------------------------------------
;avx512 luma_vss and luma_vsp code end
;-------------------------------------------------------------------------------------------------------------
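
Reading the macro above: each 64-sample row is processed as two zmm halves
(the second block repeats the first at "+ mmsize"), and the 8 taps are
applied as four coefficient pairs. The "shl r4d, 8" scales coeffIdx by 256
bytes, i.e. the four 64-byte loads into m15..m18, so each of those registers
is assumed to hold one broadcast (c[2k], c[2k+1]) pair from
tab_LumaCoeffVer_avx512. punpcklwd/punpckhwd interleave words from two
adjacent rows so that a single pmaddwd applies both taps of a pair; per
32-bit lane this is:

    /* One dword lane of the punpck + pmaddwd idiom: words from two adjacent
     * rows, multiply-added against the coefficient pair (c0, c1) held in one
     * of m15..m18.  madd_pair is a hypothetical name for illustration. */
    static int32_t madd_pair(int16_t rowA, int16_t rowB, int16_t c0, int16_t c1)
    {
        return (int32_t)rowA * c0 + (int32_t)rowB * c1;
    }

Summing madd_pair over the four pairs (rows 0..7 against taps 0..7)
reproduces the 32-bit accumulator that the asm then shifts, packs, and
stores.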