[x265] [PATCH 170 of 307] x86: AVX512 interp_4tap_vert_sp_24xN and interp_4tap_vert_ss_24xN for high bit depth
mythreyi at multicorewareinc.com
mythreyi at multicorewareinc.com
Sat Apr 7 04:32:48 CEST 2018
# HG changeset patch
# User Vignesh Vijayakumar<vignesh at multicorewareinc.com>
# Date 1510667209 -19800
# Tue Nov 14 19:16:49 2017 +0530
# Node ID 487307659c367f26096d0da0c81a89ca89b2ffbe
# Parent a6c12a9c8cba58df74e482e580840984991b31c9
x86: AVX512 interp_4tap_vert_sp_24xN and interp_4tap_vert_ss_24xN for high bit depth
i444 (4:4:4 chroma format) benchmark results:
chroma_vsp_24x32
AVX2 performance : 18.42x
AVX512 performance : 31.23x
chroma_vss_24x32
AVX2 performance : 20.37x
AVX512 performance : 29.94x
diff -r a6c12a9c8cba -r 487307659c36 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Tue Nov 14 17:06:06 2017 +0530
+++ b/source/common/x86/asm-primitives.cpp Tue Nov 14 19:16:49 2017 +0530
@@ -2770,6 +2770,12 @@
p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_vps = PFX(interp_4tap_vert_ps_24x32_avx512);
p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].filter_vps = PFX(interp_4tap_vert_ps_24x64_avx512);
p.chroma[X265_CSP_I444].pu[LUMA_24x32].filter_vps = PFX(interp_4tap_vert_ps_24x32_avx512);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_vss = PFX(interp_4tap_vert_ss_24x32_avx512);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].filter_vss = PFX(interp_4tap_vert_ss_24x64_avx512);
+ p.chroma[X265_CSP_I444].pu[LUMA_24x32].filter_vss = PFX(interp_4tap_vert_ss_24x32_avx512);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_vsp = PFX(interp_4tap_vert_sp_24x32_avx512);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].filter_vsp = PFX(interp_4tap_vert_sp_24x64_avx512);
+ p.chroma[X265_CSP_I444].pu[LUMA_24x32].filter_vsp = PFX(interp_4tap_vert_sp_24x32_avx512);
p.cu[BLOCK_8x8].dct = PFX(dct8_avx512);
p.cu[BLOCK_8x8].idct = PFX(idct8_avx512);
diff -r a6c12a9c8cba -r 487307659c36 source/common/x86/ipfilter16.asm
--- a/source/common/x86/ipfilter16.asm Tue Nov 14 17:06:06 2017 +0530
+++ b/source/common/x86/ipfilter16.asm Tue Nov 14 19:16:49 2017 +0530
@@ -8272,6 +8272,228 @@
FILTER_VER_PS_CHROMA_64xN_AVX512 64
%endif
+;-----------------------------------------------------------------------------
+; PROCESS_CHROMA_VERT_S_24x8_AVX512  ss|sp
+; Vertically filters one 24x8 tile of 16-bit intermediate samples with the
+; 4-tap chroma filter.  Columns 0..15 use 32-byte (ym) loads with two source
+; rows interleaved per zmm; the remaining columns 16..23 (at byte offset
+; mmsize/2 = 32) use 16-byte (xm) loads with four rows per zmm.
+; Caller contract (set up by FILTER_VER_S_CHROMA_24xN_AVX512):
+;   r0 = src (rewound one row), r1 = srcStride (bytes), r2 = dst,
+;   r3 = dstStride (bytes), r10 = 3*r1, r7 = 3*r3,
+;   m16/m17 = coefficient pairs for pmaddwd, m7 = rounding offset (sp only)
+; On exit r2 has advanced by 4*r3 (see the lea near the end).
+; Clobbers: r6, r8, r9, r11, m0-m6, m9-m15, flags.
+;-----------------------------------------------------------------------------
+%macro PROCESS_CHROMA_VERT_S_24x8_AVX512 1
+; row base pointers: r0, r6 = +2 rows, r8 = +4 rows, r9 = +6 rows
+ movu ym1, [r0]
+ lea r6, [r0 + 2 * r1]
+ lea r8, [r0 + 4 * r1]
+ lea r9, [r8 + 2 * r1]
+
+; rows 0/1 (low zmm half) and 2/3 (high half) in m1/m3; rows 4/5 and 6/7 in m10/m12
+ movu ym10, [r8]
+ movu ym3, [r0 + r1]
+ movu ym12, [r8 + r1]
+ vinserti32x8 m1, [r6], 1
+ vinserti32x8 m10, [r9], 1
+ vinserti32x8 m3, [r6 + r1], 1
+ vinserti32x8 m12, [r9 + r1], 1
+
+; interleave row n with row n+1 and multiply-accumulate with the first
+; coefficient pair (m16)
+ punpcklwd m0, m1, m3
+ punpcklwd m9, m10, m12
+ pmaddwd m0, m16
+ pmaddwd m9, m16
+ punpckhwd m1, m3
+ punpckhwd m10, m12
+ pmaddwd m1, m16
+ pmaddwd m10, m16
+
+; same for rows n+1/n+2 (start of the next output rows' window)
+ movu ym4, [r0 + 2 * r1]
+ movu ym13, [r8 + 2 * r1]
+ vinserti32x8 m4, [r6 + 2 * r1], 1
+ vinserti32x8 m13, [r9 + 2 * r1], 1
+ punpcklwd m2, m3, m4
+ punpcklwd m11, m12, m13
+ pmaddwd m2, m16
+ pmaddwd m11, m16
+ punpckhwd m3, m4
+ punpckhwd m12, m13
+ pmaddwd m3, m16
+ pmaddwd m12, m16
+
+; rows n+2/n+3 with the second coefficient pair (m17), accumulated into the
+; first output rows (r10 = 3*r1)
+ movu ym5, [r0 + r10]
+ vinserti32x8 m5, [r6 + r10], 1
+ movu ym14, [r8 + r10]
+ vinserti32x8 m14, [r9 + r10], 1
+ punpcklwd m6, m4, m5
+ punpcklwd m15, m13, m14
+ pmaddwd m6, m17
+ pmaddwd m15, m17
+ paddd m0, m6
+ paddd m9, m15
+ punpckhwd m4, m5
+ punpckhwd m13, m14
+ pmaddwd m4, m17
+ pmaddwd m13, m17
+ paddd m1, m4
+ paddd m10, m13
+
+; rows n+3/n+4 with m17, completing the second output rows
+ movu ym4, [r0 + 4 * r1]
+ vinserti32x8 m4, [r6 + 4 * r1], 1
+ movu ym13, [r8 + 4 * r1]
+ vinserti32x8 m13, [r9 + 4 * r1], 1
+ punpcklwd m6, m5, m4
+ punpcklwd m15, m14, m13
+ pmaddwd m6, m17
+ pmaddwd m15, m17
+ paddd m2, m6
+ paddd m11, m15
+ punpckhwd m5, m4
+ punpckhwd m14, m13
+ pmaddwd m5, m17
+ pmaddwd m14, m17
+ paddd m3, m5
+ paddd m12, m14
+
+%ifidn %1,sp
+; sp: add rounding offset, then shift back to pixel range
+ paddd m0, m7
+ paddd m1, m7
+ paddd m2, m7
+ paddd m3, m7
+ paddd m9, m7
+ paddd m10, m7
+ paddd m11, m7
+ paddd m12, m7
+
+ psrad m0, INTERP_SHIFT_SP
+ psrad m1, INTERP_SHIFT_SP
+ psrad m2, INTERP_SHIFT_SP
+ psrad m3, INTERP_SHIFT_SP
+ psrad m9, INTERP_SHIFT_SP
+ psrad m10, INTERP_SHIFT_SP
+ psrad m11, INTERP_SHIFT_SP
+ psrad m12, INTERP_SHIFT_SP
+%else
+; ss: drop the 6 fraction bits added by the filter, no rounding
+ psrad m0, 6
+ psrad m1, 6
+ psrad m2, 6
+ psrad m3, 6
+ psrad m9, 6
+ psrad m10, 6
+ psrad m11, 6
+ psrad m12, 6
+%endif
+; pack 32-bit sums back to 16-bit and store 8 rows of columns 0..15
+ packssdw m0, m1
+ packssdw m2, m3
+ packssdw m9, m10
+ packssdw m11, m12
+ movu [r2], ym0
+ movu [r2 + r3], ym2
+ vextracti32x8 [r2 + 2 * r3], m0, 1
+ vextracti32x8 [r2 + r7], m2, 1
+ lea r11, [r2 + 4 * r3]
+ movu [r11], ym9
+ movu [r11 + r3], ym11
+ vextracti32x8 [r11 + 2 * r3], m9, 1
+ vextracti32x8 [r11 + r7], m11, 1
+
+; ---- columns 16..23 (offset mmsize/2 bytes): four rows per zmm ----
+ movu xm1, [r0 + mmsize/2]
+ vinserti32x4 m1, [r6 + mmsize/2], 1
+ vinserti32x4 m1, [r8 + mmsize/2], 2
+ vinserti32x4 m1, [r9 + mmsize/2], 3
+ movu xm3, [r0 + r1 + mmsize/2]
+ vinserti32x4 m3, [r6 + r1 + mmsize/2], 1
+ vinserti32x4 m3, [r8 + r1 + mmsize/2], 2
+ vinserti32x4 m3, [r9 + r1 + mmsize/2], 3
+ punpcklwd m0, m1, m3
+ pmaddwd m0, m16
+ punpckhwd m1, m3
+ pmaddwd m1, m16
+
+ movu xm4, [r0 + 2 * r1 + mmsize/2]
+ vinserti32x4 m4, [r6 + 2 * r1 + mmsize/2], 1
+ vinserti32x4 m4, [r8 + 2 * r1 + mmsize/2], 2
+ vinserti32x4 m4, [r9 + 2 * r1 + mmsize/2], 3
+ punpcklwd m2, m3, m4
+ pmaddwd m2, m16
+ punpckhwd m3, m4
+ pmaddwd m3, m16
+
+ movu xm5, [r0 + r10 + mmsize/2]
+ vinserti32x4 m5, [r6 + r10 + mmsize/2], 1
+ vinserti32x4 m5, [r8 + r10 + mmsize/2], 2
+ vinserti32x4 m5, [r9 + r10 + mmsize/2], 3
+ punpcklwd m6, m4, m5
+ pmaddwd m6, m17
+ paddd m0, m6
+ punpckhwd m4, m5
+ pmaddwd m4, m17
+ paddd m1, m4
+
+ movu xm4, [r0 + 4 * r1 + mmsize/2]
+ vinserti32x4 m4, [r6 + 4 * r1 + mmsize/2], 1
+ vinserti32x4 m4, [r8 + 4 * r1 + mmsize/2], 2
+ vinserti32x4 m4, [r9 + 4 * r1 + mmsize/2], 3
+ punpcklwd m6, m5, m4
+ pmaddwd m6, m17
+ paddd m2, m6
+ punpckhwd m5, m4
+ pmaddwd m5, m17
+ paddd m3, m5
+
+%ifidn %1,sp
+ paddd m0, m7
+ paddd m1, m7
+ paddd m2, m7
+ paddd m3, m7
+ psrad m0, INTERP_SHIFT_SP
+ psrad m1, INTERP_SHIFT_SP
+ psrad m2, INTERP_SHIFT_SP
+ psrad m3, INTERP_SHIFT_SP
+%else
+ psrad m0, 6
+ psrad m1, 6
+ psrad m2, 6
+ psrad m3, 6
+%endif
+; pack and store columns 16..23; NOTE: r2 advances 4 rows here, which the
+; caller's loop tail relies on (it adds only 4*r3 more per 8-row iteration)
+ packssdw m0, m1
+ packssdw m2, m3
+ movu [r2 + mmsize/2], xm0
+ movu [r2 + r3 + mmsize/2], xm2
+ vextracti32x4 [r2 + 2 * r3 + mmsize/2], m0, 1
+ vextracti32x4 [r2 + r7 + mmsize/2], m2, 1
+ lea r2, [r2 + 4 * r3]
+ vextracti32x4 [r2 + mmsize/2], m0, 2
+ vextracti32x4 [r2 + r3 + mmsize/2], m2, 2
+ vextracti32x4 [r2 + 2 * r3 + mmsize/2], m0, 3
+ vextracti32x4 [r2 + r7 + mmsize/2], m2, 3
+%endmacro
+
+;-----------------------------------------------------------------------------
+; FILTER_VER_S_CHROMA_24xN_AVX512  ss|sp, height
+; Emits interp_4tap_vert_%1_24x%2_avx512(src, srcStride, dst, dstStride,
+; coeffIdx).  Strides arrive in sample units and are doubled to bytes
+; (16-bit samples); src is rewound one row so the 4-tap window spans
+; rows -1..+2 around each output row.  Processes the block in 8-row tiles.
+;-----------------------------------------------------------------------------
+%macro FILTER_VER_S_CHROMA_24xN_AVX512 2
+INIT_ZMM avx512
+cglobal interp_4tap_vert_%1_24x%2, 5, 12, 18
+ add r1d, r1d ; srcStride: samples -> bytes
+ add r3d, r3d ; dstStride: samples -> bytes
+ sub r0, r1 ; rewind src one row for the 4-tap window
+ shl r4d, 7 ; coeffIdx * 128: two 64-byte zmm rows per table entry
+
+%ifdef PIC
+ lea r5, [tab_ChromaCoeffV_avx512]
+ lea r5, [r5 + r4]
+%else
+ lea r5, [tab_ChromaCoeffV_avx512 + r4]
+%endif
+
+%ifidn %1, sp
+ vbroadcasti32x4 m7, [INTERP_OFFSET_SP] ; rounding offset, sp variant only
+%endif
+ mova m16, [r5] ; first coefficient pair
+ mova m17, [r5 + mmsize] ; second coefficient pair
+ lea r10, [3 * r1] ; 3*srcStride, consumed by the 24x8 macro
+ lea r7, [3 * r3] ; 3*dstStride, consumed by the 24x8 macro
+%rep %2/8 - 1
+ PROCESS_CHROMA_VERT_S_24x8_AVX512 %1
+ lea r0, [r8 + 4 * r1] ; r8 = src+4 rows (from macro), so src += 8 rows
+ lea r2, [r2 + 4 * r3] ; macro already advanced r2 by 4 rows -> += 8 total
+%endrep
+ PROCESS_CHROMA_VERT_S_24x8_AVX512 %1
+ RET
+%endmacro
+
+; x86-64 only: the kernels use r6-r11 and xmm16/xmm17 (12 GPRs, 18 xmm regs
+; declared in cglobal), none of which are available in 32-bit mode
+%if ARCH_X86_64
+ FILTER_VER_S_CHROMA_24xN_AVX512 ss,32
+ FILTER_VER_S_CHROMA_24xN_AVX512 ss,64
+ FILTER_VER_S_CHROMA_24xN_AVX512 sp,32
+ FILTER_VER_S_CHROMA_24xN_AVX512 sp,64
+%endif
+
%macro PROCESS_CHROMA_VERT_S_32x2_AVX512 1
movu m1, [r0]
movu m3, [r0 + r1]
More information about the x265-devel
mailing list