[x265] [PATCH 147 of 307] x86: AVX512 interp_4tap_vert_pp_48x64 for high bit depth
mythreyi at multicorewareinc.com
Sat Apr 7 04:32:25 CEST 2018
# HG changeset patch
# User Vignesh Vijayakumar
# Date 1509798934 -19800
# Sat Nov 04 18:05:34 2017 +0530
# Node ID 410a223c2caa58321a3a6b3e0a91c1dee512667a
# Parent 84dc38e191366e8b737d2a6014793afe830f3b35
x86: AVX512 interp_4tap_vert_pp_48x64 for high bit depth
AVX2 performance : 26.37x
AVX512 performance : 42.37x
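
In scalar terms, the new kernel computes a 4-tap vertical chroma filter on
16-bit pixels: each output row is a weighted sum of the source row above
through two rows below, rounded, shifted and clamped to the pixel range.
A minimal reference sketch of that computation, assuming the usual 6-bit
HEVC interpolation precision (shift 6, offset 32); the names
interp4TapVertPP and pixel_t are illustrative, not x265's:

    #include <algorithm>
    #include <cstdint>

    typedef uint16_t pixel_t;   // high-bit-depth pixel

    static void interp4TapVertPP(const pixel_t* src, intptr_t srcStride,
                                 pixel_t* dst, intptr_t dstStride,
                                 const int16_t coeff[4],
                                 int width, int height, int maxVal)
    {
        const int shift  = 6;                // INTERP_SHIFT_PP
        const int offset = 1 << (shift - 1); // INTERP_OFFSET_PP

        src -= srcStride;  // first tap reads one row above (cf. `sub r0, r1`)
        for (int y = 0; y < height; y++)
        {
            for (int x = 0; x < width; x++)
            {
                int sum = 0;
                for (int t = 0; t < 4; t++)
                    sum += coeff[t] * src[t * srcStride + x];
                int val = (sum + offset) >> shift;
                dst[x] = (pixel_t)std::min(std::max(val, 0), maxVal);
            }
            src += srcStride;
            dst += dstStride;
        }
    }

For the 48x64 kernel below, width = 48, height = 64 and maxVal is
(1 << bitDepth) - 1, i.e. the value broadcast from pw_pixel_max.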
diff -r 84dc38e19136 -r 410a223c2caa source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Fri Nov 03 09:20:36 2017 +0530
+++ b/source/common/x86/asm-primitives.cpp Sat Nov 04 18:05:34 2017 +0530
@@ -2639,6 +2639,7 @@
p.chroma[X265_CSP_I444].pu[LUMA_64x32].filter_vpp = PFX(interp_4tap_vert_pp_64x32_avx512);
p.chroma[X265_CSP_I444].pu[LUMA_64x48].filter_vpp = PFX(interp_4tap_vert_pp_64x48_avx512);
p.chroma[X265_CSP_I444].pu[LUMA_64x64].filter_vpp = PFX(interp_4tap_vert_pp_64x64_avx512);
+ p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_vpp = PFX(interp_4tap_vert_pp_48x64_avx512);
p.chroma[X265_CSP_I444].pu[LUMA_32x8].filter_vpp = PFX(interp_4tap_vert_pp_32x8_avx512);
p.chroma[X265_CSP_I444].pu[LUMA_32x16].filter_vpp = PFX(interp_4tap_vert_pp_32x16_avx512);
p.chroma[X265_CSP_I444].pu[LUMA_32x24].filter_vpp = PFX(interp_4tap_vert_pp_32x24_avx512);
diff -r 84dc38e19136 -r 410a223c2caa source/common/x86/ipfilter16.asm
--- a/source/common/x86/ipfilter16.asm Fri Nov 03 09:20:36 2017 +0530
+++ b/source/common/x86/ipfilter16.asm Sat Nov 04 18:05:34 2017 +0530
@@ -6242,6 +6242,172 @@
FILTER_VER_PP_CHROMA_32xN_AVX512 64
%endif
+%macro PROCESS_CHROMA_VERT_PP_48x4_AVX512 0
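+ ; Four output rows per invocation: the r0-based loads produce output
+ ; rows 0-1 and the r6-based loads (r6 = r0 + 2 rows) rows 2-3.
+ ; Columns 0-31 first, one full zmm row per load:
+ ; first two taps (c0,c1 at [r5]) for output rows 0 and 2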
+ movu m1, [r0]
+ lea r6, [r0 + 2 * r1]
+ movu m10, [r6]
+ movu m3, [r0 + r1]
+ movu m12, [r6 + r1]
+ punpcklwd m0, m1, m3
+ punpcklwd m9, m10, m12
+ pmaddwd m0, [r5]
+ pmaddwd m9, [r5]
+ punpckhwd m1, m3
+ punpckhwd m10, m12
+ pmaddwd m1, [r5]
+ pmaddwd m10, [r5]
+
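+ ; first two taps for output rows 1 and 3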
+ movu m4, [r0 + 2 * r1]
+ movu m13, [r6 + 2 * r1]
+ punpcklwd m2, m3, m4
+ punpcklwd m11, m12, m13
+ pmaddwd m2, [r5]
+ pmaddwd m11, [r5]
+ punpckhwd m3, m4
+ punpckhwd m12, m13
+ pmaddwd m3, [r5]
+ pmaddwd m12, [r5]
+
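+ ; last two taps (c2,c3 at [r5 + mmsize]) for output rows 0 and 2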
+ movu m5, [r0 + r7]
+ movu m14, [r6 + r7]
+ punpcklwd m6, m4, m5
+ punpcklwd m15, m13, m14
+ pmaddwd m6, [r5 + mmsize]
+ pmaddwd m15, [r5 + mmsize]
+ paddd m0, m6
+ paddd m9, m15
+ punpckhwd m4, m5
+ punpckhwd m13, m14
+ pmaddwd m4, [r5 + mmsize]
+ pmaddwd m13, [r5 + mmsize]
+ paddd m1, m4
+ paddd m10, m13
+
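+ ; last two taps for output rows 1 and 3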
+ movu m4, [r0 + 4 * r1]
+ movu m13, [r6 + 4 * r1]
+ punpcklwd m6, m5, m4
+ punpcklwd m15, m14, m13
+ pmaddwd m6, [r5 + mmsize]
+ pmaddwd m15, [r5 + mmsize]
+ paddd m2, m6
+ paddd m11, m15
+ punpckhwd m5, m4
+ punpckhwd m14, m13
+ pmaddwd m5, [r5 + mmsize]
+ pmaddwd m14, [r5 + mmsize]
+ paddd m3, m5
+ paddd m12, m14
+
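+ ; add the rounding offset, shift to pixel precision, clamp to
+ ; [0, pw_pixel_max] and store columns 0-31 of all four rows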
+ paddd m0, m7
+ paddd m1, m7
+ paddd m2, m7
+ paddd m3, m7
+ paddd m9, m7
+ paddd m10, m7
+ paddd m11, m7
+ paddd m12, m7
+
+ psrad m0, INTERP_SHIFT_PP
+ psrad m1, INTERP_SHIFT_PP
+ psrad m2, INTERP_SHIFT_PP
+ psrad m3, INTERP_SHIFT_PP
+ psrad m9, INTERP_SHIFT_PP
+ psrad m10, INTERP_SHIFT_PP
+ psrad m11, INTERP_SHIFT_PP
+ psrad m12, INTERP_SHIFT_PP
+
+ packssdw m0, m1
+ packssdw m2, m3
+ packssdw m9, m10
+ packssdw m11, m12
+ CLIPW2 m0, m2, m16, m8
+ CLIPW2 m9, m11, m16, m8
+ movu [r2], m0
+ movu [r2 + r3], m2
+ movu [r2 + 2 * r3], m9
+ movu [r2 + r8], m11
+
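+ ; columns 32-47: ymm-sized loads, with the r0-based row in the low
+ ; 256 bits and the r6-based row in the high 256 bits of each zmm;
+ ; first two taps for output rows 0 (low) and 2 (high)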
+ movu ym1, [r0 + mmsize]
+ vinserti32x8 m1, [r6 + mmsize], 1
+ movu ym3, [r0 + r1 + mmsize]
+ vinserti32x8 m3, [r6 + r1 + mmsize], 1
+ punpcklwd m0, m1, m3
+ pmaddwd m0, [r5]
+ punpckhwd m1, m3
+ pmaddwd m1, [r5]
+
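+ ; first two taps for output rows 1 and 3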
+ movu ym4, [r0 + 2 * r1 + mmsize]
+ vinserti32x8 m4, [r6 + 2 * r1 + mmsize], 1
+ punpcklwd m2, m3, m4
+ pmaddwd m2, [r5]
+ punpckhwd m3, m4
+ pmaddwd m3, [r5]
+
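+ ; last two taps for output rows 0 and 2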
+ movu ym5, [r0 + r7 + mmsize]
+ vinserti32x8 m5, [r6 + r7 + mmsize], 1
+ punpcklwd m6, m4, m5
+ pmaddwd m6, [r5 + mmsize]
+ paddd m0, m6
+ punpckhwd m4, m5
+ pmaddwd m4, [r5 + mmsize]
+ paddd m1, m4
+
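+ ; last two taps for output rows 1 and 3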
+ movu ym4, [r0 + 4 * r1 + mmsize]
+ vinserti32x8 m4, [r6 + 4 * r1 + mmsize], 1
+ punpcklwd m6, m5, m4
+ pmaddwd m6, [r5 + mmsize]
+ paddd m2, m6
+ punpckhwd m5, m4
+ pmaddwd m5, [r5 + mmsize]
+ paddd m3, m5
+
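+ ; round, shift, clamp and store columns 32-47: the ymm stores cover
+ ; rows 0-1, vextracti32x8 writes the high halves to rows 2-3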
+ paddd m0, m7
+ paddd m1, m7
+ paddd m2, m7
+ paddd m3, m7
+
+ psrad m0, INTERP_SHIFT_PP
+ psrad m1, INTERP_SHIFT_PP
+ psrad m2, INTERP_SHIFT_PP
+ psrad m3, INTERP_SHIFT_PP
+
+ packssdw m0, m1
+ packssdw m2, m3
+ CLIPW2 m0, m2, m16, m8
+ movu [r2 + mmsize], ym0
+ movu [r2 + r3 + mmsize], ym2
+ vextracti32x8 [r2 + 2 * r3 + mmsize], m0, 1
+ vextracti32x8 [r2 + r8 + mmsize], m2, 1
+%endmacro
+
+%if ARCH_X86_64
+INIT_ZMM avx512
+cglobal interp_4tap_vert_pp_48x64, 5, 9, 17
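+ ; strides arrive in pixels: double them for 16-bit samples, back src
+ ; up one row for the first tap, and scale coeffIdx by 128 (two
+ ; 64-byte coefficient rows per index in tab_ChromaCoeffV_avx512)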
+ add r1d, r1d
+ add r3d, r3d
+ sub r0, r1
+ shl r4d, 7
+%ifdef PIC
+ lea r5, [tab_ChromaCoeffV_avx512]
+ lea r5, [r5 + r4]
+%else
+ lea r5, [tab_ChromaCoeffV_avx512 + r4]
+%endif
+ lea r7, [3 * r1]
+ lea r8, [3 * r3]
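+ ; m7 = rounding offset, m8 = pixel-max clamp, m16 = zero (CLIPW2 bounds)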
+ vbroadcasti32x8 m7, [INTERP_OFFSET_PP]
+ vbroadcasti32x8 m8, [pw_pixel_max]
+ pxor m16, m16
+
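+ ; 16 blocks of 4 rows = 64 rows; the final block skips the pointer update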
+%rep 15
+ PROCESS_CHROMA_VERT_PP_48x4_AVX512
+ lea r0, [r0 + 4 * r1]
+ lea r2, [r2 + 4 * r3]
+%endrep
+ PROCESS_CHROMA_VERT_PP_48x4_AVX512
+ RET
+%endif
+
%macro PROCESS_CHROMA_VERT_PP_64x2_AVX512 0
movu m1, [r0]
movu m3, [r0 + r1]
@@ -6325,8 +6491,8 @@
packssdw m8, m9
packssdw m10, m11
pxor m5, m5
- CLIPW2 m0, m2, m5, m16
- CLIPW2 m8, m10, m5, m16
+ CLIPW2 m0, m2, m5, m15
+ CLIPW2 m8, m10, m5, m15
movu [r2], m0
movu [r2 + r3], m2
movu [r2 + mmsize], m8
@@ -6351,7 +6517,7 @@
lea r5, [tab_ChromaCoeffV_avx512 + r4]
%endif
vbroadcasti32x8 m7, [INTERP_OFFSET_PP]
- vbroadcasti32x8 m16, [pw_pixel_max]
+ vbroadcasti32x8 m15, [pw_pixel_max]
%rep %1/2 - 1
PROCESS_CHROMA_VERT_PP_64x2_AVX512
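
The asm-primitives.cpp hunk only registers the new symbol in x265's
primitives table; encoder code reaches it through the filter_vpp function
pointer rather than by name. A hypothetical call site, assuming filter_vpp
carries x265's filter_pp_t signature (src, srcStride, dst, dstStride,
coeffIdx); the wrapper name is illustrative:

    // Sketch of dispatch through the primitives table; EncoderPrimitives
    // and pixel are x265 types, filterChroma48x64 is a made-up wrapper.
    void filterChroma48x64(const EncoderPrimitives& p,
                           const pixel* src, intptr_t srcStride,
                           pixel* dst, intptr_t dstStride, int coeffIdx)
    {
        // coeffIdx in 1..7 selects the chroma fractional-sample filter
        p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_vpp(src, srcStride,
                                                          dst, dstStride,
                                                          coeffIdx);
    }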