[x265] [PATCH 228 of 307] x86: AVX512 interp_4tap_vert_sp_48x64
mythreyi at multicorewareinc.com
mythreyi at multicorewareinc.com
Sat Apr 7 04:33:46 CEST 2018
# HG changeset patch
# User Vignesh Vijayakumar <vignesh at multicorewareinc.com>
# Date 1512041776 -19800
# Thu Nov 30 17:06:16 2017 +0530
# Node ID e77ef4964dd04de6a8b84378f7a46219f34bf1b5
# Parent 9c652d9062d29607cdb3392567817e4e2ab7f6bb
x86: AVX512 interp_4tap_vert_sp_48x64
AVX2 performance: 11.93x
AVX512 performance: 23.59x
diff -r 9c652d9062d2 -r e77ef4964dd0 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Thu Nov 30 17:01:28 2017 +0530
+++ b/source/common/x86/asm-primitives.cpp Thu Nov 30 17:06:16 2017 +0530
@@ -4998,6 +4998,7 @@
p.chroma[X265_CSP_I444].pu[LUMA_32x24].filter_vsp = PFX(interp_4tap_vert_sp_32x24_avx512);
p.chroma[X265_CSP_I444].pu[LUMA_32x32].filter_vsp = PFX(interp_4tap_vert_sp_32x32_avx512);
p.chroma[X265_CSP_I444].pu[LUMA_32x64].filter_vsp = PFX(interp_4tap_vert_sp_32x64_avx512);
+ p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_vsp = PFX(interp_4tap_vert_sp_48x64_avx512);
p.chroma[X265_CSP_I444].pu[LUMA_64x64].filter_vsp = PFX(interp_4tap_vert_sp_64x64_avx512);
p.chroma[X265_CSP_I444].pu[LUMA_64x48].filter_vsp = PFX(interp_4tap_vert_sp_64x48_avx512);
p.chroma[X265_CSP_I444].pu[LUMA_64x32].filter_vsp = PFX(interp_4tap_vert_sp_64x32_avx512);
diff -r 9c652d9062d2 -r e77ef4964dd0 source/common/x86/ipfilter8.asm
--- a/source/common/x86/ipfilter8.asm Thu Nov 30 17:01:28 2017 +0530
+++ b/source/common/x86/ipfilter8.asm Thu Nov 30 17:06:16 2017 +0530
@@ -11728,114 +11728,122 @@
FILTER_VER_S_CHROMA_32xN_AVX512 sp, 48
FILTER_VER_S_CHROMA_32xN_AVX512 sp, 64
%endif
-%macro PROCESS_CHROMA_VERT_SS_48x4_AVX512 0
- movu m1, [r0]
+
+%macro PROCESS_CHROMA_VERT_S_48x4_AVX512 1
+ PROCESS_CHROMA_VERT_S_32x2_AVX512 %1
lea r6, [r0 + 2 * r1]
- movu m10, [r6]
- movu m3, [r0 + r1]
- movu m12, [r6 + r1]
+
+ movu m1, [r6]
+ movu m3, [r6 + r1]
punpcklwd m0, m1, m3
- punpcklwd m9, m10, m12
- pmaddwd m0, m16
- pmaddwd m9, m16
+ pmaddwd m0, m7
punpckhwd m1, m3
- punpckhwd m10, m12
- pmaddwd m1, m16
- pmaddwd m10, m16
-
- movu m4, [r0 + 2 * r1]
- movu m13, [r6 + 2 * r1]
+ pmaddwd m1, m7
+ movu m4, [r6 + 2 * r1]
punpcklwd m2, m3, m4
- punpcklwd m11, m12, m13
- pmaddwd m2, m16
- pmaddwd m11, m16
+ pmaddwd m2, m7
punpckhwd m3, m4
- punpckhwd m12, m13
- pmaddwd m3, m16
- pmaddwd m12, m16
-
- movu m5, [r0 + r7]
- movu m14, [r6 + r7]
+ pmaddwd m3, m7
+
+ movu m5, [r6 + r4]
punpcklwd m6, m4, m5
- punpcklwd m15, m13, m14
- pmaddwd m6, m17
- pmaddwd m15, m17
+ pmaddwd m6, m8
paddd m0, m6
- paddd m9, m15
punpckhwd m4, m5
- punpckhwd m13, m14
- pmaddwd m4, m17
- pmaddwd m13, m17
+ pmaddwd m4, m8
paddd m1, m4
- paddd m10, m13
-
- movu m4, [r0 + 4 * r1]
- movu m13, [r6 + 4 * r1]
+
+ movu m4, [r6 + 4 * r1]
punpcklwd m6, m5, m4
- punpcklwd m15, m14, m13
- pmaddwd m6, m17
- pmaddwd m15, m17
+ pmaddwd m6, m8
paddd m2, m6
- paddd m11, m15
punpckhwd m5, m4
- punpckhwd m14, m13
- pmaddwd m5, m17
- pmaddwd m14, m17
+ pmaddwd m5, m8
paddd m3, m5
- paddd m12, m14
-
+
+%ifidn %1, sp
+ paddd m0, m9
+ paddd m1, m9
+ paddd m2, m9
+ paddd m3, m9
+
+ psrad m0, 12
+ psrad m1, 12
+ psrad m2, 12
+ psrad m3, 12
+
+ packssdw m0, m1
+ packssdw m2, m3
+ packuswb m0, m2
+ vpermq m0, m10, m0
+ movu [r2 + 2 * r3], ym0
+ vextracti32x8 [r2 + r5], m0, 1
+%else
psrad m0, 6
psrad m1, 6
psrad m2, 6
psrad m3, 6
- psrad m9, 6
- psrad m10, 6
- psrad m11, 6
- psrad m12, 6
+
packssdw m0, m1
packssdw m2, m3
- packssdw m9, m10
- packssdw m11, m12
-
- movu [r2], m0
- movu [r2 + r3], m2
- movu [r2 + 2 * r3], m9
- movu [r2 + r8], m11
+ movu [r2 + 2 * r3], m0
+ movu [r2 + r5], m2
+%endif
movu ym1, [r0 + mmsize]
- vinserti32x8 m1, [r6 + mmsize], 1
+ vinserti32x8 m1, [r6 + mmsize], 1
movu ym3, [r0 + r1 + mmsize]
- vinserti32x8 m3, [r6 + r1 + mmsize], 1
+ vinserti32x8 m3, [r6 + r1 + mmsize], 1
punpcklwd m0, m1, m3
- pmaddwd m0, m16
+ pmaddwd m0, m7
punpckhwd m1, m3
- pmaddwd m1, m16
+ pmaddwd m1, m7
movu ym4, [r0 + 2 * r1 + mmsize]
- vinserti32x8 m4, [r6 + 2 * r1 + mmsize], 1
+ vinserti32x8 m4, [r6 + 2 * r1 + mmsize], 1
punpcklwd m2, m3, m4
- pmaddwd m2, m16
+ pmaddwd m2, m7
punpckhwd m3, m4
- pmaddwd m3, m16
-
- movu ym5, [r0 + r7 + mmsize]
- vinserti32x8 m5, [r6 + r7 + mmsize], 1
+ pmaddwd m3, m7
+
+ movu ym5, [r0 + r4 + mmsize]
+ vinserti32x8 m5, [r6 + r4 + mmsize], 1
punpcklwd m6, m4, m5
- pmaddwd m6, m17
+ pmaddwd m6, m8
paddd m0, m6
punpckhwd m4, m5
- pmaddwd m4, m17
+ pmaddwd m4, m8
paddd m1, m4
movu ym4, [r0 + 4 * r1 + mmsize]
- vinserti32x8 m4, [r6 + 4 * r1 + mmsize], 1
+ vinserti32x8 m4, [r6 + 4 * r1 + mmsize], 1
punpcklwd m6, m5, m4
- pmaddwd m6, m17
+ pmaddwd m6, m8
paddd m2, m6
punpckhwd m5, m4
- pmaddwd m5, m17
+ pmaddwd m5, m8
paddd m3, m5
+%ifidn %1, sp
+ paddd m0, m9
+ paddd m1, m9
+ paddd m2, m9
+ paddd m3, m9
+
+ psrad m0, 12
+ psrad m1, 12
+ psrad m2, 12
+ psrad m3, 12
+
+ packssdw m0, m1
+ packssdw m2, m3
+ packuswb m0, m2
+ vpermq m0, m10, m0
+ movu [r2 + mmsize/2], xm0
+ vextracti32x4 [r2 + r3 + mmsize/2], m0, 2
+ vextracti32x4 [r2 + 2 * r3 + mmsize/2], m0, 1
+ vextracti32x4 [r2 + r5 + mmsize/2], m0, 3
+%else
psrad m0, 6
psrad m1, 6
psrad m2, 6
@@ -11846,36 +11854,50 @@
movu [r2 + mmsize], ym0
movu [r2 + r3 + mmsize], ym2
vextracti32x8 [r2 + 2 * r3 + mmsize], m0, 1
- vextracti32x8 [r2 + r8 + mmsize], m2, 1
-%endmacro
-
-%if ARCH_X86_64
+ vextracti32x8 [r2 + r5 + mmsize], m2, 1
+%endif
+%endmacro
+
+%macro FILTER_VER_S_CHROMA_48x64_AVX512 1
INIT_ZMM avx512
-cglobal interp_4tap_vert_ss_48x64, 5, 9, 18
+cglobal interp_4tap_vert_%1_48x64, 4, 7, 11
+ mov r4d, r4m
+ shl r4d, 7
+
+%ifdef PIC
+ lea r5, [pw_ChromaCoeffVer_32_avx512]
+ mova m7, [r5 + r4]
+ mova m8, [r5 + r4 + mmsize]
+%else
+ mova m7, [pw_ChromaCoeffVer_32_avx512 + r4]
+ mova m8, [pw_ChromaCoeffVer_32_avx512 + r4 + mmsize]
+%endif
+
+%ifidn %1, sp
+ vbroadcasti32x4 m9, [pd_526336]
+ mova m10, [interp8_vsp_store_avx512]
+%else
+ add r3d, r3d
+%endif
add r1d, r1d
- add r3d, r3d
sub r0, r1
- shl r4d, 7
-%ifdef PIC
- lea r5, [pw_ChromaCoeffVer_32_avx512]
- mova m16, [r5 + r4]
- mova m17, [r5 + r4 + mmsize]
-%else
- lea r5, [pw_ChromaCoeffVer_32_avx512 + r4]
- mova m16, [r5]
- mova m17, [r5 + mmsize]
-%endif
- lea r7, [3 * r1]
- lea r8, [3 * r3]
+ lea r4, [r1 * 3]
+ lea r5, [r3 * 3]
%rep 15
- PROCESS_CHROMA_VERT_SS_48x4_AVX512
+ PROCESS_CHROMA_VERT_S_48x4_AVX512 %1
lea r0, [r0 + 4 * r1]
lea r2, [r2 + 4 * r3]
%endrep
- PROCESS_CHROMA_VERT_SS_48x4_AVX512
- RET
-%endif
+ PROCESS_CHROMA_VERT_S_48x4_AVX512 %1
+ RET
+%endmacro
+
+%if ARCH_X86_64
+ FILTER_VER_S_CHROMA_48x64_AVX512 ss
+ FILTER_VER_S_CHROMA_48x64_AVX512 sp
+%endif
+
%macro PROCESS_CHROMA_VERT_S_64x2_AVX512 1
PROCESS_CHROMA_VERT_S_32x2_AVX512 %1
movu m1, [r0 + mmsize]
More information about the x265-devel
mailing list