[x265] [PATCH] asm: updated avx2 algorithm for copy_ps 32xN & 64xN, improved by over 45% over SSE asm
dnyaneshwar at multicorewareinc.com
Wed Aug 5 11:55:59 CEST 2015
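
For context, copy_ps widens 8-bit source pixels into a 16-bit coefficient block row by row. A minimal scalar sketch, not part of the patch (signature follows the x265 primitive convention, pixel assumed 8-bit, strides in elements; blockcopy_ps_ref is a hypothetical name):

#include <cstdint>

// Scalar reference for copy_ps: zero-extend each 8-bit pixel of a
// bw x bh block into an int16_t destination.
template<int bw, int bh>
void blockcopy_ps_ref(int16_t* dst, intptr_t dstStride,
                      const uint8_t* src, intptr_t srcStride)
{
    for (int y = 0; y < bh; y++)
    {
        for (int x = 0; x < bw; x++)
            dst[x] = (int16_t)src[x];   // widen, value unchanged
        dst += dstStride;               // strides count elements
        src += srcStride;
    }
}

The asm below performs this same copy (note the "add r1, r1" converting the int16_t destination stride to bytes); the speedup comes from how the widening is vectorized.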
# HG changeset patch
# User Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
# Date 1438767554 -19800
# Wed Aug 05 15:09:14 2015 +0530
# Node ID 377a996a8d74110f838ff2e3cef1c42781d6d730
# Parent 3eb2ec5922be1cd934dec7f7ed886d03c0125ef5
asm: updated avx2 algorithm for copy_ps 32xN & 64xN, improved by over 45% over SSE asm
diff -r 3eb2ec5922be -r 377a996a8d74 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Wed Aug 05 12:20:01 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp Wed Aug 05 15:09:14 2015 +0530
@@ -3622,6 +3622,11 @@
if (cpuMask & X265_CPU_BMI2)
p.scanPosLast = PFX(scanPosLast_avx2_bmi2);
+ p.cu[BLOCK_32x32].copy_ps = PFX(blockcopy_ps_32x32_avx2);
+ p.chroma[X265_CSP_I420].cu[CHROMA_420_32x32].copy_ps = PFX(blockcopy_ps_32x32_avx2);
+ p.chroma[X265_CSP_I422].cu[CHROMA_422_32x64].copy_ps = PFX(blockcopy_ps_32x64_avx2);
+ p.cu[BLOCK_64x64].copy_ps = PFX(blockcopy_ps_64x64_avx2);
+
/* The following primitives have been disabled since performance compared to SSE is negligible/negative */
#if 0
p.pu[LUMA_8x4].addAvg = PFX(addAvg_8x4_avx2);
@@ -3652,10 +3657,6 @@
p.cu[BLOCK_16x16].copy_sp = PFX(blockcopy_sp_16x16_avx2);
p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].copy_sp = PFX(blockcopy_sp_16x16_avx2);
p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].copy_sp = PFX(blockcopy_sp_16x32_avx2);
- p.cu[BLOCK_32x32].copy_ps = PFX(blockcopy_ps_32x32_avx2);
- p.chroma[X265_CSP_I420].cu[CHROMA_420_32x32].copy_ps = PFX(blockcopy_ps_32x32_avx2);
- p.chroma[X265_CSP_I422].cu[CHROMA_422_32x64].copy_ps = PFX(blockcopy_ps_32x64_avx2);
- p.cu[BLOCK_64x64].copy_ps = PFX(blockcopy_ps_64x64_avx2);
p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].filter_hpp = PFX(interp_4tap_horiz_pp_4x8_avx2);
p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].filter_hpp = PFX(interp_4tap_horiz_pp_4x16_avx2);
p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_vpp = PFX(interp_4tap_vert_pp_16x4_avx2);
diff -r 3eb2ec5922be -r 377a996a8d74 source/common/x86/blockcopy8.asm
--- a/source/common/x86/blockcopy8.asm Wed Aug 05 12:20:01 2015 +0530
+++ b/source/common/x86/blockcopy8.asm Wed Aug 05 15:09:14 2015 +0530
@@ -3043,43 +3043,31 @@
;-----------------------------------------------------------------------------
%macro BLOCKCOPY_PS_W32_H4_avx2 2
INIT_YMM avx2
-cglobal blockcopy_ps_%1x%2, 4, 7, 3
+cglobal blockcopy_ps_%1x%2, 4, 7, 2
add r1, r1
mov r4d, %2/4
lea r5, [3 * r3]
lea r6, [3 * r1]
- pxor m0, m0
-
.loop:
- movu m1, [r2]
- punpcklbw m2, m1, m0
- punpckhbw m1, m1, m0
- vperm2i128 m3, m2, m1, 00100000b
- vperm2i128 m2, m2, m1, 00110001b
- movu [r0], m3
- movu [r0 + 32], m2
- movu m1, [r2 + r3]
- punpcklbw m2, m1, m0
- punpckhbw m1, m1, m0
- vperm2i128 m3, m2, m1, 00100000b
- vperm2i128 m2, m2, m1, 00110001b
- movu [r0 + r1], m3
- movu [r0 + r1 + 32], m2
- movu m1, [r2 + 2 * r3]
- punpcklbw m2, m1, m0
- punpckhbw m1, m1, m0
- vperm2i128 m3, m2, m1, 00100000b
- vperm2i128 m2, m2, m1, 00110001b
- movu [r0 + 2 * r1], m3
- movu [r0 + 2 * r1 + 32], m2
- movu m1, [r2 + r5]
- punpcklbw m2, m1, m0
- punpckhbw m1, m1, m0
- vperm2i128 m3, m2, m1, 00100000b
- vperm2i128 m2, m2, m1, 00110001b
- movu [r0 + r6], m3
- movu [r0 + r6 + 32], m2
-
+ pmovzxbw m0, [r2 + 0]
+ pmovzxbw m1, [r2 + 16]
+ movu [r0 + 0], m0
+ movu [r0 + 32], m1
+
+ pmovzxbw m0, [r2 + r3 + 0]
+ pmovzxbw m1, [r2 + r3 + 16]
+ movu [r0 + r1 + 0], m0
+ movu [r0 + r1 + 32], m1
+
+ pmovzxbw m0, [r2 + r3 * 2 + 0]
+ pmovzxbw m1, [r2 + r3 * 2 + 16]
+ movu [r0 + r1 * 2 + 0], m0
+ movu [r0 + r1 * 2 + 32], m1
+
+ pmovzxbw m0, [r2 + r5 + 0]
+ pmovzxbw m1, [r2 + r5 + 16]
+ movu [r0 + r6 + 0], m0
+ movu [r0 + r6 + 32], m1
lea r0, [r0 + 4 * r1]
lea r2, [r2 + 4 * r3]
dec r4d
@@ -3228,71 +3216,49 @@
INIT_YMM avx2
cglobal blockcopy_ps_64x64, 4, 7, 4
add r1, r1
- mov r4d, 64/4
+ mov r4d, 64/8
lea r5, [3 * r3]
lea r6, [3 * r1]
- pxor m0, m0
-
.loop:
- movu m1, [r2]
- punpcklbw m2, m1, m0
- punpckhbw m1, m1, m0
- vperm2i128 m3, m2, m1, 00100000b
- vperm2i128 m2, m2, m1, 00110001b
- movu [r0], m3
- movu [r0 + 32], m2
- movu m1, [r2 + 32]
- punpcklbw m2, m1, m0
- punpckhbw m1, m1, m0
- vperm2i128 m3, m2, m1, 00100000b
- vperm2i128 m2, m2, m1, 00110001b
- movu [r0 + 64], m3
- movu [r0 + 96], m2
- movu m1, [r2 + r3]
- punpcklbw m2, m1, m0
- punpckhbw m1, m1, m0
- vperm2i128 m3, m2, m1, 00100000b
- vperm2i128 m2, m2, m1, 00110001b
- movu [r0 + r1], m3
- movu [r0 + r1 + 32], m2
- movu m1, [r2 + r3 + 32]
- punpcklbw m2, m1, m0
- punpckhbw m1, m1, m0
- vperm2i128 m3, m2, m1, 00100000b
- vperm2i128 m2, m2, m1, 00110001b
- movu [r0 + r1 + 64], m3
- movu [r0 + r1 + 96], m2
- movu m1, [r2 + 2 * r3]
- punpcklbw m2, m1, m0
- punpckhbw m1, m1, m0
- vperm2i128 m3, m2, m1, 00100000b
- vperm2i128 m2, m2, m1, 00110001b
- movu [r0 + 2 * r1], m3
- movu [r0 + 2 * r1 + 32], m2
- movu m1, [r2 + 2 * r3 + 32]
- punpcklbw m2, m1, m0
- punpckhbw m1, m1, m0
- vperm2i128 m3, m2, m1, 00100000b
- vperm2i128 m2, m2, m1, 00110001b
- movu [r0 + 2 * r1 + 64], m3
- movu [r0 + 2 * r1 + 96], m2
- movu m1, [r2 + r5]
- punpcklbw m2, m1, m0
- punpckhbw m1, m1, m0
- vperm2i128 m3, m2, m1, 00100000b
- vperm2i128 m2, m2, m1, 00110001b
- movu [r0 + r6], m3
- movu [r0 + r6 + 32], m2
- movu m1, [r2 + r5 + 32]
- punpcklbw m2, m1, m0
- punpckhbw m1, m1, m0
- vperm2i128 m3, m2, m1, 00100000b
- vperm2i128 m2, m2, m1, 00110001b
- movu [r0 + r6 + 64], m3
- movu [r0 + r6 + 96], m2
-
+%rep 2
+ pmovzxbw m0, [r2 + 0]
+ pmovzxbw m1, [r2 + 16]
+ pmovzxbw m2, [r2 + 32]
+ pmovzxbw m3, [r2 + 48]
+ movu [r0 + 0], m0
+ movu [r0 + 32], m1
+ movu [r0 + 64], m2
+ movu [r0 + 96], m3
+
+ pmovzxbw m0, [r2 + r3 + 0]
+ pmovzxbw m1, [r2 + r3 + 16]
+ pmovzxbw m2, [r2 + r3 + 32]
+ pmovzxbw m3, [r2 + r3 + 48]
+ movu [r0 + r1 + 0], m0
+ movu [r0 + r1 + 32], m1
+ movu [r0 + r1 + 64], m2
+ movu [r0 + r1 + 96], m3
+
+ pmovzxbw m0, [r2 + r3 * 2 + 0]
+ pmovzxbw m1, [r2 + r3 * 2 + 16]
+ pmovzxbw m2, [r2 + r3 * 2 + 32]
+ pmovzxbw m3, [r2 + r3 * 2 + 48]
+ movu [r0 + r1 * 2 + 0], m0
+ movu [r0 + r1 * 2 + 32], m1
+ movu [r0 + r1 * 2 + 64], m2
+ movu [r0 + r1 * 2 + 96], m3
+
+ pmovzxbw m0, [r2 + r5 + 0]
+ pmovzxbw m1, [r2 + r5 + 16]
+ pmovzxbw m2, [r2 + r5 + 32]
+ pmovzxbw m3, [r2 + r5 + 48]
+ movu [r0 + r6 + 0], m0
+ movu [r0 + r6 + 32], m1
+ movu [r0 + r6 + 64], m2
+ movu [r0 + r6 + 96], m3
lea r0, [r0 + 4 * r1]
lea r2, [r2 + 4 * r3]
+%endrep
dec r4d
jnz .loop
RET
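
The gain over the old AVX2 body comes from replacing the four-instruction shuffle sequence per 32 source bytes (punpcklbw/punpckhbw against a zero register plus two vperm2i128 lane fixups) with a single vpmovzxbw per 16 source pixels, which zero-extends straight across the 128-bit lanes from a memory operand and also frees the zero register. An intrinsics sketch of the new inner step (illustration only, not the shipped asm; copy16_ps_avx2 is a hypothetical helper):

#include <immintrin.h>
#include <cstdint>

// One step of the rewritten loop: 16 pixels in, 16 int16_t out.
static inline void copy16_ps_avx2(int16_t* dst, const uint8_t* src)
{
    __m128i bytes = _mm_loadu_si128((const __m128i*)src); // 16 pixels
    __m256i words = _mm256_cvtepu8_epi16(bytes);          // vpmovzxbw
    _mm256_storeu_si256((__m256i*)dst, words);            // 16 words
}

The 32xN macro issues two such steps per row and the 64x64 kernel four, with the 64x64 loop additionally unrolled twice (64/8 iterations around a %rep 2 body).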