[x265] [PATCH] asm: updated avx2 algorithm for copy_ps 32xN & 64xN, over 45% faster than the SSE asm

dnyaneshwar at multicorewareinc.com
Mon Aug 3 14:38:13 CEST 2015


# HG changeset patch
# User Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
# Date 1438604844 -19800
#      Mon Aug 03 17:57:24 2015 +0530
# Node ID 8b8e6ff120954c6e65a314e01aa1d98183e8c62a
# Parent  21844b770c96b1773cbad5953e1f52ce8b900865
asm: updated avx2 algorithm for copy_ps 32xN & 64xN, over 45% faster than the SSE asm
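
For readers unfamiliar with the primitive: copy_ps widens 8-bit source
pixels into a 16-bit destination block, which is why the asm doubles the
destination stride up front (add r1, r1). Below is a minimal scalar sketch
of the behavior, assuming x265's copy_ps_t argument order (dst, dstStride,
src, srcStride); the explicit width/height parameters are illustrative, as
the real C reference is templated on block size:

#include <stdint.h>
#include <stddef.h>

/* Zero-extend an 8-bit pixel block into an int16_t block.
 * Strides are in elements of each respective buffer. */
static void blockcopy_ps_ref(int16_t* dst, ptrdiff_t dstStride,
                             const uint8_t* src, ptrdiff_t srcStride,
                             int width, int height)
{
    for (int y = 0; y < height; y++)
    {
        for (int x = 0; x < width; x++)
            dst[x] = (int16_t)src[x];
        dst += dstStride;
        src += srcStride;
    }
}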

diff -r 21844b770c96 -r 8b8e6ff12095 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Mon Aug 03 17:00:06 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Mon Aug 03 17:57:24 2015 +0530
@@ -3018,11 +3018,11 @@
         p.chroma[X265_CSP_I420].cu[CHROMA_420_16x16].copy_ps = PFX(blockcopy_ps_16x16_avx2);
         p.chroma[X265_CSP_I422].cu[CHROMA_422_16x32].copy_ps = PFX(blockcopy_ps_16x32_avx2);
 
-        //p.cu[BLOCK_32x32].copy_ps = PFX(blockcopy_ps_32x32_avx2);
-        //p.chroma[X265_CSP_I420].cu[CHROMA_420_32x32].copy_ps = PFX(blockcopy_ps_32x32_avx2);
-        //p.chroma[X265_CSP_I422].cu[CHROMA_422_32x64].copy_ps = PFX(blockcopy_ps_32x64_avx2);
-
-        //p.cu[BLOCK_64x64].copy_ps = PFX(blockcopy_ps_64x64_avx2);
+        p.cu[BLOCK_32x32].copy_ps = PFX(blockcopy_ps_32x32_avx2);
+        p.chroma[X265_CSP_I420].cu[CHROMA_420_32x32].copy_ps = PFX(blockcopy_ps_32x32_avx2);
+        p.chroma[X265_CSP_I422].cu[CHROMA_422_32x64].copy_ps = PFX(blockcopy_ps_32x64_avx2);
+
+        p.cu[BLOCK_64x64].copy_ps = PFX(blockcopy_ps_64x64_avx2);
 
         ALL_LUMA_TU_S(dct, dct, avx2);
         ALL_LUMA_TU_S(idct, idct, avx2);
diff -r 21844b770c96 -r 8b8e6ff12095 source/common/x86/blockcopy8.asm
--- a/source/common/x86/blockcopy8.asm	Mon Aug 03 17:00:06 2015 +0530
+++ b/source/common/x86/blockcopy8.asm	Mon Aug 03 17:57:24 2015 +0530
@@ -3043,42 +3043,32 @@
 ;-----------------------------------------------------------------------------
 %macro BLOCKCOPY_PS_W32_H4_avx2 2
 INIT_YMM avx2
-cglobal blockcopy_ps_%1x%2, 4, 7, 3
+cglobal blockcopy_ps_%1x%2, 4, 7, 2
     add     r1, r1
     mov     r4d, %2/4
     lea     r5, [3 * r3]
     lea     r6, [3 * r1]
-    pxor    m0, m0
 
 .loop:
-    movu          m1, [r2]
-    punpcklbw     m2, m1, m0
-    punpckhbw     m1, m1, m0
-    vperm2i128    m3, m2, m1, 00100000b
-    vperm2i128    m2, m2, m1, 00110001b
-    movu          [r0], m3
-    movu          [r0 + 32], m2
-    movu          m1, [r2 + r3]
-    punpcklbw     m2, m1, m0
-    punpckhbw     m1, m1, m0
-    vperm2i128    m3, m2, m1, 00100000b
-    vperm2i128    m2, m2, m1, 00110001b
-    movu          [r0 + r1], m3
-    movu          [r0 + r1 + 32], m2
-    movu          m1, [r2 + 2 * r3]
-    punpcklbw     m2, m1, m0
-    punpckhbw     m1, m1, m0
-    vperm2i128    m3, m2, m1, 00100000b
-    vperm2i128    m2, m2, m1, 00110001b
-    movu          [r0 + 2 * r1], m3
-    movu          [r0 + 2 * r1 + 32], m2
-    movu          m1, [r2 + r5]
-    punpcklbw     m2, m1, m0
-    punpckhbw     m1, m1, m0
-    vperm2i128    m3, m2, m1, 00100000b
-    vperm2i128    m2, m2, m1, 00110001b
-    movu          [r0 + r6], m3
-    movu          [r0 + r6 + 32], m2
+    pmovzxbw      m0, [r2 +  0]
+    pmovzxbw      m1, [r2 + 16]
+    movu          [r0 +  0], m0
+    movu          [r0 + 32], m1
+
+    pmovzxbw      m0, [r2 + r3 +  0]
+    pmovzxbw      m1, [r2 + r3 + 16]
+    movu          [r0 + r1 +  0], m0
+    movu          [r0 + r1 + 32], m1
+
+    pmovzxbw      m0, [r2 + r3 * 2 +  0]
+    pmovzxbw      m1, [r2 + r3 * 2 + 16]
+    movu          [r0 + r1 * 2 +  0], m0
+    movu          [r0 + r1 * 2 + 32], m1
+
+    pmovzxbw      m0, [r2 + r5 +  0]
+    pmovzxbw      m1, [r2 + r5 + 16]
+    movu          [r0 + r6 +  0], m0
+    movu          [r0 + r6 + 32], m1
 
     lea           r0, [r0 + 4 * r1]
     lea           r2, [r2 + 4 * r3]
@@ -3228,71 +3218,51 @@
 INIT_YMM avx2
 cglobal blockcopy_ps_64x64, 4, 7, 4
     add     r1, r1
-    mov     r4d, 64/4
+    mov     r4d, 64/8
     lea     r5, [3 * r3]
     lea     r6, [3 * r1]
-    pxor    m0, m0
 
 .loop:
-    movu          m1, [r2]
-    punpcklbw     m2, m1, m0
-    punpckhbw     m1, m1, m0
-    vperm2i128    m3, m2, m1, 00100000b
-    vperm2i128    m2, m2, m1, 00110001b
-    movu          [r0], m3
-    movu          [r0 + 32], m2
-    movu          m1, [r2 + 32]
-    punpcklbw     m2, m1, m0
-    punpckhbw     m1, m1, m0
-    vperm2i128    m3, m2, m1, 00100000b
-    vperm2i128    m2, m2, m1, 00110001b
-    movu          [r0 + 64], m3
-    movu          [r0 + 96], m2
-    movu          m1, [r2 + r3]
-    punpcklbw     m2, m1, m0
-    punpckhbw     m1, m1, m0
-    vperm2i128    m3, m2, m1, 00100000b
-    vperm2i128    m2, m2, m1, 00110001b
-    movu          [r0 + r1], m3
-    movu          [r0 + r1 + 32], m2
-    movu          m1, [r2 + r3 + 32]
-    punpcklbw     m2, m1, m0
-    punpckhbw     m1, m1, m0
-    vperm2i128    m3, m2, m1, 00100000b
-    vperm2i128    m2, m2, m1, 00110001b
-    movu          [r0 + r1 + 64], m3
-    movu          [r0 + r1 + 96], m2
-    movu          m1, [r2 + 2 * r3]
-    punpcklbw     m2, m1, m0
-    punpckhbw     m1, m1, m0
-    vperm2i128    m3, m2, m1, 00100000b
-    vperm2i128    m2, m2, m1, 00110001b
-    movu          [r0 + 2 * r1], m3
-    movu          [r0 + 2 * r1 + 32], m2
-    movu          m1, [r2 + 2 * r3 + 32]
-    punpcklbw     m2, m1, m0
-    punpckhbw     m1, m1, m0
-    vperm2i128    m3, m2, m1, 00100000b
-    vperm2i128    m2, m2, m1, 00110001b
-    movu          [r0 + 2 * r1 + 64], m3
-    movu          [r0 + 2 * r1 + 96], m2
-    movu          m1, [r2 + r5]
-    punpcklbw     m2, m1, m0
-    punpckhbw     m1, m1, m0
-    vperm2i128    m3, m2, m1, 00100000b
-    vperm2i128    m2, m2, m1, 00110001b
-    movu          [r0 + r6], m3
-    movu          [r0 + r6 + 32], m2
-    movu          m1, [r2 + r5 + 32]
-    punpcklbw     m2, m1, m0
-    punpckhbw     m1, m1, m0
-    vperm2i128    m3, m2, m1, 00100000b
-    vperm2i128    m2, m2, m1, 00110001b
-    movu          [r0 + r6 + 64], m3
-    movu          [r0 + r6 + 96], m2
+%rep 2
+    pmovzxbw      m0, [r2 +  0]
+    pmovzxbw      m1, [r2 + 16]
+    pmovzxbw      m2, [r2 + 32]
+    pmovzxbw      m3, [r2 + 48]
+    movu          [r0 +  0], m0
+    movu          [r0 + 32], m1
+    movu          [r0 + 64], m2
+    movu          [r0 + 96], m3
+
+    pmovzxbw      m0, [r2 + r3 +  0]
+    pmovzxbw      m1, [r2 + r3 + 16]
+    pmovzxbw      m2, [r2 + r3 + 32]
+    pmovzxbw      m3, [r2 + r3 + 48]
+    movu          [r0 + r1 +  0], m0
+    movu          [r0 + r1 + 32], m1
+    movu          [r0 + r1 + 64], m2
+    movu          [r0 + r1 + 96], m3
+
+    pmovzxbw      m0, [r2 + r3 * 2 +  0]
+    pmovzxbw      m1, [r2 + r3 * 2 + 16]
+    pmovzxbw      m2, [r2 + r3 * 2 + 32]
+    pmovzxbw      m3, [r2 + r3 * 2 + 48]
+    movu          [r0 + r1 * 2 +  0], m0
+    movu          [r0 + r1 * 2 + 32], m1
+    movu          [r0 + r1 * 2 + 64], m2
+    movu          [r0 + r1 * 2 + 96], m3
+
+    pmovzxbw      m0, [r2 + r5 +  0]
+    pmovzxbw      m1, [r2 + r5 + 16]
+    pmovzxbw      m2, [r2 + r5 + 32]
+    pmovzxbw      m3, [r2 + r5 + 48]
+    movu          [r0 + r6 +  0], m0
+    movu          [r0 + r6 + 32], m1
+    movu          [r0 + r6 + 64], m2
+    movu          [r0 + r6 + 96], m3
 
     lea           r0, [r0 + 4 * r1]
     lea           r2, [r2 + 4 * r3]
+%endrep
     dec           r4d
     jnz           .loop
     RET
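
The win comes from the load path: the old code loaded 32 bytes, widened
them with punpcklbw/punpckhbw against a zero register, then needed two
cross-lane vperm2i128 shuffles to undo the in-lane interleave order before
storing. pmovzxbw with a ymm destination zero-extends 16 bytes straight
into 16 words in source order, so the zero register and both permutes
disappear. A hedged intrinsics sketch of the two schemes (illustrative
helper names, not code from the patch):

#include <immintrin.h>
#include <stdint.h>

/* Old scheme: per 32 pixels, unpack against zero, then two
 * cross-lane permutes to restore linear word order. */
static inline void copy32_ps_old(int16_t* dst, const uint8_t* src)
{
    __m256i zero = _mm256_setzero_si256();
    __m256i b    = _mm256_loadu_si256((const __m256i*)src);
    __m256i lo   = _mm256_unpacklo_epi8(b, zero); /* words of bytes 0-7, 16-23  */
    __m256i hi   = _mm256_unpackhi_epi8(b, zero); /* words of bytes 8-15, 24-31 */
    _mm256_storeu_si256((__m256i*)(dst + 0),
                        _mm256_permute2x128_si256(lo, hi, 0x20));
    _mm256_storeu_si256((__m256i*)(dst + 16),
                        _mm256_permute2x128_si256(lo, hi, 0x31));
}

/* New scheme: per 16 pixels, one VPMOVZXBW does the whole widen. */
static inline void copy16_ps_new(int16_t* dst, const uint8_t* src)
{
    __m128i b = _mm_loadu_si128((const __m128i*)src);
    _mm256_storeu_si256((__m256i*)dst, _mm256_cvtepu8_epi16(b));
}

Per 32 pixels this trades one 32-byte load, two unpacks and two permutes
for two 16-byte widening loads, and it frees two vector registers, which
is where the reported 45% improvement comes from.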
