[x265] [PATCH] asm: avx2 assembly code for 8bpp version of transpose(8, 16, 32 and 64)

murugan at multicorewareinc.com
Mon Oct 13 14:49:40 CEST 2014


# HG changeset patch
# User Murugan Vairavel <murugan at multicorewareinc.com>
# Date 1413204420 -19800
#      Mon Oct 13 18:17:00 2014 +0530
# Node ID b7779a54dc5592178ca507e03276130e463c3339
# Parent  f26e81eb555aa586380b34314c302ea9b148f357
asm: avx2 assembly code for 8bpp version of transpose(8, 16, 32 and 64)
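
The AVX2 primitives follow the same contract as the existing SSE2 ones: src is
read with the given stride, and dest is written as a packed N x N block (row
stride N, as the [r0 + k * N] stores below show). As a rough scalar sketch of
what each x265_transposeN_avx2 computes (illustrative C only; the helper name
is not part of the patch):

    #include <stdint.h>

    typedef unsigned char pixel;   /* 8bpp build: pixel is an 8-bit type */

    /* dest[i][j] = src[j][i]; dest is packed with row stride n, src is strided */
    static void transpose_ref(pixel *dest, const pixel *src, intptr_t stride, int n)
    {
        for (int i = 0; i < n; i++)
            for (int j = 0; j < n; j++)
                dest[i * n + j] = src[j * stride + i];
    }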

diff -r f26e81eb555a -r b7779a54dc55 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Mon Oct 13 14:36:40 2014 +0530
+++ b/source/common/x86/asm-primitives.cpp	Mon Oct 13 18:17:00 2014 +0530
@@ -1792,6 +1792,11 @@
         p.idct[IDCT_8x8] = x265_idct8_avx2;
         p.idct[IDCT_16x16] = x265_idct16_avx2;
         p.idct[IDCT_32x32] = x265_idct32_avx2;
+
+        p.transpose[BLOCK_8x8] = x265_transpose8_avx2;
+        p.transpose[BLOCK_16x16] = x265_transpose16_avx2;
+        p.transpose[BLOCK_32x32] = x265_transpose32_avx2;
+        p.transpose[BLOCK_64x64] = x265_transpose64_avx2;
 #endif
     }
 #endif // if HIGH_BIT_DEPTH
diff -r f26e81eb555a -r b7779a54dc55 source/common/x86/pixel-util.h
--- a/source/common/x86/pixel-util.h	Mon Oct 13 14:36:40 2014 +0530
+++ b/source/common/x86/pixel-util.h	Mon Oct 13 18:17:00 2014 +0530
@@ -44,6 +44,11 @@
 void x265_transpose32_sse2(pixel *dest, pixel *src, intptr_t stride);
 void x265_transpose64_sse2(pixel *dest, pixel *src, intptr_t stride);
 
+void x265_transpose8_avx2(pixel *dest, pixel *src, intptr_t stride);
+void x265_transpose16_avx2(pixel *dest, pixel *src, intptr_t stride);
+void x265_transpose32_avx2(pixel *dest, pixel *src, intptr_t stride);
+void x265_transpose64_avx2(pixel *dest, pixel *src, intptr_t stride);
+
 uint32_t x265_quant_sse4(int32_t *coef, int32_t *quantCoeff, int32_t *deltaU, int16_t *qCoef, int qBits, int add, int numCoeff);
 uint32_t x265_quant_avx2(int32_t *coef, int32_t *quantCoeff, int32_t *deltaU, int16_t *qCoef, int qBits, int add, int numCoeff);
 uint32_t x265_nquant_sse4(int32_t *coef, int32_t *quantCoeff, int16_t *qCoef, int qBits, int add, int numCoeff);
diff -r f26e81eb555a -r b7779a54dc55 source/common/x86/pixel-util8.asm
--- a/source/common/x86/pixel-util8.asm	Mon Oct 13 14:36:40 2014 +0530
+++ b/source/common/x86/pixel-util8.asm	Mon Oct 13 18:17:00 2014 +0530
@@ -48,6 +48,8 @@
 hmulw_16p:  times 8 dw 1
             times 4 dw 1, -1
 
+trans8_shuf: dd 0, 4, 1, 5, 2, 6, 3, 7
+
 SECTION .text
 
 cextern pw_1
@@ -1530,7 +1532,42 @@
     mov    r3,    r0
     mov    r5,    16
     call   transpose8_internal
+    RET
 %else
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal transpose8, 3, 4, 4
+    lea          r3, [r2 * 3]
+    movq         xm0, [r1]
+    movhps       xm0, [r1 + 2 * r2]
+    movq         xm1, [r1 + r2]
+    movhps       xm1, [r1 + r3]
+    lea          r1, [r1 + 4 * r2]
+    movq         xm2, [r1]
+    movhps       xm2, [r1 + 2 * r2]
+    movq         xm3, [r1 + r2]
+    movhps       xm3, [r1 + r3]
+
+    vinserti128  m0, m0, xm2, 1             ;[row1 row3 row5 row7]
+    vinserti128  m1, m1, xm3, 1             ;[row2 row4 row6 row8]
+
+    punpcklbw    m2, m0, m1                 ;[1 - 8; 1 - 8][row1row2; row5row6]
+    punpckhbw    m0, m1                     ;[1 - 8; 1 - 8][row3row4; row7row8]
+
+    punpcklwd    m1, m2, m0                 ;[1 - 4; 1 - 4][row1row2row3row4; row5row6row7row8]
+    punpckhwd    m2, m0                     ;[5 - 8; 5 - 8][row1row2row3row4; row5row6row7row8]
+
+    mova         m0, [trans8_shuf]
+
+    vpermd       m1, m0, m1                 ;[1 - 2; 3 - 4][row1row2row3row4row5row6row7row8]
+    vpermd       m2, m0, m2                 ;[5 - 6; 7 - 8][row1row2row3row4row5row6row7row8]
+
+    movu         [r0], m1
+    movu         [r0 + 32], m2
+    RET
+%endif
+
+INIT_XMM sse2
 cglobal transpose8, 3, 5, 8, dest, src, stride
     lea          r3,    [2 * r2]
     lea          r4,    [3 * r2]
@@ -1562,8 +1599,8 @@
     movu         [r0 + 16],    m2
     movu         [r0 + 32],    m1
     movu         [r0 + 48],    m3
+    RET
 %endif
-    RET
 
 %macro TRANSPOSE_8x8 1
 
@@ -1632,8 +1669,90 @@
     lea    r0,    [r6 + 8 * r5 + 16]
     mov    r3,    r0
     call   transpose8_internal
-
+    RET
 %else
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal transpose16, 3, 5, 9
+    lea          r3, [r2 * 3]
+    lea          r4, [r1 + 8 * r2]
+
+    movu         xm0, [r1]
+    movu         xm1, [r1 + r2]
+    movu         xm2, [r1 + 2 * r2]
+    movu         xm3, [r1 + r3]
+    vinserti128  m0,  m0, [r4], 1
+    vinserti128  m1,  m1, [r4 + r2], 1
+    vinserti128  m2,  m2, [r4 + 2 * r2], 1
+    vinserti128  m3,  m3, [r4 + r3], 1
+    lea          r1,  [r1 + 4 * r2]
+    lea          r4,  [r4 + 4 * r2]
+
+    movu         xm4, [r1]
+    movu         xm5, [r1 + r2]
+    movu         xm6, [r1 + 2 * r2]
+    movu         xm7, [r1 + r3]
+    vinserti128  m4,  m4, [r4], 1
+    vinserti128  m5,  m5, [r4 + r2], 1
+    vinserti128  m6,  m6, [r4 + 2 * r2], 1
+    vinserti128  m7,  m7, [r4 + r3], 1
+
+    punpcklbw    m8, m0, m1                 ;[1 - 8 ; 1 - 8 ][1 2  9 10]
+    punpckhbw    m0, m1                     ;[9 - 16; 9 - 16][1 2  9 10]
+
+    punpcklbw    m1, m2, m3                 ;[1 - 8 ; 1 - 8 ][3 4 11 12]
+    punpckhbw    m2, m3                     ;[9 - 16; 9 - 16][3 4 11 12]
+
+    punpcklbw    m3, m4, m5                 ;[1 - 8 ; 1 - 8 ][5 6 13 14]
+    punpckhbw    m4, m5                     ;[9 - 16; 9 - 16][5 6 13 14]
+
+    punpcklbw    m5, m6, m7                 ;[1 - 8 ; 1 - 8 ][7 8 15 16]
+    punpckhbw    m6, m7                     ;[9 - 16; 9 - 16][7 8 15 16]
+
+    punpcklwd    m7, m8, m1                 ;[1 - 4 ; 1 - 4][1 2 3 4 9 10 11 12]
+    punpckhwd    m8, m1                     ;[5 - 8 ; 5 - 8][1 2 3 4 9 10 11 12]
+
+    punpcklwd    m1, m3, m5                 ;[1 - 4 ; 1 - 4][5 6 7 8 13 14 15 16]
+    punpckhwd    m3, m5                     ;[5 - 8 ; 5 - 8][5 6 7 8 13 14 15 16]
+
+    punpcklwd    m5, m0, m2                 ;[9 - 12; 9 -  12][1 2 3 4 9 10 11 12]
+    punpckhwd    m0, m2                     ;[13- 16; 13 - 16][1 2 3 4 9 10 11 12]
+
+    punpcklwd    m2, m4, m6                 ;[9 - 12; 9 -  12][5 6 7 8 13 14 15 16]
+    punpckhwd    m4, m6                     ;[13- 16; 13 - 16][5 6 7 8 13 14 15 16]
+
+    punpckldq    m6, m7, m1                 ;[1 - 2 ; 1 - 2][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhdq    m7, m1                     ;[3 - 4 ; 3 - 4][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpckldq    m1, m8, m3                 ;[5 - 6 ; 5 - 6][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhdq    m8, m3                     ;[7 - 8 ; 7 - 8][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpckldq    m3, m5, m2                 ;[9 - 10; 9 -  10][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhdq    m5, m2                     ;[11- 12; 11 - 12][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpckldq    m2, m0, m4                 ;[13- 14; 13 - 14][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhdq    m0, m4                     ;[15- 16; 15 - 16][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    vpermq       m6, m6, 0xD8
+    vpermq       m7, m7, 0xD8
+    vpermq       m1, m1, 0xD8
+    vpermq       m8, m8, 0xD8
+    vpermq       m3, m3, 0xD8
+    vpermq       m5, m5, 0xD8
+    vpermq       m2, m2, 0xD8
+    vpermq       m0, m0, 0xD8
+
+    movu         [r0 + 0 *  16], m6
+    movu         [r0 + 2 *  16], m7
+    movu         [r0 + 4 *  16], m1
+    movu         [r0 + 6 *  16], m8
+    movu         [r0 + 8 *  16], m3
+    movu         [r0 + 10 * 16], m5
+    movu         [r0 + 12 * 16], m2
+    movu         [r0 + 14 * 16], m0
+    RET
+%endif
+INIT_XMM sse2
 cglobal transpose16, 3, 5, 8, dest, src, stride
     mov    r3,    r0
     mov    r4,    r1
@@ -1647,8 +1766,8 @@
     lea    r1,    [r1 + 2 * r2]
     lea    r0,    [r3 + 8 * 16 + 8]
     TRANSPOSE_8x8 16
+    RET
 %endif
-    RET
 
 cglobal transpose16_internal
     TRANSPOSE_8x8 r6
@@ -1670,8 +1789,8 @@
 ;-----------------------------------------------------------------
 ; void transpose_32x32(pixel *dst, pixel *src, intptr_t stride)
 ;-----------------------------------------------------------------
+%if HIGH_BIT_DEPTH
 INIT_XMM sse2
-%if HIGH_BIT_DEPTH
 cglobal transpose32, 3, 7, 4, dest, src, stride
     add    r2,    r2
     mov    r3,    r0
@@ -1739,7 +1858,9 @@
     lea    r0,    [r6 + 24 * 64 + 48]
     mov    r3,    r0
     call   transpose8_internal
+    RET
 %else
+INIT_XMM sse2
 cglobal transpose32, 3, 7, 8, dest, src, stride
     mov    r3,    r0
     mov    r4,    r1
@@ -1758,8 +1879,193 @@
     lea    r0,    [r3 + 16 * 32 + 16]
     mov    r5,    r0
     call   transpose16_internal
+    RET
+
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+cglobal transpose32, 3, 5, 16
+    lea          r3, [r2 * 3]
+    mov          r4d, 2
+
+.loop:
+    movu         m0, [r1]
+    movu         m1, [r1 + r2]
+    movu         m2, [r1 + 2 * r2]
+    movu         m3, [r1 + r3]
+    lea          r1, [r1 + 4 * r2]
+
+    movu         m4, [r1]
+    movu         m5, [r1 + r2]
+    movu         m6, [r1 + 2 * r2]
+    movu         m7, [r1 + r3]
+
+    punpcklbw    m8, m0, m1                 ;[1 - 8 ; 17 - 24][1 2]
+    punpckhbw    m0, m1                     ;[9 - 16; 25 - 32][1 2]
+
+    punpcklbw    m1, m2, m3                 ;[1 - 8 ; 17 - 24][3 4]
+    punpckhbw    m2, m3                     ;[9 - 16; 25 - 32][3 4]
+
+    punpcklbw    m3, m4, m5                 ;[1 - 8 ; 17 - 24][5 6]
+    punpckhbw    m4, m5                     ;[9 - 16; 25 - 32][5 6]
+
+    punpcklbw    m5, m6, m7                 ;[1 - 8 ; 17 - 24][7 8]
+    punpckhbw    m6, m7                     ;[9 - 16; 25 - 32][7 8]
+
+    punpcklwd    m7, m8, m1                 ;[1 - 4 ; 17 - 20][1 2 3 4]
+    punpckhwd    m8, m1                     ;[5 - 8 ; 21 - 24][1 2 3 4]
+
+    punpcklwd    m1, m3, m5                 ;[1 - 4 ; 17 - 20][5 6 7 8]
+    punpckhwd    m3, m5                     ;[5 - 8 ; 21 - 24][5 6 7 8]
+
+    punpcklwd    m5, m0, m2                 ;[9 - 12; 25 - 28][1 2 3 4]
+    punpckhwd    m0, m2                     ;[13- 16; 29 - 32][1 2 3 4]
+
+    punpcklwd    m2, m4, m6                 ;[9 - 12; 25 - 28][5 6 7 8]
+    punpckhwd    m4, m6                     ;[13- 16; 29 - 32][5 6 7 8]
+
+    punpckldq    m6, m7, m1                 ;[1 - 2 ; 17 - 18][1 2 3 4 5 6 7 8]
+    punpckhdq    m7, m1                     ;[3 - 4 ; 19 - 20][1 2 3 4 5 6 7 8]
+
+    punpckldq    m1, m8, m3                 ;[5 - 6 ; 21 - 22][1 2 3 4 5 6 7 8]
+    punpckhdq    m8, m3                     ;[7 - 8 ; 23 - 24][1 2 3 4 5 6 7 8]
+
+    punpckldq    m3, m5, m2                 ;[9 - 10; 25 - 26][1 2 3 4 5 6 7 8]
+    punpckhdq    m5, m2                     ;[11- 12; 27 - 28][1 2 3 4 5 6 7 8]
+
+    punpckldq    m2, m0, m4                 ;[13- 14; 29 - 30][1 2 3 4 5 6 7 8]
+    punpckhdq    m0, m4                     ;[15- 16; 31 - 32][1 2 3 4 5 6 7 8]
+
+    movq         [r0 + 0  * 32], xm6
+    movhps       [r0 + 1  * 32], xm6
+    vextracti128 xm4, m6, 1
+    movq         [r0 + 16 * 32], xm4
+    movhps       [r0 + 17 * 32], xm4
+
+    lea          r1,  [r1 + 4 * r2]
+    movu         m9,  [r1]
+    movu         m10, [r1 + r2]
+    movu         m11, [r1 + 2 * r2]
+    movu         m12, [r1 + r3]
+    lea          r1,  [r1 + 4 * r2]
+
+    movu         m13, [r1]
+    movu         m14, [r1 + r2]
+    movu         m15, [r1 + 2 * r2]
+    movu         m6,  [r1 + r3]
+
+    punpcklbw    m4, m9, m10                ;[1 - 8 ; 17 - 24][9 10]
+    punpckhbw    m9, m10                    ;[9 - 16; 25 - 32][9 10]
+
+    punpcklbw    m10, m11, m12              ;[1 - 8 ; 17 - 24][11 12]
+    punpckhbw    m11, m12                   ;[9 - 16; 25 - 32][11 12]
+
+    punpcklbw    m12, m13, m14              ;[1 - 8 ; 17 - 24][13 14]
+    punpckhbw    m13, m14                   ;[9 - 16; 25 - 32][13 14]
+
+    punpcklbw    m14, m15, m6               ;[1 - 8 ; 17 - 24][15 16]
+    punpckhbw    m15, m6                    ;[9 - 16; 25 - 32][15 16]
+
+    punpcklwd    m6, m4, m10                ;[1 - 4 ; 17 - 20][9 10 11 12]
+    punpckhwd    m4, m10                    ;[5 - 8 ; 21 - 24][9 10 11 12]
+
+    punpcklwd    m10, m12, m14              ;[1 - 4 ; 17 - 20][13 14 15 16]
+    punpckhwd    m12, m14                   ;[5 - 8 ; 21 - 24][13 14 15 16]
+
+    punpcklwd    m14, m9, m11               ;[9 - 12; 25 - 28][9 10 11 12]
+    punpckhwd    m9, m11                    ;[13- 16; 29 - 32][9 10 11 12]
+
+    punpcklwd    m11, m13, m15              ;[9 - 12; 25 - 28][13 14 15 16]
+    punpckhwd    m13, m15                   ;[13- 16; 29 - 32][13 14 15 16]
+
+    punpckldq    m15, m6, m10               ;[1 - 2 ; 17 - 18][9 10 11 12 13 14 15 16]
+    punpckhdq    m6, m10                    ;[3 - 4 ; 19 - 20][9 10 11 12 13 14 15 16]
+
+    punpckldq    m10, m4, m12               ;[5 - 6 ; 21 - 22][9 10 11 12 13 14 15 16]
+    punpckhdq    m4, m12                    ;[7 - 8 ; 23 - 24][9 10 11 12 13 14 15 16]
+
+    punpckldq    m12, m14, m11              ;[9 - 10; 25 - 26][9 10 11 12 13 14 15 16]
+    punpckhdq    m14, m11                   ;[11- 12; 27 - 28][9 10 11 12 13 14 15 16]
+
+    punpckldq    m11, m9, m13               ;[13- 14; 29 - 30][9 10 11 12 13 14 15 16]
+    punpckhdq    m9, m13                    ;[15- 16; 31 - 32][9 10 11 12 13 14 15 16]
+
+
+    punpcklqdq   m13, m7, m6                ;[3 ; 19][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m7, m6                     ;[4 ; 20][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m6, m1, m10                ;[5 ; 21][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m1, m10                    ;[6 ; 22][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m10, m8, m4                ;[7 ; 23][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m8, m4                     ;[8 ; 24][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m4, m3, m12                ;[9 ; 25][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m3, m12                    ;[10; 26][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m12, m5, m14               ;[11; 27][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m5, m14                    ;[12; 28][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m14, m2, m11               ;[13; 29][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m2, m11                    ;[14; 30][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m11, m0, m9                ;[15; 31][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m0, m9                     ;[16; 32][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    movq         [r0 + 0  * 32 + 8], xm15
+    movhps       [r0 + 1  * 32 + 8], xm15
+    vextracti128 xm9, m15, 1
+    movq         [r0 + 16 * 32 + 8], xm9
+    movhps       [r0 + 17 * 32 + 8], xm9
+
+    movu         [r0 + 2  * 32], xm13
+    vextracti128 [r0 + 18 * 32], m13, 1
+
+    movu         [r0 + 3  * 32], xm7
+    vextracti128 [r0 + 19 * 32], m7, 1
+
+    movu         [r0 + 4  * 32], xm6
+    vextracti128 [r0 + 20 * 32], m6, 1
+
+    movu         [r0 + 5  * 32], xm1
+    vextracti128 [r0 + 21 * 32], m1, 1
+
+    movu         [r0 + 6  * 32], xm10
+    vextracti128 [r0 + 22 * 32], m10, 1
+
+    movu         [r0 + 7  * 32], xm8
+    vextracti128 [r0 + 23 * 32], m8, 1
+
+    movu         [r0 + 8  * 32], xm4
+    vextracti128 [r0 + 24 * 32], m4, 1
+
+    movu         [r0 + 9  * 32], xm3
+    vextracti128 [r0 + 25 * 32], m3, 1
+
+    movu         [r0 + 10 * 32], xm12
+    vextracti128 [r0 + 26 * 32], m12, 1
+
+    movu         [r0 + 11 * 32], xm5
+    vextracti128 [r0 + 27 * 32], m5, 1
+
+    movu         [r0 + 12 * 32], xm14
+    vextracti128 [r0 + 28 * 32], m14, 1
+
+    movu         [r0 + 13 * 32], xm2
+    vextracti128 [r0 + 29 * 32], m2, 1
+
+    movu         [r0 + 14 * 32], xm11
+    vextracti128 [r0 + 30 * 32], m11, 1
+
+    movu         [r0 + 15 * 32], xm0
+    vextracti128 [r0 + 31 * 32], m0, 1
+
+    add          r0, 16
+    lea          r1,  [r1 + 4 * r2]
+    dec          r4d
+    jnz          .loop
+    RET
 %endif
-    RET
+%endif
 
 ;-----------------------------------------------------------------
 ; void transpose_64x64(pixel *dst, pixel *src, intptr_t stride)
@@ -2032,7 +2338,225 @@
     lea    r0,    [r6 + 56 * 128 + 112]
     mov    r3,    r0
     call   transpose8_internal
+    RET
 %else
+%if ARCH_X86_64 == 1
+INIT_YMM avx2
+
+cglobal transpose16x32_avx2
+    movu         m0, [r1]
+    movu         m1, [r1 + r2]
+    movu         m2, [r1 + 2 * r2]
+    movu         m3, [r1 + r3]
+    lea          r1, [r1 + 4 * r2]
+
+    movu         m4, [r1]
+    movu         m5, [r1 + r2]
+    movu         m6, [r1 + 2 * r2]
+    movu         m7, [r1 + r3]
+
+    punpcklbw    m8, m0, m1                 ;[1 - 8 ; 17 - 24][1 2]
+    punpckhbw    m0, m1                     ;[9 - 16; 25 - 32][1 2]
+
+    punpcklbw    m1, m2, m3                 ;[1 - 8 ; 17 - 24][3 4]
+    punpckhbw    m2, m3                     ;[9 - 16; 25 - 32][3 4]
+
+    punpcklbw    m3, m4, m5                 ;[1 - 8 ; 17 - 24][5 6]
+    punpckhbw    m4, m5                     ;[9 - 16; 25 - 32][5 6]
+
+    punpcklbw    m5, m6, m7                 ;[1 - 8 ; 17 - 24][7 8]
+    punpckhbw    m6, m7                     ;[9 - 16; 25 - 32][7 8]
+
+    punpcklwd    m7, m8, m1                 ;[1 - 4 ; 17 - 20][1 2 3 4]
+    punpckhwd    m8, m1                     ;[5 - 8 ; 21 - 24][1 2 3 4]
+
+    punpcklwd    m1, m3, m5                 ;[1 - 4 ; 17 - 20][5 6 7 8]
+    punpckhwd    m3, m5                     ;[5 - 8 ; 21 - 24][5 6 7 8]
+
+    punpcklwd    m5, m0, m2                 ;[9 - 12; 25 - 28][1 2 3 4]
+    punpckhwd    m0, m2                     ;[13- 16; 29 - 32][1 2 3 4]
+
+    punpcklwd    m2, m4, m6                 ;[9 - 12; 25 - 28][5 6 7 8]
+    punpckhwd    m4, m6                     ;[13- 16; 29 - 32][5 6 7 8]
+
+    punpckldq    m6, m7, m1                 ;[1 - 2 ; 17 - 18][1 2 3 4 5 6 7 8]
+    punpckhdq    m7, m1                     ;[3 - 4 ; 19 - 20][1 2 3 4 5 6 7 8]
+
+    punpckldq    m1, m8, m3                 ;[5 - 6 ; 21 - 22][1 2 3 4 5 6 7 8]
+    punpckhdq    m8, m3                     ;[7 - 8 ; 23 - 24][1 2 3 4 5 6 7 8]
+
+    punpckldq    m3, m5, m2                 ;[9 - 10; 25 - 26][1 2 3 4 5 6 7 8]
+    punpckhdq    m5, m2                     ;[11- 12; 27 - 28][1 2 3 4 5 6 7 8]
+
+    punpckldq    m2, m0, m4                 ;[13- 14; 29 - 30][1 2 3 4 5 6 7 8]
+    punpckhdq    m0, m4                     ;[15- 16; 31 - 32][1 2 3 4 5 6 7 8]
+
+    movq         [r0 + 0  * 64], xm6
+    movhps       [r0 + 1  * 64], xm6
+    vextracti128 xm4, m6, 1
+    movq         [r0 + 16 * 64], xm4
+    movhps       [r0 + 17 * 64], xm4
+
+    lea          r1,  [r1 + 4 * r2]
+    movu         m9,  [r1]
+    movu         m10, [r1 + r2]
+    movu         m11, [r1 + 2 * r2]
+    movu         m12, [r1 + r3]
+    lea          r1,  [r1 + 4 * r2]
+
+    movu         m13, [r1]
+    movu         m14, [r1 + r2]
+    movu         m15, [r1 + 2 * r2]
+    movu         m6,  [r1 + r3]
+
+    punpcklbw    m4, m9, m10                ;[1 - 8 ; 17 - 24][9 10]
+    punpckhbw    m9, m10                    ;[9 - 16; 25 - 32][9 10]
+
+    punpcklbw    m10, m11, m12              ;[1 - 8 ; 17 - 24][11 12]
+    punpckhbw    m11, m12                   ;[9 - 16; 25 - 32][11 12]
+
+    punpcklbw    m12, m13, m14              ;[1 - 8 ; 17 - 24][13 14]
+    punpckhbw    m13, m14                   ;[9 - 16; 25 - 32][13 14]
+
+    punpcklbw    m14, m15, m6               ;[1 - 8 ; 17 - 24][15 16]
+    punpckhbw    m15, m6                    ;[9 - 16; 25 - 32][15 16]
+
+    punpcklwd    m6, m4, m10                ;[1 - 4 ; 17 - 20][9 10 11 12]
+    punpckhwd    m4, m10                    ;[5 - 8 ; 21 - 24][9 10 11 12]
+
+    punpcklwd    m10, m12, m14              ;[1 - 4 ; 17 - 20][13 14 15 16]
+    punpckhwd    m12, m14                   ;[5 - 8 ; 21 - 24][13 14 15 16]
+
+    punpcklwd    m14, m9, m11               ;[9 - 12; 25 - 28][9 10 11 12]
+    punpckhwd    m9, m11                    ;[13- 16; 29 - 32][9 10 11 12]
+
+    punpcklwd    m11, m13, m15              ;[9 - 12; 25 - 28][13 14 15 16]
+    punpckhwd    m13, m15                   ;[13- 16; 29 - 32][13 14 15 16]
+
+    punpckldq    m15, m6, m10               ;[1 - 2 ; 17 - 18][9 10 11 12 13 14 15 16]
+    punpckhdq    m6, m10                    ;[3 - 4 ; 19 - 20][9 10 11 12 13 14 15 16]
+
+    punpckldq    m10, m4, m12               ;[5 - 6 ; 21 - 22][9 10 11 12 13 14 15 16]
+    punpckhdq    m4, m12                    ;[7 - 8 ; 23 - 24][9 10 11 12 13 14 15 16]
+
+    punpckldq    m12, m14, m11              ;[9 - 10; 25 - 26][9 10 11 12 13 14 15 16]
+    punpckhdq    m14, m11                   ;[11- 12; 27 - 28][9 10 11 12 13 14 15 16]
+
+    punpckldq    m11, m9, m13               ;[13- 14; 29 - 30][9 10 11 12 13 14 15 16]
+    punpckhdq    m9, m13                    ;[15- 16; 31 - 32][9 10 11 12 13 14 15 16]
+
+
+    punpcklqdq   m13, m7, m6                ;[3 ; 19][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m7, m6                     ;[4 ; 20][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m6, m1, m10                ;[5 ; 21][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m1, m10                    ;[6 ; 22][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m10, m8, m4                ;[7 ; 23][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m8, m4                     ;[8 ; 24][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m4, m3, m12                ;[9 ; 25][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m3, m12                    ;[10; 26][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m12, m5, m14               ;[11; 27][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m5, m14                    ;[12; 28][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m14, m2, m11               ;[13; 29][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m2, m11                    ;[14; 30][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    punpcklqdq   m11, m0, m9                ;[15; 31][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+    punpckhqdq   m0, m9                     ;[16; 32][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
+
+    movq         [r0 + 0  * 64 + 8], xm15
+    movhps       [r0 + 1  * 64 + 8], xm15
+    vextracti128 xm9, m15, 1
+    movq         [r0 + 16 * 64 + 8], xm9
+    movhps       [r0 + 17 * 64 + 8], xm9
+
+    movu         [r0 + 2  * 64], xm13
+    vextracti128 [r0 + 18 * 64], m13, 1
+
+    movu         [r0 + 3  * 64], xm7
+    vextracti128 [r0 + 19 * 64], m7, 1
+
+    movu         [r0 + 4  * 64], xm6
+    vextracti128 [r0 + 20 * 64], m6, 1
+
+    movu         [r0 + 5  * 64], xm1
+    vextracti128 [r0 + 21 * 64], m1, 1
+
+    movu         [r0 + 6  * 64], xm10
+    vextracti128 [r0 + 22 * 64], m10, 1
+
+    movu         [r0 + 7  * 64], xm8
+    vextracti128 [r0 + 23 * 64], m8, 1
+
+    movu         [r0 + 8  * 64], xm4
+    vextracti128 [r0 + 24 * 64], m4, 1
+
+    movu         [r0 + 9  * 64], xm3
+    vextracti128 [r0 + 25 * 64], m3, 1
+
+    movu         [r0 + 10 * 64], xm12
+    vextracti128 [r0 + 26 * 64], m12, 1
+
+    movu         [r0 + 11 * 64], xm5
+    vextracti128 [r0 + 27 * 64], m5, 1
+
+    movu         [r0 + 12 * 64], xm14
+    vextracti128 [r0 + 28 * 64], m14, 1
+
+    movu         [r0 + 13 * 64], xm2
+    vextracti128 [r0 + 29 * 64], m2, 1
+
+    movu         [r0 + 14 * 64], xm11
+    vextracti128 [r0 + 30 * 64], m11, 1
+
+    movu         [r0 + 15 * 64], xm0
+    vextracti128 [r0 + 31 * 64], m0, 1
+    ret
+
+cglobal transpose64, 3, 6, 16
+
+    lea          r3, [r2 * 3]
+    lea          r4, [r0 + 16]
+
+    lea          r5, [r1 + 32]
+    call         transpose16x32_avx2
+    lea          r0, [r0 + 32 * 64]
+    mov          r1, r5
+    call         transpose16x32_avx2
+
+    mov          r0, r4
+    lea          r5, [r1 + 4 * r2]
+
+    lea          r1, [r5 - 32]
+    call         transpose16x32_avx2
+    lea          r0, [r0 + 32 * 64]
+    mov          r1, r5
+    call         transpose16x32_avx2
+
+    lea          r0, [r4 + 16]
+    lea          r5, [r1 + 4 * r2]
+
+    lea          r1, [r5 - 32]
+    call         transpose16x32_avx2
+    lea          r0, [r0 + 32 * 64]
+    mov          r1, r5
+    call         transpose16x32_avx2
+
+    lea          r5, [r1 + 4 * r2]
+    lea          r0, [r4 + 32]
+
+    lea          r1, [r5 - 32]
+    call         transpose16x32_avx2
+    lea          r0, [r0 + 32 * 64]
+    mov          r1, r5
+    call         transpose16x32_avx2
+    RET
+%endif
+
+INIT_XMM sse2
 cglobal transpose64, 3, 7, 8, dest, src, stride
     mov    r3,    r0
     mov    r4,    r1
@@ -2102,8 +2626,9 @@
     lea    r0,    [r3 + 48 * 64 + 48]
     mov    r5,    r0
     call   transpose16_internal
+    RET
 %endif
-    RET
+
 
 ;=============================================================================
 ; SSIM

