[x265] [PATCH 1 of 5] asm: AVX2 asm for intra_ang_32 mode 9, over 40% faster than the SSE asm

dnyaneshwar at multicorewareinc.com
Tue Aug 18 06:11:35 CEST 2015


# HG changeset patch
# User Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
# Date 1439531917 -19800
#      Fri Aug 14 11:28:37 2015 +0530
# Node ID 5ed23f786ea8f98e003189a537f960e4ff16201f
# Parent  996ebce8c874fc511d495cee227d24413e99d0c1
asm: AVX2 asm for intra_ang_32 mode 9, over 40% faster than the SSE asm

updated the intra_ang_32 mode 27 AVX2 asm code, over 3% faster than the previous AVX2 code
removed unnecessary constants from the previous asm
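
For reference, the new routine implements the standard HEVC angular
interpolation for an angle of +2 (shared by modes 9 and 27): each output
sample is a two-tap blend of adjacent reference pixels with weights
(32 - f, f), where f advances by 2 per row, followed by "+16, >> 5"
rounding. The (32 - f, f) pairs are the byte constants fed to pmaddubsw,
and pmulhrsw against pw_1024 supplies the rounding shift. Below is a
minimal scalar sketch of that computation; it is illustrative only, and
the function name, the ref/dst layout and the mode27 flag are assumptions
rather than x265's actual interface.

// Scalar sketch (not x265 code) of what ang32_mode_9_27_avx2 computes.
// 'ref' points at the first neighbouring sample and must hold at least
// 35 samples; intraPredAngle is +2 for both mode 9 and mode 27.
#include <cstdint>

static void ang32_mode9_27_scalar(uint8_t* dst, intptr_t stride,
                                  const uint8_t* ref, bool mode27)
{
    for (int y = 0; y < 32; y++)
    {
        int pos    = (y + 1) * 2;   // position in 1/32-sample units
        int offset = pos >> 5;      // integer reference offset
        int f      = pos & 31;      // fractional weight, 0..31

        for (int x = 0; x < 32; x++)
        {
            int a = ref[x + offset];
            int b = ref[x + offset + 1];
            uint8_t v = (uint8_t)(((32 - f) * a + f * b + 16) >> 5);

            if (mode27)
                dst[y * stride + x] = v;  // vertical family: store rows directly
            else
                dst[x * stride + y] = v;  // mode 9: store transposed
        }
    }
}

The transposed store for mode 9 is what TRANSPOSE_32x8_AVX2 performs eight
rows at a time, and the f sequence 2, 4, ..., 30, 0 is why the table loads
step from [r3 - 14 * 32] up to [r3 + 14 * 32] and the f == 0 rows are plain
copies ([r2 + 2] and [r2 + 3]).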

diff -r 996ebce8c874 -r 5ed23f786ea8 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Mon Aug 17 10:52:15 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Fri Aug 14 11:28:37 2015 +0530
@@ -3025,6 +3025,7 @@
         p.cu[BLOCK_32x32].intra_pred[6]  = PFX(intra_pred_ang32_6_avx2);
         p.cu[BLOCK_32x32].intra_pred[7]  = PFX(intra_pred_ang32_7_avx2);
         p.cu[BLOCK_32x32].intra_pred[8]  = PFX(intra_pred_ang32_8_avx2);
+        p.cu[BLOCK_32x32].intra_pred[9]  = PFX(intra_pred_ang32_9_avx2);
         p.cu[BLOCK_32x32].intra_pred[34] = PFX(intra_pred_ang32_34_avx2);
         p.cu[BLOCK_32x32].intra_pred[2] = PFX(intra_pred_ang32_2_avx2);
         p.cu[BLOCK_32x32].intra_pred[26] = PFX(intra_pred_ang32_26_avx2);
diff -r 996ebce8c874 -r 5ed23f786ea8 source/common/x86/intrapred8.asm
--- a/source/common/x86/intrapred8.asm	Mon Aug 17 10:52:15 2015 +0530
+++ b/source/common/x86/intrapred8.asm	Fri Aug 14 11:28:37 2015 +0530
@@ -259,26 +259,6 @@
                      db 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10
                      db 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
 
-
-ALIGN 32
-c_ang32_mode_27:    db 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4
-                    db 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8
-                    db 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12
-                    db 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
-                    db 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
-                    db 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24
-                    db 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28
-                    db 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30
-                    db 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2
-                    db 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6
-                    db 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10
-                    db 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14
-                    db 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18
-                    db 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22
-                    db 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26
-                    db 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30
-                    db 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0
-
 ALIGN 32
 c_ang32_mode_25:   db 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28
                    db 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24
@@ -13279,6 +13259,247 @@
     call ang32_mode_8_28_avx2
     RET
 
+cglobal ang32_mode_9_27_avx2
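+    ; shared by intra_pred_ang32_9 (r7d == 0, transposed output) and
+    ; intra_pred_ang32_27 (r7d != 0, rows stored directly); the test below
+    ; sets the flags consumed by the jnz ahead of rows 16 to 23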
+    test        r7d,        r7d
+    ; rows 0 to 7
+    movu        m0,         [r2 +  1]           ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    movu        m1,         [r2 +  2]           ; [33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+    movu        m3,         [r2 + 17]           ; [48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17]
+    movu        m4,         [r2 + 18]           ; [49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18]
+
+    punpckhbw   m2,         m0, m1              ; [33 32 32 31 31 30 30 29 29 28 28 27 27 26 26 25 17 16 16 15 15 14 14 13 13 12 12 11 11 10 10  9]
+    punpcklbw   m0,         m1                  ; [25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17  9  8  8  7  7  6  6  5  5  4  4  3  3  2  2  1]
+    punpcklbw   m3,         m4                  ; [41 40 40 39 39 38 38 37 37 36 36 35 35 34 34 33 25 24 24 23 23 22 22 21 21 20 20 19 19 18 18 17]
+
+    pmaddubsw   m4,         m0, [r3 - 14 * 32]  ; [2]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m1,         m2, [r3 - 14 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m4,         m1
+
+    pmaddubsw   m5,         m0, [r3 - 12 * 32]  ; [4]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m8,         m2, [r3 - 12 * 32]
+    pmulhrsw    m8,         m7
+    packuswb    m5,         m8
+
+    pmaddubsw   m6,         m0, [r3 - 10 * 32]  ; [6]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m9,         m2, [r3 - 10 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m6,         m9
+
+    pmaddubsw   m8,         m0, [r3 - 8 * 32]   ; [8]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m1,         m2, [r3 - 8 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m8,         m1
+
+    pmaddubsw   m9,         m0, [r3 - 6 * 32]   ; [10]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m1,         m2, [r3 - 6 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m9,         m1
+
+    pmaddubsw   m10,        m0, [r3 - 4 * 32]   ; [12]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m1,         m2, [r3 - 4 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m10,        m1
+
+    pmaddubsw   m11,        m0, [r3 - 2 * 32]   ; [14]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         m2, [r3 - 2 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    pmaddubsw   m12,        m0, [r3]            ; [16]
+    pmulhrsw    m12,        m7
+    pmaddubsw   m1,         m2, [r3]
+    pmulhrsw    m1,         m7
+    packuswb    m12,        m1
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 12, 1, 0
+
+    ; rows 8 to 15
+
+    pmaddubsw   m4,         m0, [r3 + 2 * 32]   ; [18]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m9,         m2, [r3 + 2 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m4,         m9
+
+    pmaddubsw   m5,         m0, [r3 + 4 * 32]   ; [20]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m9,         m2, [r3 + 4 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m5,         m9
+
+    pmaddubsw   m6,         m0, [r3 + 6 * 32]   ; [22]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m9,         m2, [r3 + 6 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m6,         m9
+
+    pmaddubsw   m8,         m0, [r3 + 8 * 32]   ; [24]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m1,         m2, [r3 + 8 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m8,         m1
+
+    pmaddubsw   m9,         m0, [r3 + 10 * 32]  ; [26]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m11,        m2, [r3 + 10 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m9,         m11
+
+    pmaddubsw   m10,        m0, [r3 + 12 * 32]  ; [28]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m11,        m2, [r3 + 12 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m10,        m11
+
+    pmaddubsw   m11,        m0, [r3 + 14 * 32]  ; [30]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         m2, [r3 + 14 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    palignr     m3,         m2, 2
+    palignr     m2,         m0, 2
+    movu        m1,         [r2 + 2]            ; [0]
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 1, 0, 8
+
+    ; rows 16 to 23
+
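+    ; mode 9 only: advance the saved dst pointer by 16 bytes so the second
+    ; half of the transposed output lands in the right-hand 16 columns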
+    jnz         .doNotAdjustBufferPtr
+    lea         r4,         [r4 + mmsize/2]
+    mov         r0,         r4
+.doNotAdjustBufferPtr:
+
+    pmaddubsw   m4,         m2, [r3 - 14 * 32]  ; [2]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m1,         m3, [r3 - 14 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m4,         m1
+
+    pmaddubsw   m5,         m2, [r3 - 12 * 32]  ; [4]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m8,         m3, [r3 - 12 * 32]
+    pmulhrsw    m8,         m7
+    packuswb    m5,         m8
+
+    pmaddubsw   m6,         m2, [r3 - 10 * 32]  ; [6]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m9,         m3, [r3 - 10 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m6,         m9
+
+    pmaddubsw   m8,         m2, [r3 - 8 * 32]   ; [8]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m1,         m3, [r3 - 8 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m8,         m1
+
+    pmaddubsw   m9,         m2, [r3 - 6 * 32]   ; [10]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m1,         m3, [r3 - 6 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m9,         m1
+
+    pmaddubsw   m10,        m2, [r3 - 4 * 32]   ; [12]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m1,         m3, [r3 - 4 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m10,        m1
+
+    pmaddubsw   m11,        m2, [r3 - 2 * 32]   ; [14]
+    pmulhrsw    m11,        m7
+    pmaddubsw   m1,         m3, [r3 - 2 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m11,        m1
+
+    pmaddubsw   m0,         m2, [r3]            ; [16]
+    pmulhrsw    m0,         m7
+    pmaddubsw   m1,         m3, [r3]
+    pmulhrsw    m1,         m7
+    packuswb    m0,         m1
+
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 11, 0, 1, 16
+
+    ; rows 24 to 31
+
+    pmaddubsw   m4,         m2, [r3 + 2 * 32]   ; [18]
+    pmulhrsw    m4,         m7
+    pmaddubsw   m9,         m3, [r3 + 2 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m4,         m9
+
+    pmaddubsw   m5,         m2, [r3 + 4 * 32]   ; [20]
+    pmulhrsw    m5,         m7
+    pmaddubsw   m9,         m3, [r3 + 4 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m5,         m9
+
+    pmaddubsw   m6,         m2, [r3 + 6 * 32]   ; [22]
+    pmulhrsw    m6,         m7
+    pmaddubsw   m9,         m3, [r3 + 6 * 32]
+    pmulhrsw    m9,         m7
+    packuswb    m6,         m9
+
+    pmaddubsw   m8,         m2, [r3 + 8 * 32]   ; [24]
+    pmulhrsw    m8,         m7
+    pmaddubsw   m1,         m3, [r3 + 8 * 32]
+    pmulhrsw    m1,         m7
+    packuswb    m8,         m1
+
+    pmaddubsw   m9,         m2, [r3 + 10 * 32]  ; [26]
+    pmulhrsw    m9,         m7
+    pmaddubsw   m11,        m3, [r3 + 10 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m9,         m11
+
+    pmaddubsw   m10,        m2, [r3 + 12 * 32]  ; [28]
+    pmulhrsw    m10,        m7
+    pmaddubsw   m11,        m3, [r3 + 12 * 32]
+    pmulhrsw    m11,        m7
+    packuswb    m10,        m11
+
+    pmaddubsw   m2,         [r3 + 14 * 32]      ; [30]
+    pmulhrsw    m2,         m7
+    pmaddubsw   m3,         [r3 + 14 * 32]
+    pmulhrsw    m3,         m7
+    packuswb    m2,         m3
+
+    movu        m1,         [r2 + 3]                ; [0]
+    TRANSPOSE_32x8_AVX2 4, 5, 6, 8, 9, 10, 2, 1, 0, 24
+    ret
+
+INIT_YMM avx2
+cglobal intra_pred_ang32_9, 3,8,13
+    add         r2, 64
+    lea         r3, [ang_table_avx2 + 32 * 16]
+    lea         r5, [r1 * 3]            ; r5 -> 3 * stride
+    lea         r6, [r1 * 4]            ; r6 -> 4 * stride
+    mova        m7, [pw_1024]
+    mov         r4, r0
+    xor         r7d, r7d
+
+    call ang32_mode_9_27_avx2
+    RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang32_27, 3,8,13
+    lea         r3, [ang_table_avx2 + 32 * 16]
+    lea         r5, [r1 * 3]            ; r5 -> 3 * stride
+    lea         r6, [r1 * 4]            ; r6 -> 4 * stride
+    mova        m7, [pw_1024]
+    xor         r7d, r7d
+    inc         r7d
+
+    call ang32_mode_9_27_avx2
+    RET
+
 %endif  ; ARCH_X86_64
 ;-----------------------------------------------------------------------------------------
 ; end of intra_pred_ang32 angular modes avx2 asm
@@ -16010,181 +16231,6 @@
     vperm2i128        m6, m6, m8, 00110001b
 %endmacro
 
-
-INIT_YMM avx2
-cglobal intra_pred_ang32_27, 3, 5, 11
-    mova              m0, [pw_1024]
-    mova              m1, [intra_pred_shuff_0_8]
-    lea               r3, [3 * r1]
-    lea               r4, [c_ang32_mode_27]
-
-    vbroadcasti128    m2, [r2 + 1]
-    pshufb            m2, m1
-    vbroadcasti128    m3, [r2 + 9]
-    pshufb            m3, m1
-    vbroadcasti128    m4, [r2 + 17]
-    pshufb            m4, m1
-    vbroadcasti128    m5, [r2 + 25]
-    pshufb            m5, m1
-
-    ;row [0, 1]
-    mova              m10, [r4 + 0 * mmsize]
-
-    INTRA_PRED_ANG32_CAL_ROW
-    movu              [r0], m7
-    movu              [r0 + r1], m6
-
-    ;row [2, 3]
-    mova              m10, [r4 + 1 * mmsize]
-
-    INTRA_PRED_ANG32_CAL_ROW
-    movu              [r0 + 2 * r1], m7
-    movu              [r0 + r3], m6
-
-    ;row [4, 5]
-    mova              m10, [r4 + 2 * mmsize]
-    lea               r0, [r0 + 4 * r1]
-
-    INTRA_PRED_ANG32_CAL_ROW
-    movu              [r0], m7
-    movu              [r0 + r1], m6
-
-    ;row [6, 7]
-    mova              m10, [r4 + 3 * mmsize]
-
-    INTRA_PRED_ANG32_CAL_ROW
-    movu              [r0 + 2 * r1], m7
-    movu              [r0 + r3], m6
-
-    ;row [8, 9]
-    lea               r0, [r0 + 4 * r1]
-    add               r4, 4 * mmsize
-    mova              m10, [r4 + 0 * mmsize]
-
-    INTRA_PRED_ANG32_CAL_ROW
-    movu              [r0], m7
-    movu              [r0 + r1], m6
-
-   ;row [10, 11]
-    mova              m10, [r4 + 1 * mmsize]
-
-    INTRA_PRED_ANG32_CAL_ROW
-    movu              [r0 + 2 * r1], m7
-    movu              [r0 + r3], m6
-
-   ;row [12, 13]
-    lea               r0, [r0 + 4 * r1]
-    mova              m10, [r4 + 2 * mmsize]
-
-    INTRA_PRED_ANG32_CAL_ROW
-    movu              [r0], m7
-    movu              [r0 + r1], m6
-
-   ;row [14]
-    mova              m10, [r4 + 3 * mmsize]
-    vperm2i128        m6, m2, m3, 00100000b
-    pmaddubsw         m6, m10
-    pmulhrsw          m6, m0
-    vperm2i128        m7, m4, m5, 00100000b
-    pmaddubsw         m7, m10
-    pmulhrsw          m7, m0
-    packuswb          m6, m7
-    vpermq            m6, m6, 11011000b
-    movu              [r0 + 2 * r1], m6
-
-    vbroadcasti128    m2, [r2 + 2]
-    pshufb            m2, m1
-    vbroadcasti128    m3, [r2 + 10]
-    pshufb            m3, m1
-    vbroadcasti128    m4, [r2 + 18]
-    pshufb            m4, m1
-    vbroadcasti128    m5, [r2 + 26]
-    pshufb            m5, m1
-
-    ;row [15, 16]
-    add               r4, 4 * mmsize
-    mova              m10, [r4 + 0 * mmsize]
-
-    INTRA_PRED_ANG32_CAL_ROW
-    movu              [r0 + r3], m7
-    lea               r0, [r0 + 4 * r1]
-    movu              [r0], m6
-
-    ;row [17, 18]
-    mova              m10, [r4 + 1 * mmsize]
-
-    INTRA_PRED_ANG32_CAL_ROW
-    movu              [r0 + r1], m7
-    movu              [r0 + 2 * r1], m6
-
-    ;row [19, 20]
-    mova              m10, [r4 + 2 * mmsize]
-
-    INTRA_PRED_ANG32_CAL_ROW
-    movu              [r0 + r3], m7
-    lea               r0, [r0 + 4 * r1]
-    movu              [r0], m6
-
-    ;row [21, 22]
-    mova              m10, [r4 + 3 * mmsize]
-
-    INTRA_PRED_ANG32_CAL_ROW
-    movu              [r0 + r1], m7
-    movu              [r0 + 2 * r1], m6
-
-    ;row [23, 24]
-    add               r4, 4 * mmsize
-    mova              m10, [r4 + 0 * mmsize]
-
-    INTRA_PRED_ANG32_CAL_ROW
-    movu              [r0 + r3], m7
-    lea               r0, [r0 + 4 * r1]
-    movu              [r0], m6
-
-    ;row [25, 26]
-    mova              m10, [r4 + 1 * mmsize]
-
-    INTRA_PRED_ANG32_CAL_ROW
-    movu              [r0 + r1], m7
-    movu              [r0 + 2 * r1], m6
-
-    ;row [27, 28]
-    mova              m10, [r4 + 2 * mmsize]
-
-    INTRA_PRED_ANG32_CAL_ROW
-    movu              [r0 + r3], m7
-    lea               r0, [r0 + 4 * r1]
-    movu              [r0], m6
-
-    ;row [29, 30]
-    mova              m10, [r4 + 3 * mmsize]
-
-    INTRA_PRED_ANG32_CAL_ROW
-    movu              [r0 + r1], m7
-    movu              [r0 + 2 * r1], m6
-
-    ;row [31]
-    vbroadcasti128    m2, [r2 + 3]
-    pshufb            m2, m1
-    vbroadcasti128    m3, [r2 + 11]
-    pshufb            m3, m1
-    vbroadcasti128    m4, [r2 + 19]
-    pshufb            m4, m1
-    vbroadcasti128    m5, [r2 + 27]
-    pshufb            m5, m1
-
-    mova              m10, [r4 + 4 * mmsize]
-    vperm2i128        m6, m2, m3, 00100000b
-    pmaddubsw         m6, m10
-    pmulhrsw          m6, m0
-    vperm2i128        m7, m4, m5, 00100000b
-    pmaddubsw         m7, m10
-    pmulhrsw          m7, m0
-    packuswb          m6, m7
-    vpermq            m6, m6, 11011000b
-    movu              [r0 + r3], m6
-    RET
-
 INIT_YMM avx2
 cglobal intra_pred_ang32_25, 3, 5, 11
     mova              m0, [pw_1024]

