[x265] [PATCH] asm-16bpp: assembly code for IntraAng32x32 all modes

dnyaneshwar at multicorewareinc.com
Wed Feb 12 13:09:30 CET 2014


# HG changeset patch
# User Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
# Date 1392206289 -19800
#      Wed Feb 12 17:28:09 2014 +0530
# Node ID 72eb4a66b72ae30e76a7b4bc6f6a95631ac29aae
# Parent  19d7752a4f9ca0863ea5efea80a040bae022be2b
asm-16bpp: assembly code for IntraAng32x32 all modes
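
All of the fractional-angle kernels below reduce to the standard HEVC two-tap
angular interpolation: each pmaddwd applies a (32 - frac, frac) weight pair
taken from ang_table, paddd adds the pd_16 rounding term, and psrld by 5
divides by 32; rows whose fractional part is zero are stored as plain copies
(the "[00]" movhps loads). For reference, a minimal C sketch of the per-sample
math follows; the function name and layout are illustrative only, not the x265
C primitive, and the modes below 18 are computed against the left reference
and transposed on store:

    #include <stdint.h>

    /* Sketch only: the two-tap weighting that the pmaddwd/paddd/psrld
     * sequences compute per 16bpp sample. 'angle' is the HEVC intraPredAngle
     * of the mode, e.g. +/-26 for modes 3/33. */
    static void angular_pred_sketch(uint16_t *dst, intptr_t dstStride,
                                    const uint16_t *ref, int angle, int size)
    {
        for (int y = 0; y < size; y++)
        {
            int pos  = (y + 1) * angle;
            int idx  = pos >> 5;       /* integer reference offset        */
            int frac = pos & 31;       /* fractional weight, 1/32 units   */
            for (int x = 0; x < size; x++)
                dst[y * dstStride + x] =
                    (uint16_t)(((32 - frac) * ref[x + idx + 1] +
                                frac * ref[x + idx + 2] + 16) >> 5);
        }
    }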

diff -r 19d7752a4f9c -r 72eb4a66b72a source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Wed Feb 12 00:35:27 2014 -0600
+++ b/source/common/x86/asm-primitives.cpp	Wed Feb 12 17:28:09 2014 +0530
@@ -734,6 +734,9 @@
 
         SETUP_INTRA_ANG4(2, 2, ssse3);
         SETUP_INTRA_ANG4(34, 2, ssse3);
+
+        SETUP_INTRA_ANG32(2, 2, ssse3);
+        SETUP_INTRA_ANG32(34, 2, ssse3);
     }
     if (cpuMask & X265_CPU_SSE4)
     {
@@ -780,6 +783,38 @@
         SETUP_INTRA_ANG4(31, 5, sse4);
         SETUP_INTRA_ANG4(32, 4, sse4);
         SETUP_INTRA_ANG4(33, 3, sse4);
+
+        SETUP_INTRA_ANG32(3,  3,  sse4);
+        SETUP_INTRA_ANG32(4,  4,  sse4);
+        SETUP_INTRA_ANG32(5,  5,  sse4);
+        SETUP_INTRA_ANG32(6,  6,  sse4);
+        SETUP_INTRA_ANG32(7,  7,  sse4);
+        SETUP_INTRA_ANG32(8,  8,  sse4);
+        SETUP_INTRA_ANG32(9,  9,  sse4);
+        SETUP_INTRA_ANG32(10, 10, sse4);
+        SETUP_INTRA_ANG32(11, 11, sse4);
+        SETUP_INTRA_ANG32(12, 12, sse4);
+        SETUP_INTRA_ANG32(13, 13, sse4);
+        SETUP_INTRA_ANG32(14, 14, sse4);
+        SETUP_INTRA_ANG32(15, 15, sse4);
+        SETUP_INTRA_ANG32(16, 16, sse4);
+        SETUP_INTRA_ANG32(17, 17, sse4);
+        SETUP_INTRA_ANG32(18, 18, sse4);
+        SETUP_INTRA_ANG32(19, 19, sse4);
+        SETUP_INTRA_ANG32(20, 20, sse4);
+        SETUP_INTRA_ANG32(21, 21, sse4);
+        SETUP_INTRA_ANG32(22, 22, sse4);
+        SETUP_INTRA_ANG32(23, 23, sse4);
+        SETUP_INTRA_ANG32(24, 24, sse4);
+        SETUP_INTRA_ANG32(25, 25, sse4);
+        SETUP_INTRA_ANG32(26, 26, sse4);
+        SETUP_INTRA_ANG32(27, 27, sse4);
+        SETUP_INTRA_ANG32(28, 28, sse4);
+        SETUP_INTRA_ANG32(29, 29, sse4);
+        SETUP_INTRA_ANG32(30, 30, sse4);
+        SETUP_INTRA_ANG32(31, 31, sse4);
+        SETUP_INTRA_ANG32(32, 32, sse4);
+        SETUP_INTRA_ANG32(33, 33, sse4);
     }
     if (cpuMask & X265_CPU_XOP)
     {
diff -r 19d7752a4f9c -r 72eb4a66b72a source/common/x86/intrapred16.asm
--- a/source/common/x86/intrapred16.asm	Wed Feb 12 00:35:27 2014 -0600
+++ b/source/common/x86/intrapred16.asm	Wed Feb 12 17:28:09 2014 +0530
@@ -35,6 +35,15 @@
 %assign x x+1
 %endrep
 
+const shuf_mode_13_23,      db  0,  0, 14, 15,  6,  7,  0,  1,  0,  0,  0,  0,  0,  0,  0,  0
+const shuf_mode_14_22,      db 14, 15, 10, 11,  4,  5,  0,  1,  0,  0,  0,  0,  0,  0,  0,  0
+const shuf_mode_15_21,      db 12, 13,  8,  9,  4,  5,  0,  1,  0,  0,  0,  0,  0,  0,  0,  0
+const shuf_mode_16_20,      db  2,  3,  0,  1, 14, 15, 12, 13,  8,  9,  6,  7,  2,  3,  0,  1
+const shuf_mode_17_19,      db  0,  1, 14, 15, 12, 13, 10, 11,  6,  7,  4,  5,  2,  3,  0,  1
+const shuf_mode32_18,       db 14, 15, 12, 13, 10, 11,  8,  9,  6,  7,  4,  5,  2,  3,  0,  1
+const pw_punpcklwd,         db  0,  1,  2,  3,  2,  3,  4,  5,  4,  5,  6,  7,  6,  7,  8,  9
+const c_mode32_10_0,        db  0,  1,  0,  1,  0,  1,  0,  1,  0,  1,  0,  1,  0,  1,  0,  1
+
 const pw_unpack0wd, times 4 db 0,1,8,8
 const pw_1023,      times 8 dw 1023
 
@@ -1336,3 +1345,4086 @@
     psrldq      m0, 2
     movh        [r0], m0
     RET
+
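+; Modes 2 and 34 use the integer angle +/-32: no interpolation is needed, each
+; output row is the reference shifted by one more sample. One invocation of the
+; macro below copies 16 rows x 32 pixels, using palignr to realign the preloaded
+; registers instead of reloading from memory.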
+%macro MODE_2_34 0
+    movu            m0, [r2 + 4]
+    movu            m1, [r2 + 20]
+    movu            m2, [r2 + 36]
+    movu            m3, [r2 + 52]
+    movu            m4, [r2 + 68]
+    movu            [r0], m0
+    movu            [r0 + 16], m1
+    movu            [r0 + 32], m2
+    movu            [r0 + 48], m3
+    palignr         m5, m1, m0, 2
+    movu            [r0 + r1], m5
+    palignr         m5, m2, m1, 2
+    movu            [r0 + r1 + 16], m5
+    palignr         m5, m3, m2, 2
+    movu            [r0 + r1 + 32], m5
+    palignr         m5, m4, m3, 2
+    movu            [r0 + r1 + 48], m5
+    palignr         m5, m1, m0, 4
+    movu            [r0 + r3], m5
+    palignr         m5, m2, m1, 4
+    movu            [r0 + r3 + 16], m5
+    palignr         m5, m3, m2, 4
+    movu            [r0 + r3 + 32], m5
+    palignr         m5, m4, m3, 4
+    movu            [r0 + r3 + 48], m5
+    palignr         m5, m1, m0, 6
+    movu            [r0 + r4], m5
+    palignr         m5, m2, m1, 6
+    movu            [r0 + r4 + 16], m5
+    palignr         m5, m3, m2, 6
+    movu            [r0 + r4 + 32], m5
+    palignr         m5, m4, m3, 6
+    movu            [r0 + r4 + 48], m5
+    lea             r0, [r0 + r1 * 4]
+    palignr         m5, m1, m0, 8
+    movu            [r0], m5
+    palignr         m5, m2, m1, 8
+    movu            [r0 + 16], m5
+    palignr         m5, m3, m2, 8
+    movu            [r0 + 32], m5
+    palignr         m5, m4, m3, 8
+    movu            [r0 + 48], m5
+    palignr         m5, m1, m0, 10
+    movu            [r0 + r1], m5
+    palignr         m5, m2, m1, 10
+    movu            [r0 + r1 + 16], m5
+    palignr         m5, m3, m2, 10
+    movu            [r0 + r1 + 32], m5
+    palignr         m5, m4, m3, 10
+    movu            [r0 + r1 + 48], m5
+    palignr         m5, m1, m0, 12
+    movu            [r0 + r3], m5
+    palignr         m5, m2, m1, 12
+    movu            [r0 + r3 + 16], m5
+    palignr         m5, m3, m2, 12
+    movu            [r0 + r3 + 32], m5
+    palignr         m5, m4, m3, 12
+    movu            [r0 + r3 + 48], m5
+    palignr         m5, m1, m0, 14
+    movu            [r0 + r4], m5
+    palignr         m5, m2, m1, 14
+    movu            [r0 + r4 + 16], m5
+    palignr         m5, m3, m2, 14
+    movu            [r0 + r4 + 32], m5
+    palignr         m5, m4, m3, 14
+    movu            [r0 + r4 + 48], m5
+    lea             r0, [r0 + r1 * 4]
+    movu            m0, [r2 + 84]
+    movu            [r0], m1
+    movu            [r0 + 16], m2
+    movu            [r0 + 32], m3
+    movu            [r0 + 48], m4
+    palignr         m5, m2, m1, 2
+    movu            [r0 + r1], m5
+    palignr         m5, m3, m2, 2
+    movu            [r0 + r1 + 16], m5
+    palignr         m5, m4, m3, 2
+    movu            [r0 + r1 + 32], m5
+    palignr         m5, m0, m4, 2
+    movu            [r0 + r1 + 48], m5
+    palignr         m5, m2, m1, 4
+    movu            [r0 + r3], m5
+    palignr         m5, m3, m2, 4
+    movu            [r0 + r3 + 16], m5
+    palignr         m5, m4, m3, 4
+    movu            [r0 + r3 + 32], m5
+    palignr         m5, m0, m4, 4
+    movu            [r0 + r3 + 48], m5
+    palignr         m5, m2, m1, 6
+    movu            [r0 + r4], m5
+    palignr         m5, m3, m2, 6
+    movu            [r0 + r4 + 16], m5
+    palignr         m5, m4, m3, 6
+    movu            [r0 + r4 + 32], m5
+    palignr         m5, m0, m4, 6
+    movu            [r0 + r4 + 48], m5
+    lea             r0, [r0 + r1 * 4]
+    palignr         m5, m2, m1, 8
+    movu            [r0], m5
+    palignr         m5, m3, m2, 8
+    movu            [r0 + 16], m5
+    palignr         m5, m4, m3, 8
+    movu            [r0 + 32], m5
+    palignr         m5, m0, m4, 8
+    movu            [r0 + 48], m5
+    palignr         m5, m2, m1, 10
+    movu            [r0 + r1], m5
+    palignr         m5, m3, m2, 10
+    movu            [r0 + r1 + 16], m5
+    palignr         m5, m4, m3, 10
+    movu            [r0 + r1 + 32], m5
+    palignr         m5, m0, m4, 10
+    movu            [r0 + r1 + 48], m5
+    palignr         m5, m2, m1, 12
+    movu            [r0 + r3], m5
+    palignr         m5, m3, m2, 12
+    movu            [r0 + r3 + 16], m5
+    palignr         m5, m4, m3, 12
+    movu            [r0 + r3 + 32], m5
+    palignr         m5, m0, m4, 12
+    movu            [r0 + r3 + 48], m5
+    palignr         m5, m2, m1, 14
+    movu            [r0 + r4], m5
+    palignr         m5, m3, m2, 14
+    movu            [r0 + r4 + 16], m5
+    palignr         m5, m4, m3, 14
+    movu            [r0 + r4 + 32], m5
+    palignr         m5, m0, m4, 14
+    movu            [r0 + r4 + 48], m5
+    lea             r0,    [r0 + r1 * 4]
+%endmacro
+;--------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_2_34(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;--------------------------------------------------------------------------------------------------------------------
+INIT_XMM ssse3
+cglobal intra_pred_ang32_2, 3,6,6
+    cmp             r4m, byte 34
+    cmove           r2, r3mp
+
+    add             r1, r1
+    lea             r3, [r1 * 2]
+    lea             r4, [r1 * 3]
+    mov             r5, 2
+
+.loop:
+    MODE_2_34
+    add             r2, 32
+    dec             r5
+    jnz             .loop
+    RET
+
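+; TRANSPOSE_STORE_8x8: %1 = dst byte offset, %2 = 1 to transpose the packed
+; results before storing (the layout used by the left-reference modes below),
+; %3-%6 = registers holding the packed 16-bit results; any other %2 stores
+; them as-is in 8x4 order.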
+%macro TRANSPOSE_STORE_8x8 6
+  %if %2 == 1
+    ; transpose 4x8 and then store, used by angle BLOCK_16x16 and BLOCK_32x32
+    punpckhwd   m0, %3, %4
+    punpcklwd   %3, %4
+    punpckhwd   %4, %3, m0
+    punpcklwd   %3, m0
+
+    punpckhwd   m0, %5, %6
+    punpcklwd   %5, %6
+    punpckhwd   %6, %5, m0
+    punpcklwd   %5, m0
+
+    punpckhqdq  m0, %3, %5
+    punpcklqdq  %3, %5
+    punpcklqdq  %5, %4, %6
+    punpckhqdq  %4, %6
+
+    movu        [r0 + %1], %3
+    movu        [r0 + r1 + %1], m0
+    movu        [r0 + r1 * 2 + %1], %5
+    movu        [r0 + r5 + %1], %4
+  %else
+    ; store 8x4, used by angle BLOCK_16x16 and BLOCK_32x32
+    movh        [r0], %3
+    movhps      [r0 + r1], %3
+    movh        [r0 + r1 * 2], %4
+    movhps      [r0 + r5], %4
+    lea         r0, [r0 + r1 * 4]
+    movh        [r0], %5
+    movhps      [r0 + r1], %5
+    movh        [r0 + r1 * 2], %6
+    movhps      [r0 + r5], %6
+    lea         r0, [r0 + r1 * 4]
+  %endif
+%endmacro
+
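+; Modes 3 and 33 use intra prediction angle +/-26; the weights below
+; (26, 20, 14, 8, 2, 28, ...) follow ((k + 1) * 26) & 31. With the transpose
+; flag set, as in intra_pred_ang32_3 below, one invocation fills a 4-row x
+; 32-column strip; the caller loops 8 times, stepping dst by 4 rows and the
+; reference by 4 samples.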
+%macro MODE_3_33 1
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m3,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    mova        m7,        m0
+
+    palignr     m1,        m3, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    punpckhwd   m2,        m0, m1                     ; [9 8 8 7 7 6 6 5] xmm2
+    punpcklwd   m0,        m1                         ; [5 4 4 3 3 2 2 1] xmm0
+
+    palignr     m1,        m2, m0, 4                  ; [6 5 5 4 4 3 3 2] xmm1
+    pmaddwd     m4,        m0, [r3 + 10 * 16]         ; [26]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m5,        m1, [r3 + 4 * 16]          ; [20]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+    packusdw    m4,        m5
+
+    palignr     m5,        m2, m0, 8
+    pmaddwd     m5,        [r3 - 2 * 16]              ; [14]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m6,        m2, m0, 12
+    pmaddwd     m6,        [r3 - 8 * 16]              ; [ 8]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m2, [r3 - 14 * 16]         ; [ 2]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m2, [r3 + 12 * 16]         ; [28]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m0,        m3, m2, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m1,        m0, [r3 + 6 * 16]          ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    psrldq      m2,        m3, 2   ; [x 16 15 14 13 12 11 10]
+    palignr     m2,        m0, 4   ; [11 10 10 9 9 8 8 7]
+
+    pmaddwd     m2,        [r3]                       ; [16]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m1,        m2
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    palignr     m0,        m3, m7, 14                 ; [15 14 13 12 11 10 9 8]
+    movu        m3,        [r2 + 32]                  ; [23 22 21 20 19 18 17 16]
+    palignr     m1,        m3, m0, 2                  ; [16 15 14 13 12 11 10 9]
+    punpckhwd   m7,        m0, m1                     ; [16 15 15 14 14 13 13 12]
+    punpcklwd   m0,        m1                         ; [12 11 11 10 10 9 9 8]
+
+    palignr     m5,        m7, m0, 4                  ; [13 12 12 11 11 10 10 9]
+    pmaddwd     m4,        m0, [r3 - 6 * 16]          ; [10]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m5, [r3 - 12 * 16]         ; [04]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        [r3 + 14 * 16]             ; [30]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m6,        m7, m0, 8                  ; [14 13 13 12 12 11 11 10]
+    pmaddwd     m6,        [r3 + 8 * 16]              ; [24]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    palignr     m1,        m7, m0, 12                 ; [15 14 14 13 13 12 12 11]
+    pmaddwd     m6,        m1, [r3 + 2 * 16]          ; [18]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m7, [r3 - 4 * 16]          ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m2,        m3, m7, 4                  ; [17 16 16 15 15 14 14 13]
+    pmaddwd     m1,        m2, [r3 - 10 * 16]         ; [6]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 28]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    movu        m0,        [r2 + 28]                  ; [35 34 33 32 31 30 29 28]
+    palignr     m1,        m0, 2                      ; [ x 35 34 33 32 31 30 29]
+    punpckhwd   m2,        m0, m1                     ; [ x 35 35 34 34 33 33 32]
+    punpcklwd   m0,        m1                         ; [32 31 31 30 30 29 29 28]
+
+    pmaddwd     m4,        m0, [r3 + 10 * 16]         ; [26]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    palignr     m1,        m2, m0, 4                  ; [33 32 32 31 31 30 30 29]
+    pmaddwd     m1,        [r3 + 4 * 16]              ; [20]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    palignr     m5,        m2, m0, 8                  ; [34 33 33 32 32 31 31 30]
+    pmaddwd     m5,        [r3 - 2 * 16]              ; [14]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m6,        m2, m0, 12                 ; [35 34 34 33 33 32 32 31]
+    pmaddwd     m6,        [r3 - 8 * 16]              ; [ 8]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pinsrw      m2,        [r2 + 44], 7               ; [36 35 35 34 34 33 33 32]
+    pmaddwd     m6,        m2, [r3 - 14 * 16]         ; [ 2]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m2,        [r3 + 12 * 16]             ; [28]
+    paddd       m2,        [pd_16]
+    psrld       m2,        5
+    packusdw    m6,        m2
+
+    movu        m3,        [r2 + 38]                  ; [45 44 43 42 41 40 39 38]
+    palignr     m1,        m3, 2                      ; [ x 45 44 43 42 41 40 39]
+    punpckhwd   m2,        m3, m1                     ; [ x 45 45 44 44 43 43 42]
+    punpcklwd   m3,        m1                         ; [42 41 41 40 40 39 39 38]
+
+    pmaddwd     m1,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    palignr     m0,        m2, m3, 4
+    pmaddwd     m0,        [r3]                       ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    palignr     m5,        m2, m3, 8
+    pmaddwd     m4,        m5, [r3 - 6 * 16]          ; [10]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    palignr     m5,        m2, m3, 12
+    pmaddwd     m1,        m5, [r3 - 12 * 16]         ; [04]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        [r3 + 14 * 16]             ; [30]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 46]
+    palignr     m1,        m3, 2
+    punpckhwd   m2,        m3, m1
+    punpcklwd   m3,        m1
+
+    pmaddwd     m6,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    palignr     m6,        m2, m3, 4
+    pmaddwd     m6,        [r3 + 2 * 16]              ; [18]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    palignr     m1,        m2, m3, 8
+    pmaddwd     m1,        [r3 - 4 * 16]              ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m1,        m2, m3, 12
+    pmaddwd     m1,        [r3 - 10 * 16]             ; [06]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 54]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_3(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang32_3, 3,6,8
+    lea         r3, [ang_table + 16 * 16]
+    mov         r4d, 8
+    add         r1, r1
+    lea         r5, [r1 * 3]
+
+.loop:
+    MODE_3_33 1
+    lea         r0, [r0 + r1 * 4 ]
+    add         r2, 8
+    dec         r4
+    jnz         .loop
+    RET
+
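+; Modes 4 and 32: intra prediction angle +/-21, same structure as MODE_3_33.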
+%macro MODE_4_32 1
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m3,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m1,        m3, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    punpckhwd   m2,        m0, m1                     ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m0,        m1                         ; [5 4 4 3 3 2 2 1]
+
+    pmaddwd     m4,        m0, [r3 + 5 * 16]          ; [21]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    palignr     m5,        m2, m0, 4                  ; [6 5 5 4 4 3 3 2]
+    pmaddwd     m1,        m5, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        [r3 + 15 * 16]             ; [31]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m6,        m2, m0, 8
+    pmaddwd     m6,        [r3 + 4 * 16]              ; [ 20]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    palignr     m1,        m2, m0, 12
+    pmaddwd     m6,        m1, [r3 - 7 * 16]          ; [ 9]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        [r3 + 14 * 16]             ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m2, [r3 + 3 * 16]          ; [19]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    palignr     m7,        m3, m2, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m0,        m7, [r3 - 8 * 16]          ; [8]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m7, [r3 + 13 * 16]         ; [29]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m0,        [r2 + 34]                  ; [24 23 22 21 20 19 18 17]
+
+    palignr     m2,        m0, m3, 2                  ; [17 16 15 14 13 12 11 10]
+    palignr     m1,        m0, m3, 4                  ; [18 17 16 15 14 13 12 11]
+    punpckhwd   m3,        m2, m1                     ; [18 17 17 16 16 15 15 14]
+    punpcklwd   m2,        m1                         ; [14 13 13 12 12 11 11 10]
+
+    palignr     m1,        m2, m7, 4                  ; [11 10 10 9 9 8 8 7]
+    pmaddwd     m1,        [r3 +  2 * 16]             ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    palignr     m5,        m2, m7, 8
+    mova        m6,        m5
+    pmaddwd     m5,        [r3 - 9 * 16]              ; [07]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        [r3 + 12 * 16]             ; [28]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    palignr     m6,        m2, m7, 12
+    pmaddwd     m6,        [r3 +      16]             ; [17]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m2, [r3 - 10 * 16]         ; [06]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m2, [r3 + 11 * 16]         ; [27]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    palignr     m7,        m3, m2, 4
+    pmaddwd     m7,        [r3]                       ; [16]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m1,        m7
+    mova        m7,        m0
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    palignr     m0,        m3, m2, 8
+    pmaddwd     m4,        m0, [r3 - 11 * 16]         ; [5]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m0, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    palignr     m5,        m3, m2, 12
+    pmaddwd     m5,        [r3 - 16]                  ; [15]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m1,        m3, [r3 - 12 * 16]         ; [4]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    pmaddwd     m6,        m3, [r3 + 9 * 16]          ; [25]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    movu        m0,        [r2 + 50]                  ; [32 31 30 29 28 27 26 25]
+    palignr     m2,        m0, m7, 2                  ; [25 24 23 22 21 20 19 18]
+    palignr     m1,        m0, m7, 4                  ; [26 25 24 23 22 21 20 19]
+    punpckhwd   m7,        m2, m1                     ; [26 25 25 24 24 23 23 22]
+    punpcklwd   m2,        m1                         ; [22 21 21 20 20 19 19 18]
+
+    palignr     m1,        m2, m3, 4
+    pmaddwd     m1,        [r3 - 2 * 16]              ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m1,        m2, m3, 8
+    mova        m0,        m1
+    pmaddwd     m1,        [r3 - 13 * 16]             ; [3]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        [r3 + 8 * 16]              ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    palignr     m4,        m2, m3, 12
+    pmaddwd     m4,        [r3 - 3 * 16]              ; [13]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m2, [r3 - 14 * 16]         ; [2]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m2, [r3 + 7 * 16]          ; [23]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m6,        m7, m2, 4
+    pmaddwd     m6,        [r3 - 4 * 16]              ; [12]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    palignr     m1,        m7, m2, 8
+    pmaddwd     m6,        m1, [r3 - 15 * 16]         ; [1]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        [r3 + 6 * 16]              ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m1,        m7, m2, 12
+    pmaddwd     m1,        [r3 - 5 * 16]              ; [11]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 44]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_4(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang32_4, 3,6,8
+    lea         r3, [ang_table + 16 * 16]
+    mov         r4d, 8
+    add         r1, r1
+    lea         r5, [r1 * 3]
+
+.loop:
+    MODE_4_32 1
+    lea         r0, [r0 + r1 * 4 ]
+    add         r2, 8
+    dec         r4
+    jnz         .loop
+    RET
+
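+; Modes 5 and 31: intra prediction angle +/-17, same structure as MODE_3_33.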
+%macro MODE_5_31 1
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m3,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m1,        m3, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    punpckhwd   m2,        m0, m1                     ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m0,        m1                         ; [5 4 4 3 3 2 2 1]
+
+    pmaddwd     m4,        m0, [r3 + 16]              ; [17]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    palignr     m1,        m2, m0, 4
+    mova        m5,        m1
+    pmaddwd     m1,        [r3 - 14 * 16]             ; [2]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        [r3 + 3 * 16]              ; [19]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m6,        m2, m0, 8
+    mova        m1,        m6
+    pmaddwd     m6,        [r3 - 12 * 16]             ; [4]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m1, [r3 + 5 * 16]          ; [21]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    palignr     m1,        m2, m0, 12
+    mova        m7,        m1
+    pmaddwd     m7,        [r3 - 10 * 16]             ; [6]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    pmaddwd     m1,        [r3 + 7 * 16]              ; [23]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m7,        m2, [r3 - 8 * 16]          ; [8]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m1,        m7
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m2, [r3 + 9 * 16]          ; [25]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    palignr     m7,        m3, m2, 4                  ; [10 9 9 8 8 7 7 6]
+    pmaddwd     m1,        m7, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m7, [r3 + 11 * 16]         ; [27]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m0,        [r2 + 34]                  ; [24 23 22 21 20 19 18 17]
+    palignr     m2,        m0, m3, 2                  ; [17 16 15 14 13 12 11 10]
+    palignr     m1,        m0, m3, 4                  ; [18 17 16 15 14 13 12 11]
+    punpckhwd   m3,        m2, m1                     ; [18 17 17 16 16 15 15 14]
+    punpcklwd   m2,        m1                         ; [14 13 13 12 12 11 11 10]
+
+    palignr     m6,        m2, m7, 4
+    pmaddwd     m1,        m6, [r3 - 4 * 16]          ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    pmaddwd     m6,        [r3 + 13 * 16]             ; [29]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    palignr     m1,        m2, m7, 8
+    mova        m0,        m1
+    pmaddwd     m1,        [r3 - 2 * 16]              ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m0, [r3 + 15 * 16]         ; [31]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    palignr     m0,        m2, m7, 12
+    pmaddwd     m0,        [r3]                       ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m2, [r3 - 15 * 16]         ; [1]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m2, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    palignr     m1,        m3, m2, 4
+    pmaddwd     m5,        m1, [r3 - 13 * 16]         ; [3]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m1,        [r3 + 4 * 16]              ; [20]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    palignr     m1,        m3, m2, 8
+    pmaddwd     m6,        m1, [r3 - 11 * 16]         ; [5]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        [r3 + 6 * 16]              ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m7,        m3, m2, 12
+    pmaddwd     m1,        m7, [r3 - 9 * 16]          ; [7]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m7,        [r3 + 8 * 16]              ; [24]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m1,        m7
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 7 * 16]          ; [9]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    movu        m0,        [r2 + 36]                  ; [25 24 23 22 21 20 19 18]
+    palignr     m1,        m0, 2                      ; [x 25 24 23 22 21 20 19]
+    punpcklwd   m0,        m1                         ; [22 21 21 20 20 19 19 18]
+
+    palignr     m1,        m0, m3, 4
+    pmaddwd     m5,        m1, [r3 - 5 * 16]          ; [11]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m1,        [r3 + 12 * 16]             ; [28]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    palignr     m1,        m0, m3, 8
+    pmaddwd     m6,        m1, [r3 - 3 * 16]          ; [13]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        [r3 + 14 * 16]             ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m1,        m0, m3, 12
+    pmaddwd     m1,        [r3 - 16]                  ; [15]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 36]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_5(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang32_5, 3,6,8
+    lea         r3, [ang_table + 16 * 16]
+    mov         r4d, 8
+    add         r1, r1
+    lea         r5, [r1 * 3]
+
+.loop:
+    MODE_5_31 1
+    lea         r0, [r0 + r1 * 4 ]
+    add         r2, 8
+    dec         r4
+    jnz         .loop
+    RET
+
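+; Modes 6 and 30: intra prediction angle +/-13, same structure as MODE_3_33.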
+%macro MODE_6_30 1
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movu        m3,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m1,        m3, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    punpckhwd   m2,        m0, m1                     ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m0,        m1                         ; [5 4 4 3 3 2 2 1]
+
+    pmaddwd     m4,        m0, [r3 - 3 * 16]          ; [13]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m0, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    palignr     m1,        m2, m0, 4
+    pmaddwd     m5,        m1, [r3 - 9 * 16]          ; [7]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m1,        [r3 + 4 * 16]              ; [20]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    palignr     m1,        m2, m0, 8
+    pmaddwd     m6,        m1, [r3 - 15 * 16]         ; [1]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m7,        m1, [r3 - 2 * 16]          ; [14]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    pmaddwd     m1,        [r3 + 11 * 16]             ; [27]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    palignr     m7,        m2, m0, 12
+    pmaddwd     m0,        m7, [r3 - 8 * 16]          ; [8]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m7, [r3 +  5 * 16]         ; [21]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m2, [r3 - 14 * 16]         ; [2]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m2, [r3 - 16]              ; [15]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m2, [r3 + 12 * 16]         ; [28]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    palignr     m7,        m3, m2, 4
+    pmaddwd     m6,        m7, [r3 - 7 * 16]          ; [9]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m7, [r3 + 6 * 16]          ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m0,        [r2 + 34]                  ; [24 23 22 21 20 19 18 17]
+    palignr     m2,        m0, m3, 2                  ; [17 16 15 14 13 12 11 10]
+    palignr     m1,        m0, m3, 4                  ; [18 17 16 15 14 13 12 11]
+    punpckhwd   m3,        m2, m1                     ; [18 17 17 16 16 15 15 14]
+    punpcklwd   m2,        m1                         ; [14 13 13 12 12 11 11 10]
+
+    palignr     m0,        m2, m7, 4
+    pmaddwd     m1,        m0, [r3 - 13 * 16]         ; [3]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        [r3]                       ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    palignr     m4,        m2, m7, 4
+    pmaddwd     m4,        [r3 +  13 * 16]            ; [29]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    palignr     m5,        m2, m7, 8
+    pmaddwd     m1,        m5, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        [r3 + 7 * 16]              ; [23]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m1,        m2, m7, 12
+    pmaddwd     m6,        m1, [r3 - 12 * 16]         ; [4]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m1, [r3 + 16]              ; [17]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        [r3 + 14 * 16]             ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m2, [r3 - 5 * 16]          ; [11]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m2, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    palignr     m5,        m3, m2, 4
+    pmaddwd     m4,        m5, [r3 - 11 * 16]         ; [5]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m5, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        [r3 + 15 * 16]             ; [31]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m6,        m3, m2, 8
+    pmaddwd     m1,        m6, [r3 - 4 * 16]          ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m5,        m1
+
+    pmaddwd     m6,        [r3 + 9 * 16]              ; [25]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    palignr     m1,        m3, m2, 12
+    pmaddwd     m0,        m1, [r3 - 10 * 16]         ; [6]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m6,        m0
+
+    pmaddwd     m1,        [r3 + 3 * 16]              ; [19]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 28]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_6(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang32_6, 3,6,8
+    lea         r3, [ang_table + 16 * 16]
+    mov         r4d, 8
+    add         r1, r1
+    lea         r5, [r1 * 3]
+
+.loop:
+    MODE_6_30 1
+    lea         r0, [r0 + r1 * 4 ]
+    add         r2, 8
+    dec         r4
+    jnz         .loop
+    RET
+
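+; Modes 7 and 29: intra prediction angle +/-9, same structure as MODE_3_33.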
+%macro MODE_7_29 1
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movd        m3,        [r2 + 18]                  ; [10 9] only the bottom two samples are needed here
+    palignr     m1,        m3, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    punpckhwd   m2,        m0, m1                     ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m0,        m1                         ; [5 4 4 3 3 2 2 1]
+
+    pmaddwd     m4,        m0, [r3 - 7 * 16]          ; [9]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m0, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m0, [r3 + 11 * 16]         ; [27]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m1,        m2, m0, 4
+    pmaddwd     m6,        m1, [r3 - 12 * 16]         ; [4]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m1, [r3 - 3 * 16]          ; [13]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m7,        m1, [r3 + 6 * 16]          ; [22]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m6,        m7
+
+    pmaddwd     m1,        [r3 + 15 * 16]             ; [31]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    mova        m3,        m0
+    palignr     m7,        m2, m0, 8
+    pmaddwd     m0,        m7, [r3 - 8 * 16]          ; [8]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m7, [r3 + 16]              ; [17]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m7, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    palignr     m1,        m2, m3, 12
+    pmaddwd     m5,        m1, [r3 - 13 * 16]         ; [3]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m1, [r3 - 4 * 16]          ; [12]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m1, [r3 + 5 * 16]          ; [21]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        [r3 + 14 * 16]             ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m2, [r3 - 9 * 16]          ; [7]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m2, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m2, [r3 + 9 * 16]          ; [25]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m7,        [r2 + 18]                  ; [16 15 14 13 12 11 10 9]
+    palignr     m1,        m7, 2                      ; [x 16 15 14 13 12 11 10]
+    punpcklwd   m7,        m1                         ; [13 12 12 11 11 10 10 9]
+
+    palignr     m6,        m7, m2, 4
+    pmaddwd     m1,        m6, [r3 - 14 * 16]         ; [2]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m6, [r3 - 5 * 16]          ; [11]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m0,        m6, [r3 + 4 * 16]          ; [20]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        [r3 + 13 * 16]             ; [29]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    palignr     m0,        m7, m2, 8
+    pmaddwd     m1,        m0, [r3 - 10 * 16]         ; [6]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m0, [r3 - 16]              ; [15]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        [r3 + 8 * 16]              ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    palignr     m0,        m7, m2, 12
+    pmaddwd     m4,        m0, [r3 - 15 * 16]         ; [1]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m0, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m0, [r3 + 3 * 16]          ; [19]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m0,        [r3 + 12 * 16]             ; [28]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m7, [r3 - 11 * 16]         ; [5]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m0,        m7, [r3 - 2 * 16]          ; [14]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m6,        m0
+
+    pmaddwd     m1,        m7, [r3 + 7 * 16]          ; [23]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 20]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_7(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang32_7, 3,6,8
+    lea         r3, [ang_table + 16 * 16]
+    mov         r4d, 8
+    add         r1, r1
+    lea         r5, [r1 * 3]
+
+.loop:
+    MODE_7_29 1
+    lea         r0, [r0 + r1 * 4 ]
+    add         r2, 8
+    dec         r4
+    jnz         .loop
+    RET
+
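+; Modes 8 and 28: intra prediction angle +/-5, same structure as MODE_3_33.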
+%macro MODE_8_28 1
+    movu        m0,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    movd        m3,        [r2 + 18]                  ; [10 9] only the bottom two samples are needed here
+    palignr     m1,        m3, m0, 2                  ; [9 8 7 6 5 4 3 2]
+    punpckhwd   m2,        m0, m1                     ; [9 8 8 7 7 6 6 5]
+    punpcklwd   m0,        m1                         ; [5 4 4 3 3 2 2 1]
+
+    pmaddwd     m4,        m0, [r3 - 11 * 16]         ; [5]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m0, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m0, [r3 - 16]              ; [15]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m0, [r3 + 4 * 16]          ; [20]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m0, [r3 + 9 * 16]          ; [25]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m0, [r3 + 14 * 16]         ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    palignr     m7,        m2, m0, 4
+    pmaddwd     m1,        m7, [r3 - 13 * 16]         ; [3]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    mova        m3,        m0
+    pmaddwd     m0,        m7, [r3 - 8 * 16]          ; [8]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m7, [r3 - 3 * 16]          ; [13]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m7, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m7, [r3 + 7 * 16]          ; [23]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m7, [r3 + 12 * 16]         ; [28]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    palignr     m7,        m2, m3, 8
+    pmaddwd     m6,        m7, [r3 - 15 * 16]         ; [1]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m7, [r3 - 10 * 16]         ; [6]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m7, [r3 - 5 * 16]          ; [11]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m7, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m7, [r3 + 5 * 16]          ; [21]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m7, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m7, [r3 + 15 * 16]         ; [31]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    palignr     m7,        m2, m3, 12
+    pmaddwd     m0,        m7, [r3 - 12 * 16]         ; [4]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m7, [r3 - 7 * 16]          ; [9]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m7, [r3 - 2 * 16]          ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m7, [r3 + 3 * 16]          ; [19]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m7, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m7, [r3 + 13 * 16]         ; [29]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m2, [r3 - 14 * 16]         ; [2]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m2, [r3 - 9 * 16]          ; [7]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m0,        m2, [r3 - 4 * 16]          ; [12]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m2, [r3 + 16]              ; [17]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m0,        m2, [r3 + 6 * 16]          ; [22]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m6,        m0
+
+    pmaddwd     m1,        m2, [r3 + 11 * 16]         ; [27]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 12]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_8(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang32_8, 3,6,8
+    lea         r3, [ang_table + 16 * 16]
+    mov         r4d, 8
+    add         r1, r1
+    lea         r5, [r1 * 3]
+
+.loop:
+    MODE_8_28 1
+    lea         r0, [r0 + r1 * 4 ]
+    add         r2, 8
+    dec         r4
+    jnz         .loop
+    RET
+
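+; Modes 9 and 27: intra prediction angle +/-2, so sixteen consecutive results
+; are interpolated from the same pair of reference samples with weights
+; 2, 4, ..., 30, 0 before the window slides by one sample.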
+%macro MODE_9_27 1
+    movu        m3,        [r2 + 2]                   ; [8 7 6 5 4 3 2 1]
+    palignr     m1,        m3, 2                      ; [x 8 7 6 5 4 3 2]
+    punpckhwd   m2,        m3, m1                     ; [x 8 8 7 7 6 6 5]
+    punpcklwd   m3,        m1                         ; [5 4 4 3 3 2 2 1]
+
+    pmaddwd     m4,        m3, [r3 - 14 * 16]         ; [2]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 12 * 16]         ; [4]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 10 * 16]         ; [6]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 8 * 16]          ; [8]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 4]                   ; [00]
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    palignr     m7,        m2, m3, 4
+    pmaddwd     m4,        m7, [r3 - 14 * 16]         ; [2]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m7, [r3 - 12 * 16]         ; [4]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m7, [r3 - 10 * 16]         ; [6]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m0,        m7, [r3 - 8 * 16]          ; [8]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m7, [r3 - 6 * 16]          ; [10]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m7, [r3 - 4 * 16]          ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m7, [r3 - 2 * 16]          ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m7, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m7, [r3 + 2 * 16]          ; [18]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m7, [r3 + 4 * 16]          ; [20]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m7, [r3 + 6 * 16]          ; [22]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m0,        m7, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m7, [r3 + 10 * 16]         ; [26]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m0,        m7, [r3 + 12 * 16]         ; [28]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m6,        m0
+
+    pmaddwd     m7,        [r3 + 14 * 16]             ; [30]
+    paddd       m7,        [pd_16]
+    psrld       m7,        5
+    packusdw    m7,        m7
+    movhps      m7,        [r2 + 6]                   ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m7
+%endmacro
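+; The MODE_xx_yy macros above and below all follow the same pattern, four
+; prediction samples at a time:
+;   * pshufb with pw_punpcklwd arranges the reference row as the word pairs
+;     (ref[i], ref[i+1]) needed by neighbouring samples,
+;   * pmaddwd with one 16-byte row of ang_table applies the weight pair
+;     (32 - frac, frac); the bracketed comments ([14], [23], ...) give frac,
+;   * paddd pd_16 / psrld 5 perform the rounding, and packusdw merges the
+;     results for two fractions back into one register of eight words.
+; As an illustrative scalar model (the variable names below are not from the
+; source), each group of four samples amounts to:
+;     for (int x = 0; x < 4; x++)
+;         dst[x] = (pixel)(((32 - frac) * ref[idx + x] +
+;                           frac * ref[idx + x + 1] + 16) >> 5);
+; TRANSPOSE_STORE_8x8 writes the packed results as 8x8 tiles; its second
+; argument (always 1 for the horizontal modes in this patch) requests a
+; transposed store, so the mirrored vertical mode named in each macro
+; (e.g. 27 in MODE_9_27) can reuse the same macro without it.
+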
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_9(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
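+; Mode 9 (intra angle +2).  The angle is positive, so the left reference is
+; used directly: every loop iteration below fills four destination rows of
+; 32 samples and then steps the reference pointer down by four pixels
+; (add r2, 8 bytes).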
+INIT_XMM sse4
+cglobal intra_pred_ang32_9, 3,6,8
+    lea         r3, [ang_table + 16 * 16]
+    mov         r4d, 8
+    add         r1, r1
+    lea         r5, [r1 * 3]
+
+.loop:
+    MODE_9_27 1
+    lea         r0, [r0 + r1 * 4]
+    add         r2, 8
+    dec         r4
+    jnz         .loop
+    RET
+
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_10(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
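+; Mode 10 (intra angle 0): exact horizontal prediction, no interpolation.
+; Each output row is one left-reference pixel broadcast across all 32
+; columns (pshufb with c_mode32_10_0); palignr steps to the next reference
+; pixel for each following row.  The bFilter boundary smoothing does not
+; apply to 32x32 blocks, so the last argument is ignored.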
+INIT_XMM sse4
+cglobal intra_pred_ang32_10, 4,7,8
+    mov         r6d, 4
+    add         r1, r1
+    lea         r5, [r1 * 3]
+    lea         r4, [r1 * 2]
+    lea         r3, [r1 * 4]
+    mova        m7, [c_mode32_10_0]
+
+.loop:
+    movu        m0, [r2 + 2]
+    pshufb      m1, m0, m7
+    movu        [r0], m1
+    movu        [r0 + 16], m1
+    movu        [r0 + 32], m1
+    movu        [r0 + 48], m1
+
+    palignr     m1, m0, 2
+    pshufb      m1, m7
+    movu        [r0 + r1], m1
+    movu        [r0 + r1 + 16], m1
+    movu        [r0 + r1 + 32], m1
+    movu        [r0 + r1 + 48], m1
+
+    palignr     m1, m0, 4
+    pshufb      m1, m7
+    movu        [r0 + r4], m1
+    movu        [r0 + r4 + 16], m1
+    movu        [r0 + r4 + 32], m1
+    movu        [r0 + r4 + 48], m1
+
+    palignr     m1, m0, 6
+    pshufb      m1, m7
+    movu        [r0 + r5], m1
+    movu        [r0 + r5 + 16], m1
+    movu        [r0 + r5 + 32], m1
+    movu        [r0 + r5 + 48], m1
+
+    add         r0, r3
+
+    palignr     m1, m0, 8
+    pshufb      m1, m7
+    movu        [r0], m1
+    movu        [r0 + 16], m1
+    movu        [r0 + 32], m1
+    movu        [r0 + 48], m1
+
+    palignr     m1, m0, 10
+    pshufb      m1, m7
+    movu        [r0 + r1], m1
+    movu        [r0 + r1 + 16], m1
+    movu        [r0 + r1 + 32], m1
+    movu        [r0 + r1 + 48], m1
+
+    palignr     m1, m0, 12
+    pshufb      m1, m7
+    movu        [r0 + r4], m1
+    movu        [r0 + r4 + 16], m1
+    movu        [r0 + r4 + 32], m1
+    movu        [r0 + r4 + 48], m1
+
+    palignr     m1, m0, 14
+    pshufb      m1, m7
+    movu        [r0 + r5], m1
+    movu        [r0 + r5 + 16], m1
+    movu        [r0 + r5 + 32], m1
+    movu        [r0 + r5 + 48], m1
+
+    add         r0, r3
+    add         r2, 16
+    dec         r6d
+    jnz         .loop
+    RET
+
+%macro MODE_11_25 1
+    movu        m3,        [r2 + 2]                   ; [7 6 5 4 3 2 1 0]
+    pshufb      m3,        [pw_punpcklwd]             ; [4 3 3 2 2 1 1 0]
+
+    pmaddwd     m4,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 8 * 16]          ; [8]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 - 10 * 16]         ; [6]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 12 * 16]         ; [4]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 14 * 16]         ; [2]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 2]                   ; [00]
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2]                       ; [6 5 4 3 2 1 0 16]
+    pshufb      m3,        [pw_punpcklwd]             ; [3 2 2 1 1 0 0 16]
+
+    pmaddwd     m4,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m0,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 8 * 16]          ; [8]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 - 10 * 16]         ; [6]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 12 * 16]         ; [4]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 14 * 16]         ; [2]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    packusdw    m1,        m1
+    movhps      m1,        [r2]                       ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_11(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
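+; Mode 11 (intra angle -2).  The left reference is copied into a stack
+; buffer with a one-pixel slot in front of it; the single above-reference
+; sample that projects onto the extended reference is written into that
+; slot, and the common interpolation loop then runs on the buffer.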
+INIT_XMM sse4
+cglobal intra_pred_ang32_11, 4,6,7,0-(4*mmsize+4)
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 0*mmsize + 2], m0
+    movu     [rsp + 1*mmsize + 2], m1
+    movu     [rsp + 2*mmsize + 2], m2
+    movu     [rsp + 3*mmsize + 2], m3
+    mov      r4w, [r3+32]
+    mov      [rsp], r4w
+    mov      r4w, [r2+64]
+    mov      [rsp+66], r4w
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp
+    add      r1, r1
+    lea      r5, [r1 * 3]
+
+.loop:
+    MODE_11_25 1
+    lea      r0, [r0 + r1 * 4]
+    add      r2, 8
+    dec      r4
+    jnz      .loop
+    RET
+
+%macro MODE_12_24 1
+    movu        m3,        [r2 + 8]                   ; [7 6 5 4 3 2 1 0]
+    pshufb      m3,        m2                         ; [4 3 3 2 2 1 1 0]
+
+    pmaddwd     m4,        m3, [r3 + 11 * 16]         ; [27]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 + 16]              ; [17]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 - 9 * 16]          ; [7]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 14 * 16]         ; [2]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 6]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 13 * 16]         ; [29]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 + 3 * 16]          ; [19]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 7 * 16]          ; [9]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 12 * 16]         ; [4]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 4]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 15 * 16]         ; [31]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 + 5 * 16]          ; [21]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 5 * 16]          ; [11]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 10 * 16]         ; [6]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 15 * 16]         ; [1]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 2]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m3, [r3 + 7 * 16]          ; [23]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 3 * 16]          ; [13]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3 - 8 * 16]          ; [8]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 13 * 16]         ; [3]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 + 9 * 16]          ; [25]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 - 16]              ; [15]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 11 * 16]         ; [5]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    packusdw    m1,        m1
+    movhps      m1,        [r2]                       ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_12(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
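+; Mode 12 (intra angle -5): as mode 11, but four above-reference samples
+; (the word copies from r3 + 12/26/38/52 below) are projected in front of
+; the copied left reference before the interpolation loop runs.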
+INIT_XMM sse4
+cglobal intra_pred_ang32_12, 4,6,7,0-(4*mmsize+10)
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 0*mmsize + 8], m0
+    movu     [rsp + 1*mmsize + 8], m1
+    movu     [rsp + 2*mmsize + 8], m2
+    movu     [rsp + 3*mmsize + 8], m3
+
+    mov      r4w, [r2+64]
+    mov      [rsp+72], r4w
+    mov      r4w, [r3+12]
+    mov      [rsp+6], r4w
+    mov      r4w, [r3+26]
+    mov      [rsp+4], r4w
+    mov      r4w, [r3+38]
+    mov      [rsp+2], r4w
+    mov      r4w, [r3+52]
+    mov      [rsp], r4w
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp
+    add      r1, r1
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+
+.loop:
+    MODE_12_24 1
+    lea      r0, [r0 + r1 * 4]
+    add      r2, 8
+    dec      r4
+    jnz      .loop
+    RET
+
+%macro MODE_13_23 1
+    movu        m3,        [r2 + 16]                  ; [7 6 5 4 3 2 1 0]
+    pshufb      m3,        m2                         ; [4 3 3 2 2 1 1 0]
+
+    pmaddwd     m4,        m3, [r3 + 7 * 16]          ; [23]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 11 * 16]         ; [05]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 14]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 + 3 * 16]          ; [19]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 15 * 16]         ; [01]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    movu        m3,        [r2 + 12]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 16]              ; [15]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 10 * 16]         ; [06]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    movu        m3,        [r2 + 10]
+    pshufb      m3,        m2
+
+    pmaddwd     m5,        m3, [r3 + 13 * 16]         ; [29]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 - 5 * 16]          ; [11]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 14 * 16]         ; [02]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 8]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 9 * 16]          ; [25]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 9 * 16]          ; [07]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 6]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 + 5 * 16]          ; [21]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m0,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m3, [r3 - 13 * 16]         ; [03]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    movu        m3,        [r2 + 4]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 + 16]              ; [17]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3 - 8 * 16]          ; [08]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2 + 2]
+    pshufb      m3,        m2
+
+    pmaddwd     m4,        m3, [r3 + 15 * 16]         ; [31]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 3 * 16]          ; [13]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 12 * 16]         ; [04]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 11 * 16]         ; [27]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 7 * 16]          ; [09]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    packusdw    m1,        m1
+    movhps      m1,        [r2]                       ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_13(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
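+; Mode 13 (intra angle -9).  The above-reference samples that project onto
+; the extended left reference are gathered with shuf_mode_13_23 (plus two
+; individual word copies) into the bottom of the stack buffer, in front of
+; the copied left reference.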
+INIT_XMM sse4
+cglobal intra_pred_ang32_13, 4,6,7,0-(5*mmsize+2)
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 1*mmsize], m0
+    movu     [rsp + 2*mmsize], m1
+    movu     [rsp + 3*mmsize], m2
+    movu     [rsp + 4*mmsize], m3
+
+    mov      r4w, [r2+64]
+    mov      [rsp+80], r4w
+    movu     m0, [r3 + 8]
+    movu     m1, [r3 + 36]
+    pshufb   m0, [shuf_mode_13_23]
+    pshufb   m1, [shuf_mode_13_23]
+    movh     [rsp + 8], m0
+    movh     [rsp], m1
+    mov      r4w, [r3+28]
+    mov      [rsp+8], r4w
+    mov      r4w, [r3+56]
+    mov      [rsp], r4w
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp
+    add      r1, r1
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+
+.loop:
+    MODE_13_23 1
+    lea      r0, [r0 + r1 * 4]
+    add      r2, 8
+    dec      r4
+    jnz     .loop
+    RET
+
+%macro MODE_14_22 1
+    movu        m3,        [r2 + 24]                  ; [7 6 5 4 3 2 1 0]
+    pshufb      m3,        m2                         ; [4 3 3 2 2 1 1 0]
+
+    pmaddwd     m4,        m3, [r3 + 3 * 16]          ; [19]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 10 * 16]         ; [06]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    movu        m3,        [r2 + 22]
+    pshufb      m3,        m2
+
+    pmaddwd     m5,        m3, [r3 + 9 * 16]          ; [25]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 20]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 15 * 16]         ; [31]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 11 * 16]         ; [05]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    movu        m3,        [r2 + 18]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 5 * 16]          ; [11]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 16]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 + 16]              ; [17]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 12 * 16]         ; [04]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 14]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 7 * 16]          ; [23]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 12]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 13 * 16]         ; [29]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 13 * 16]         ; [03]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 10]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 7 * 16]          ; [09]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 8]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m3, [r3 - 16]              ; [15]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 14 * 16]         ; [02]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 6]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 5 * 16]          ; [21]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3 - 8 * 16]          ; [08]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2 + 4]
+    pshufb      m3,        m2
+
+    pmaddwd     m4,        m3, [r3 + 11 * 16]         ; [27]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 15 * 16]         ; [01]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 2]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 - 9 * 16]          ; [07]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    movu        m3,        [r2]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 3 * 16]          ; [13]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    packusdw    m1,        m1
+    movhps      m1,        [r2]                       ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_14(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
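+; Mode 14 (intra angle -13): same buffer layout as mode 13, with the
+; projected above-reference samples gathered through shuf_mode_14_22.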
+INIT_XMM sse4
+cglobal intra_pred_ang32_14, 4,6,7,0-(5*mmsize+10)
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 1*mmsize + 8], m0
+    movu     [rsp + 2*mmsize + 8], m1
+    movu     [rsp + 3*mmsize + 8], m2
+    movu     [rsp + 4*mmsize + 8], m3
+
+    mov      r4w, [r2 + 64]
+    mov      [rsp + 88], r4w
+    mov      r4w, [r3+4]
+    mov      [rsp+22], r4w
+    movu     m0, [r3 + 10]
+    movu     m1, [r3 + 30]
+    movu     m2, [r3 + 50]
+    pshufb   m0, [shuf_mode_14_22]
+    pshufb   m1, [shuf_mode_14_22]
+    pshufb   m2, [shuf_mode_14_22]
+    movh     [rsp + 14], m0
+    movh     [rsp + 6], m1
+    movh     [rsp - 2], m2
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp
+    add      r1, r1
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+
+.loop:
+    MODE_14_22 1
+    lea      r0, [r0 + r1 * 4]
+    add      r2, 8
+    dec      r4
+    jnz     .loop
+    RET
+
+%macro MODE_15_21 1
+    movu        m3,        [r2 + 32]                  ; [7 6 5 4 3 2 1 0]
+    pshufb      m3,        m2                         ; [4 3 3 2 2 1 1 0]
+
+    pmaddwd     m4,        m3, [r3 - 16]              ; [15]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 30]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 3 * 16]          ; [13]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 28]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 - 5 * 16]          ; [11]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    movu        m3,        [r2 + 26]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 7 * 16]          ; [09]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    movu        m3,        [r2 + 24]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 9 * 16]          ; [07]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 22]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 11 * 16]         ; [05]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 20]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    pmaddwd     m6,        m3, [r3 - 13 * 16]         ; [03]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    movu        m3,        [r2 + 18]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 15 * 16]         ; [01]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    movu        m3,        [r2 + 16]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2 + 14]
+    pshufb      m3,        m2
+
+    pmaddwd     m4,        m3, [r3 + 15 * 16]         ; [31]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    movu        m3,        [r2 + 12]
+    pshufb      m3,        m2
+
+    pmaddwd     m5,        m3, [r3 + 13 * 16]         ; [29]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m0,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    movu        m3,        [r2 + 10]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 11 * 16]         ; [27]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 8]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 9 * 16]          ; [25]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3 - 8 * 16]          ; [08]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2 + 6]
+    pshufb      m3,        m2
+
+    pmaddwd     m4,        m3, [r3 + 7 * 16]          ; [23]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 10 * 16]         ; [06]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    movu        m3,        [r2 + 4]
+    pshufb      m3,        m2
+
+    pmaddwd     m5,        m3, [r3 + 5 * 16]          ; [21]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 12 * 16]         ; [04]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 2]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 3 * 16]          ; [19]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 14 * 16]         ; [02]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 16]              ; [17]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    packusdw    m1,        m1
+    movhps      m1,        [r2]                       ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_15(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
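+; Mode 15 (intra angle -17): the projected above-reference samples are
+; gathered through shuf_mode_15_21 into the bottom of the stack buffer.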
+INIT_XMM sse4
+cglobal intra_pred_ang32_15, 4,6,7,0-(6*mmsize+2)
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 2*mmsize], m0
+    movu     [rsp + 3*mmsize], m1
+    movu     [rsp + 4*mmsize], m2
+    movu     [rsp + 5*mmsize], m3
+
+    mov      r4w, [r2 + 64]
+    mov      [rsp + 96], r4w
+    movu     m0, [r3 + 4]
+    movu     m1, [r3 + 18]
+    movu     m2, [r3 + 34]
+    movu     m3, [r3 + 48]
+    pshufb   m0, [shuf_mode_15_21]
+    pshufb   m1, [shuf_mode_15_21]
+    pshufb   m2, [shuf_mode_15_21]
+    pshufb   m3, [shuf_mode_15_21]
+    movh     [rsp + 24], m0
+    movh     [rsp + 16], m1
+    movh     [rsp + 8], m2
+    movh     [rsp], m3
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp
+    add      r1, r1
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+
+.loop:
+    MODE_15_21 1
+    lea      r0, [r0 + r1 * 4]
+    add      r2, 8
+    dec      r4
+    jnz     .loop
+    RET
+
+%macro MODE_16_20 1
+    movu        m3,        [r2 + 40]                  ; [7 6 5 4 3 2 1 0]
+    pshufb      m3,        m2                         ; [4 3 3 2 2 1 1 0]
+
+    pmaddwd     m4,        m3, [r3 - 5 * 16]          ; [11]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 38]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 15 * 16]         ; [01]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 36]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 34]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 7 * 16]          ; [23]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 14 * 16]         ; [02]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 32]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 - 3 * 16]          ; [13]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    movu        m3,        [r2 + 30]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    pmaddwd     m4,        m3, [r3 - 13 * 16]         ; [03]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 28]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    movu        m3,        [r2 + 26]
+    pshufb      m3,        m2
+
+    pmaddwd     m5,        m3, [r3 + 9 * 16]          ; [25]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    pmaddwd     m6,        m3, [r3 - 12 * 16]         ; [04]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 24]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 - 16]              ; [15]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    movu        m3,        [r2 + 22]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    pmaddwd     m1,        m3, [r3 - 11 * 16]         ; [05]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    movu        m3,        [r2 + 20]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2 + 18]
+    pshufb      m3,        m2
+
+    pmaddwd     m4,        m3, [r3 + 11 * 16]         ; [27]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    pmaddwd     m1,        m3, [r3 - 10 * 16]         ; [06]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    movu        m3,        [r2 + 16]
+    pshufb      m3,        m2
+
+    pmaddwd     m5,        m3, [r3 + 16]              ; [17]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 14]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    pmaddwd     m6,        m3, [r3 - 9 * 16]          ; [07]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    movu        m3,        [r2 + 12]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 10]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 13 * 16]         ; [29]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    pmaddwd     m0,        m3, [r3 - 8 * 16]          ; [08]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2 + 8]
+    pshufb      m3,        m2
+
+    pmaddwd     m4,        m3, [r3 + 3 * 16]          ; [19]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 6]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 7 * 16]          ; [09]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 4]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 2]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 15 * 16]         ; [31]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 5 * 16]          ; [21]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    packusdw    m1,        m1
+    movhps      m1,        [r2]                       ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_16(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
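+; Mode 16 (intra angle -21): projected above-reference samples are gathered
+; through shuf_mode_16_20.  The stores into the stack buffer below overlap
+; on purpose; each later, lower-address store supplies the lower-index
+; projected samples.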
+INIT_XMM sse4
+cglobal intra_pred_ang32_16, 4,6,7,0-(6*mmsize+10)
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 2*mmsize + 8], m0
+    movu     [rsp + 3*mmsize + 8], m1
+    movu     [rsp + 4*mmsize + 8], m2
+    movu     [rsp + 5*mmsize + 8], m3
+
+    mov      r4w, [r2 + 64]
+    mov      [rsp + 104], r4w
+    movu     m0, [r3 + 4]
+    movu     m1, [r3 + 22]
+    movu     m2, [r3 + 40]
+    movd     m3, [r3 + 58]
+    pshufb   m0, [shuf_mode_16_20]
+    pshufb   m1, [shuf_mode_16_20]
+    pshufb   m2, [shuf_mode_16_20]
+    pshufb   m3, [shuf_mode_16_20]
+    movu     [rsp + 24], m0
+    movu     [rsp + 12], m1
+    movu     [rsp], m2
+    movd     [rsp], m3
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp
+    add      r1, r1
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+
+.loop:
+    MODE_16_20 1
+    lea      r0, [r0 + r1 * 4]
+    add      r2, 8
+    dec      r4
+    jnz     .loop
+    RET
+
+%macro MODE_17_19 1
+    movu        m3,        [r2 + 50]                  ; [7 6 5 4 3 2 1 0]
+    pshufb      m3,        m2                         ; [4 3 3 2 2 1 1 0]
+
+    pmaddwd     m4,        m3, [r3 - 10 * 16]         ; [06]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 48]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    movu        m3,        [r2 + 46]
+    pshufb      m3,        m2
+
+    pmaddwd     m5,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 44]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 42]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 12 * 16]         ; [04]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 40]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    movu        m3,        [r2 + 38]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 0, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2 + 36]
+    pshufb      m3,        m2
+
+    pmaddwd     m4,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 34]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 14 * 16]         ; [02]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 32]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 - 8 * 16]          ; [08]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 30]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    movu        m3,        [r2 + 28]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 26]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    packusdw    m1,        m1
+    movhps      m1,        [r2 + 26]                  ; [00]
+
+    TRANSPOSE_STORE_8x8 16, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2 + 24]
+    pshufb      m3,        m2
+
+    pmaddwd     m4,        m3, [r3 - 10 * 16]         ; [06]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 22]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 - 4 * 16]          ; [12]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    movu        m3,        [r2 + 20]
+    pshufb      m3,        m2
+
+    pmaddwd     m5,        m3, [r3 + 2 * 16]          ; [18]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 18]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3 + 8 * 16]          ; [24]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m5,        m0
+
+    movu        m3,        [r2 + 16]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 + 14 * 16]         ; [30]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    pmaddwd     m1,        m3, [r3 - 12 * 16]         ; [04]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2 + 14]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 - 6 * 16]          ; [10]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    movu        m3,        [r2 + 12]
+    pshufb      m3,        m2
+
+    pmaddwd     m0,        m3, [r3]                   ; [16]
+    paddd       m0,        [pd_16]
+    psrld       m0,        5
+    packusdw    m1,        m0
+
+    TRANSPOSE_STORE_8x8 32, %1, m4, m5, m6, m1
+
+    movu        m3,        [r2 + 10]
+    pshufb      m3,        m2
+
+    pmaddwd     m4,        m3, [r3 + 6 * 16]          ; [22]
+    paddd       m4,        [pd_16]
+    psrld       m4,        5
+
+    movu        m3,        [r2 + 8]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 12 * 16]         ; [28]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m4,        m1
+
+    pmaddwd     m5,        m3, [r3 - 14 * 16]         ; [02]
+    paddd       m5,        [pd_16]
+    psrld       m5,        5
+
+    movu        m3,        [r2 + 6]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 - 8 * 16]          ; [08]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+    packusdw    m5,        m6
+
+    movu        m3,        [r2 + 4]
+    pshufb      m3,        m2
+
+    pmaddwd     m6,        m3, [r3 - 2 * 16]          ; [14]
+    paddd       m6,        [pd_16]
+    psrld       m6,        5
+
+    movu        m3,        [r2 + 2]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 4 * 16]          ; [20]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+    packusdw    m6,        m1
+
+    movu        m3,        [r2]
+    pshufb      m3,        m2
+
+    pmaddwd     m1,        m3, [r3 + 10 * 16]         ; [26]
+    paddd       m1,        [pd_16]
+    psrld       m1,        5
+
+    packusdw    m1,        m1
+    movhps      m1,        [r2]                       ; [00]
+
+    TRANSPOSE_STORE_8x8 48, %1, m4, m5, m6, m1
+%endmacro
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_17(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
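+; Mode 17 (intra angle -26): projected above-reference samples are gathered
+; through shuf_mode_17_19 (and one dword through shuf_mode_16_20), then a
+; few individual words are patched in before the interpolation loop runs.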
+INIT_XMM sse4
+cglobal intra_pred_ang32_17, 4,6,7,0-(7*mmsize+4)
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 3*mmsize + 2], m0
+    movu     [rsp + 4*mmsize + 2], m1
+    movu     [rsp + 5*mmsize + 2], m2
+    movu     [rsp + 6*mmsize + 2], m3
+
+    mov      r4w, [r2 + 64]
+    mov      [rsp + 114], r4w
+    movu     m0, [r3 + 8]
+    movu     m1, [r3 + 30]
+    movu     m2, [r3 + 50]
+    movd     m3, [r3 + 2]
+    pshufb   m0, [shuf_mode_17_19]
+    pshufb   m1, [shuf_mode_17_19]
+    pshufb   m2, [shuf_mode_17_19]
+    pshufb   m3, [shuf_mode_16_20]
+    movd     [rsp + 46], m3
+    movu     [rsp + 30], m0
+    movu     [rsp + 12], m1
+    movu     [rsp - 4], m2
+    mov      r4w, [r3 + 24]
+    mov      [rsp + 30], r4w
+    mov      r4w, [r3 + 28]
+    mov      [rsp + 28], r4w
+    mov      r4w, [r3 + 46]
+    mov      [rsp + 12], r4w
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp
+    add      r1, r1
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+
+.loop:
+    MODE_17_19 1
+    lea      r0, [r0 + r1 * 4]
+    add      r2, 8
+    dec      r4
+    jnz     .loop
+    RET
+
+;-------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_18(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;-------------------------------------------------------------------------------------------------------------------
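+; Mode 18 (intra angle -32): the exact diagonal, so no interpolation at all.
+; Row 0 is the above reference itself; each following row is the same
+; 32-sample window shifted one pixel towards the word-reversed left
+; reference (shuf_mode32_18), built purely from palignr and stores.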
+INIT_XMM sse4
+cglobal intra_pred_ang32_18, 4,7,8
+    movu        m0, [r3]               ; [7 6 5 4 3 2 1 0]
+    movu        m1, [r3 + 16]          ; [15 14 13 12 11 10 9 8]
+    movu        m2, [r3 + 32]          ; [23 22 21 20 19 18 17 16]
+    movu        m3, [r3 + 48]          ; [31 30 29 28 27 26 25 24]
+    movu        m4, [r2 + 2]           ; [8 7 6 5 4 3 2 1]
+    movu        m5, [r2 + 18]          ; [16 15 14 13 12 11 10 9]
+
+    add         r1, r1
+    lea         r6, [r1 * 2]
+    lea         r3, [r1 * 3]
+    lea         r4, [r1 * 4]
+
+    movu        [r0], m0
+    movu        [r0 + 16], m1
+    movu        [r0 + 32], m2
+    movu        [r0 + 48], m3
+
+    pshufb      m4, [shuf_mode32_18]   ; [1 2 3 4 5 6 7 8]
+    pshufb      m5, [shuf_mode32_18]   ; [9 10 11 12 13 14 15 16]
+
+    palignr     m6, m0, m4, 14
+    movu        [r0 + r1], m6
+    palignr     m6, m1, m0, 14
+    movu        [r0 + r1 + 16], m6
+    palignr     m6, m2, m1, 14
+    movu        [r0 + r1 + 32], m6
+    palignr     m6, m3, m2, 14
+    movu        [r0 + r1 + 48], m6
+
+    palignr     m6, m0, m4, 12
+    movu        [r0 + r6], m6
+    palignr     m6, m1, m0, 12
+    movu        [r0 + r6 + 16], m6
+    palignr     m6, m2, m1, 12
+    movu        [r0 + r6 + 32], m6
+    palignr     m6, m3, m2, 12
+    movu        [r0 + r6 + 48], m6
+
+    palignr     m6, m0, m4, 10
+    movu        [r0 + r3], m6
+    palignr     m6, m1, m0, 10
+    movu        [r0 + r3 + 16], m6
+    palignr     m6, m2, m1, 10
+    movu        [r0 + r3 + 32], m6
+    palignr     m6, m3, m2, 10
+    movu        [r0 + r3 + 48], m6
+
+    add         r0, r4
+
+    palignr     m6, m0, m4, 8
+    movu        [r0], m6
+    palignr     m6, m1, m0, 8
+    movu        [r0 + 16], m6
+    palignr     m6, m2, m1, 8
+    movu        [r0 + 32], m6
+    palignr     m6, m3, m2, 8
+    movu        [r0 + 48], m6
+
+    palignr     m6, m0, m4, 6
+    movu        [r0 + r1], m6
+    palignr     m6, m1, m0, 6
+    movu        [r0 + r1 + 16], m6
+    palignr     m6, m2, m1, 6
+    movu        [r0 + r1 + 32], m6
+    palignr     m6, m3, m2, 6
+    movu        [r0 + r1 + 48], m6
+
+    palignr     m6, m0, m4, 4
+    movu        [r0 + r6], m6
+    palignr     m6, m1, m0, 4
+    movu        [r0 + r6 + 16], m6
+    palignr     m6, m2, m1, 4
+    movu        [r0 + r6 + 32], m6
+    palignr     m6, m3, m2, 4
+    movu        [r0 + r6 + 48], m6
+
+    palignr     m6, m0, m4, 2
+    movu        [r0 + r3], m6
+    palignr     m6, m1, m0, 2
+    movu        [r0 + r3 + 16], m6
+    palignr     m6, m2, m1, 2
+    movu        [r0 + r3 + 32], m6
+    palignr     m6, m3, m2, 2
+    movu        [r0 + r3 + 48], m6
+
+    add         r0, r4
+
+    movu        [r0], m4
+    movu        [r0 + 16], m0
+    movu        [r0 + 32], m1
+    movu        [r0 + 48], m2
+
+    palignr     m6, m4, m5, 14
+    movu        [r0 + r1], m6
+    palignr     m6, m0, m4, 14
+    movu        [r0 + r1 + 16], m6
+    palignr     m6, m1, m0, 14
+    movu        [r0 + r1 + 32], m6
+    palignr     m6, m2, m1, 14
+    movu        [r0 + r1 + 48], m6
+
+    palignr     m6, m4, m5, 12
+    movu        [r0 + r6], m6
+    palignr     m6, m0, m4, 12
+    movu        [r0 + r6 + 16], m6
+    palignr     m6, m1, m0, 12
+    movu        [r0 + r6 + 32], m6
+    palignr     m6, m2, m1, 12
+    movu        [r0 + r6 + 48], m6
+
+    palignr     m6, m4, m5, 10
+    movu        [r0 + r3], m6
+    palignr     m6, m0, m4, 10
+    movu        [r0 + r3 + 16], m6
+    palignr     m6, m1, m0, 10
+    movu        [r0 + r3 + 32], m6
+    palignr     m6, m2, m1, 10
+    movu        [r0 + r3 + 48], m6
+
+    add         r0, r4
+
+    palignr     m6, m4, m5, 8
+    movu        [r0], m6
+    palignr     m6, m0, m4, 8
+    movu        [r0 + 16], m6
+    palignr     m6, m1, m0, 8
+    movu        [r0 + 32], m6
+    palignr     m6, m2, m1, 8
+    movu        [r0 + 48], m6
+
+    palignr     m6, m4, m5, 6
+    movu        [r0 + r1], m6
+    palignr     m6, m0, m4, 6
+    movu        [r0 + r1 + 16], m6
+    palignr     m6, m1, m0, 6
+    movu        [r0 + r1 + 32], m6
+    palignr     m6, m2, m1, 6
+    movu        [r0 + r1 + 48], m6
+
+    palignr     m6, m4, m5, 4
+    movu        [r0 + r6], m6
+    palignr     m6, m0, m4, 4
+    movu        [r0 + r6 + 16], m6
+    palignr     m6, m1, m0, 4
+    movu        [r0 + r6 + 32], m6
+    palignr     m6, m2, m1, 4
+    movu        [r0 + r6 + 48], m6
+
+    palignr     m6, m4, m5, 2
+    movu        [r0 + r3], m6
+    palignr     m6, m0, m4, 2
+    movu        [r0 + r3 + 16], m6
+    palignr     m6, m1, m0, 2
+    movu        [r0 + r3 + 32], m6
+    palignr     m6, m2, m1, 2
+    movu        [r0 + r3 + 48], m6
+
+    add         r0, r4
+
+    movu        m2, [r2 + 34]
+    movu        m3, [r2 + 50]
+    pshufb      m2, [shuf_mode32_18]
+    pshufb      m3, [shuf_mode32_18]
+
+    movu        [r0], m5
+    movu        [r0 + 16], m4
+    movu        [r0 + 32], m0
+    movu        [r0 + 48], m1
+
+    palignr     m6, m5, m2, 14
+    movu        [r0 + r1], m6
+    palignr     m6, m4, m5, 14
+    movu        [r0 + r1 + 16], m6
+    palignr     m6, m0, m4, 14
+    movu        [r0 + r1 + 32], m6
+    palignr     m6, m1, m0, 14
+    movu        [r0 + r1 + 48], m6
+
+    palignr     m6, m5, m2, 12
+    movu        [r0 + r6], m6
+    palignr     m6, m4, m5, 12
+    movu        [r0 + r6 + 16], m6
+    palignr     m6, m0, m4, 12
+    movu        [r0 + r6 + 32], m6
+    palignr     m6, m1, m0, 12
+    movu        [r0 + r6 + 48], m6
+
+    palignr     m6, m5, m2, 10
+    movu        [r0 + r3], m6
+    palignr     m6, m4, m5, 10
+    movu        [r0 + r3 + 16], m6
+    palignr     m6, m0, m4, 10
+    movu        [r0 + r3 + 32], m6
+    palignr     m6, m1, m0, 10
+    movu        [r0 + r3 + 48], m6
+
+    add         r0, r4
+
+    palignr     m6, m5, m2, 8
+    movu        [r0], m6
+    palignr     m6, m4, m5, 8
+    movu        [r0 + 16], m6
+    palignr     m6, m0, m4, 8
+    movu        [r0 + 32], m6
+    palignr     m6, m1, m0, 8
+    movu        [r0 + 48], m6
+
+    palignr     m6, m5, m2, 6
+    movu        [r0 + r1], m6
+    palignr     m6, m4, m5, 6
+    movu        [r0 + r1 + 16], m6
+    palignr     m6, m0, m4, 6
+    movu        [r0 + r1 + 32], m6
+    palignr     m6, m1, m0, 6
+    movu        [r0 + r1 + 48], m6
+
+    palignr     m6, m5, m2, 4
+    movu        [r0 + r6], m6
+    palignr     m6, m4, m5, 4
+    movu        [r0 + r6 + 16], m6
+    palignr     m6, m0, m4, 4
+    movu        [r0 + r6 + 32], m6
+    palignr     m6, m1, m0, 4
+    movu        [r0 + r6 + 48], m6
+
+    palignr     m6, m5, m2, 2
+    movu        [r0 + r3], m6
+    palignr     m6, m4, m5, 2
+    movu        [r0 + r3 + 16], m6
+    palignr     m6, m0, m4, 2
+    movu        [r0 + r3 + 32], m6
+    palignr     m6, m1, m0, 2
+    movu        [r0 + r3 + 48], m6
+
+    add         r0, r4
+
+    movu        [r0], m2
+    movu        [r0 + 16], m5
+    movu        [r0 + 32], m4
+    movu        [r0 + 48], m0
+
+    palignr     m6, m2, m3, 14
+    movu        [r0 + r1], m6
+    palignr     m6, m5, m2, 14
+    movu        [r0 + r1 + 16], m6
+    palignr     m6, m4, m5, 14
+    movu        [r0 + r1 + 32], m6
+    palignr     m6, m0, m4, 14
+    movu        [r0 + r1 + 48], m6
+
+    palignr     m6, m2, m3, 12
+    movu        [r0 + r6], m6
+    palignr     m6, m5, m2, 12
+    movu        [r0 + r6 + 16], m6
+    palignr     m6, m4, m5, 12
+    movu        [r0 + r6 + 32], m6
+    palignr     m6, m0, m4, 12
+    movu        [r0 + r6 + 48], m6
+
+    palignr     m6, m2, m3, 10
+    movu        [r0 + r3], m6
+    palignr     m6, m5, m2, 10
+    movu        [r0 + r3 + 16], m6
+    palignr     m6, m4, m5, 10
+    movu        [r0 + r3 + 32], m6
+    palignr     m6, m0, m4, 10
+    movu        [r0 + r3 + 48], m6
+
+    add         r0, r4
+
+    palignr     m6, m2, m3, 8
+    movu        [r0], m6
+    palignr     m6, m5, m2, 8
+    movu        [r0 + 16], m6
+    palignr     m6, m4, m5, 8
+    movu        [r0 + 32], m6
+    palignr     m6, m0, m4, 8
+    movu        [r0 + 48], m6
+
+    palignr     m6, m2, m3, 6
+    movu        [r0 + r1], m6
+    palignr     m6, m5, m2, 6
+    movu        [r0 + r1 + 16], m6
+    palignr     m6, m4, m5, 6
+    movu        [r0 + r1 + 32], m6
+    palignr     m6, m0, m4, 6
+    movu        [r0 + r1 + 48], m6
+
+    palignr     m6, m2, m3, 4
+    movu        [r0 + r6], m6
+    palignr     m6, m5, m2, 4
+    movu        [r0 + r6 + 16], m6
+    palignr     m6, m4, m5, 4
+    movu        [r0 + r6 + 32], m6
+    palignr     m6, m0, m4, 4
+    movu        [r0 + r6 + 48], m6
+
+    palignr     m6, m2, m3, 2
+    movu        [r0 + r3], m6
+    palignr     m6, m5, m2, 2
+    movu        [r0 + r3 + 16], m6
+    palignr     m6, m4, m5, 2
+    movu        [r0 + r3 + 32], m6
+    palignr     m6, m0, m4, 2
+    movu        [r0 + r3 + 48], m6
+    RET
+
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_19(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
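+; Mode 19 is the vertical-family mirror of mode 17.  The extended reference is
+; assembled on the stack: the above reference samples are copied to the upper
+; part of the buffer, and the left samples that project to negative reference
+; indices are reversed (shuf_mode_17_19 / shuf_mode_16_20) and stored below
+; them.  The shared MODE_17_19 macro is then run with argument 0; each
+; iteration covers a four-pixel-wide strip of all 32 rows, with r6 holding the
+; strip's start in dst and r2 walking the assembled reference.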
+INIT_XMM sse4
+cglobal intra_pred_ang32_19, 4,7,7,0-(7*mmsize+4)
+    xchg     r2, r3
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 3*mmsize + 2], m0
+    movu     [rsp + 4*mmsize + 2], m1
+    movu     [rsp + 5*mmsize + 2], m2
+    movu     [rsp + 6*mmsize + 2], m3
+
+    mov      r4w, [r2 + 64]
+    mov      [rsp + 114], r4w
+    movu     m0, [r3 + 8]
+    movu     m1, [r3 + 30]
+    movu     m2, [r3 + 50]
+    movd     m3, [r3 + 2]
+    pshufb   m0, [shuf_mode_17_19]
+    pshufb   m1, [shuf_mode_17_19]
+    pshufb   m2, [shuf_mode_17_19]
+    pshufb   m3, [shuf_mode_16_20]
+    movd     [rsp + 46], m3
+    movu     [rsp + 30], m0
+    movu     [rsp + 12], m1
+    movu     [rsp - 4], m2
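+    ; patch in the individual projected left samples that the vector stores
+    ; above do not supply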
+    mov      r4w, [r3 + 24]
+    mov      [rsp + 30], r4w
+    mov      r4w, [r3 + 28]
+    mov      [rsp + 28], r4w
+    mov      r4w, [r3 + 46]
+    mov      [rsp + 12], r4w
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp
+    add      r1, r1
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+    mov      r6, r0
+
+.loop:
+    MODE_17_19 0
+    add      r6, 8
+    mov      r0, r6
+    add      r2, 8
+    dec      r4
+    jnz      .loop
+    RET
+
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_20(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
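+; Modes 20 through 24 repeat the mode-19 recipe with their own shuffle
+; constants (shuf_mode_16_20 ... shuf_mode_13_23) and macros (MODE_16_20 ...
+; MODE_12_24); the closer the angle is to vertical, the fewer left samples
+; have to be projected, so the stack buffer shrinks from mode to mode.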
+INIT_XMM sse4
+cglobal intra_pred_ang32_20, 4,7,7,0-(6*mmsize+10)
+    xchg     r2, r3
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 2*mmsize + 8], m0
+    movu     [rsp + 3*mmsize + 8], m1
+    movu     [rsp + 4*mmsize + 8], m2
+    movu     [rsp + 5*mmsize + 8], m3
+
+    mov      r4w, [r2 + 64]
+    mov      [rsp + 104], r4w
+    movu     m0, [r3 + 4]
+    movu     m1, [r3 + 22]
+    movu     m2, [r3 + 40]
+    movd     m3, [r3 + 58]
+    pshufb   m0, [shuf_mode_16_20]
+    pshufb   m1, [shuf_mode_16_20]
+    pshufb   m2, [shuf_mode_16_20]
+    pshufb   m3, [shuf_mode_16_20]
+    movu     [rsp + 24], m0
+    movu     [rsp + 12], m1
+    movu     [rsp], m2
+    movd     [rsp], m3
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp
+    add      r1, r1
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+    mov      r6, r0
+
+.loop:
+    MODE_16_20 0
+    add      r6, 8
+    mov      r0, r6
+    add      r2, 8
+    dec      r4
+    jnz      .loop
+    RET
+
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_21(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang32_21, 4,7,7,0-(6*mmsize+2)
+    xchg     r2, r3
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 2*mmsize], m0
+    movu     [rsp + 3*mmsize], m1
+    movu     [rsp + 4*mmsize], m2
+    movu     [rsp + 5*mmsize], m3
+
+    mov      r4w, [r2 + 64]
+    mov      [rsp + 96], r4w
+    movu     m0, [r3 + 4]
+    movu     m1, [r3 + 18]
+    movu     m2, [r3 + 34]
+    movu     m3, [r3 + 48]
+    pshufb   m0, [shuf_mode_15_21]
+    pshufb   m1, [shuf_mode_15_21]
+    pshufb   m2, [shuf_mode_15_21]
+    pshufb   m3, [shuf_mode_15_21]
+    movh     [rsp + 24], m0
+    movh     [rsp + 16], m1
+    movh     [rsp + 8], m2
+    movh     [rsp], m3
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp
+    add      r1, r1
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+    mov      r6, r0
+
+.loop:
+    MODE_15_21 0
+    add      r6, 8
+    mov      r0, r6
+    add      r2, 8
+    dec      r4
+    jnz      .loop
+    RET
+
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_22(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang32_22, 4,7,7,0-(5*mmsize+10)
+    xchg     r2, r3
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 1*mmsize + 8], m0
+    movu     [rsp + 2*mmsize + 8], m1
+    movu     [rsp + 3*mmsize + 8], m2
+    movu     [rsp + 4*mmsize + 8], m3
+
+    mov      r4w, [r2 + 64]
+    mov      [rsp + 88], r4w
+    mov      r4w, [r3 + 4]
+    mov      [rsp + 22], r4w
+    movu     m0, [r3 + 10]
+    movu     m1, [r3 + 30]
+    movu     m2, [r3 + 50]
+    pshufb   m0, [shuf_mode_14_22]
+    pshufb   m1, [shuf_mode_14_22]
+    pshufb   m2, [shuf_mode_14_22]
+    movh     [rsp + 14], m0
+    movh     [rsp + 6], m1
+    movh     [rsp - 2], m2
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp
+    add      r1, r1
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+    mov      r6, r0
+
+.loop:
+    MODE_14_22 0
+    add      r6, 8
+    mov      r0, r6
+    add      r2, 8
+    dec      r4
+    jnz      .loop
+    RET
+
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_23(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang32_23, 4,7,7,0-(5*mmsize+2)
+    xchg     r2, r3
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 1*mmsize], m0
+    movu     [rsp + 2*mmsize], m1
+    movu     [rsp + 3*mmsize], m2
+    movu     [rsp + 4*mmsize], m3
+
+    mov      r4w, [r2 + 64]
+    mov      [rsp + 80], r4w
+    movu     m0, [r3 + 8]
+    movu     m1, [r3 + 36]
+    pshufb   m0, [shuf_mode_13_23]
+    pshufb   m1, [shuf_mode_13_23]
+    movh     [rsp + 8], m0
+    movh     [rsp], m1
+    mov      r4w, [r3 + 28]
+    mov      [rsp + 8], r4w
+    mov      r4w, [r3 + 56]
+    mov      [rsp], r4w
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp
+    add      r1, r1
+    lea      r5, [r1 * 3]
+    mova     m2, [pw_punpcklwd]
+    mov      r6, r0
+
+.loop:
+    MODE_13_23 0
+    add      r6, 8
+    mov      r0, r6
+    add      r2, 8
+    dec      r4
+    jnz      .loop
+    RET
+
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_24(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang32_24, 4,7,7,0-(4*mmsize+10)
+    xchg     r2, r3
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+
+    movu     [rsp + 0*mmsize + 8], m0
+    movu     [rsp + 1*mmsize + 8], m1
+    movu     [rsp + 2*mmsize + 8], m2
+    movu     [rsp + 3*mmsize + 8], m3
+
+    mov      r4w, [r2 + 64]
+    mov      [rsp + 72], r4w
+    mov      r4w, [r3 + 12]
+    mov      [rsp + 6], r4w
+    mov      r4w, [r3 + 26]
+    mov      [rsp + 4], r4w
+    mov      r4w, [r3 + 38]
+    mov      [rsp + 2], r4w
+    mov      r4w, [r3 + 52]
+    mov      [rsp], r4w
+
+    lea      r3, [ang_table + 16 * 16]
+    mov      r4d, 8
+    mov      r2, rsp
+    add      r1, r1
+    lea      r5, [r1 * 3]
+    mov      r6, r0
+    mova     m2, [pw_punpcklwd]
+
+.loop:
+    MODE_12_24 0
+    add      r6, 8
+    mov      r0, r6
+    add      r2, 8
+    dec      r4
+    jnz      .loop
+    RET
+
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_25(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
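+; Mode 25 (intra angle -2) needs only one sample from the left reference to
+; extend the above array: the single word copied from [r3 + 32] below.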
+INIT_XMM sse4
+cglobal intra_pred_ang32_25, 4,7,7,0-(4*mmsize+4)
+    xchg     r2, r3
+    movu     m0, [r2 + 0*mmsize]
+    movu     m1, [r2 + 1*mmsize]
+    movu     m2, [r2 + 2*mmsize]
+    movu     m3, [r2 + 3*mmsize]
+    movu     [rsp + 0*mmsize + 2], m0
+    movu     [rsp + 1*mmsize + 2], m1
+    movu     [rsp + 2*mmsize + 2], m2
+    movu     [rsp + 3*mmsize + 2], m3
+    mov      r4w, [r3 + 32]
+    mov      [rsp], r4w
+    mov      r4w, [r2 + 64]
+    mov      [rsp + 66], r4w
+
+    lea         r3, [ang_table + 16 * 16]
+    mov         r4d, 8
+    mov         r2, rsp
+    add         r1, r1
+    lea         r5, [r1 * 3]
+    mov         r6, r0
+
+.loop:
+    MODE_11_25 0
+    add         r6, 8
+    mov         r0, r6
+    add         r2, 8
+    dec         r4
+    jnz         .loop
+    RET
+
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_26(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
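+; Mode 26 is the pure vertical mode: every one of the 32 rows is a copy of the
+; 32 above samples, so the loop simply stores m0-m3 to eight rows per
+; iteration (no boundary filtering is applied to 32x32 blocks).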
+INIT_XMM sse4
+cglobal intra_pred_ang32_26, 4,7,5
+    mov         r6d, 4
+    add         r1, r1
+    lea         r2, [r1 * 2]
+    lea         r4, [r1 * 3]
+    lea         r5, [r1 * 4]
+    mova        m4, [c_mode32_10_0]
+
+    movu        m0, [r3 + 2]
+    movu        m1, [r3 + 18]
+    movu        m2, [r3 + 34]
+    movu        m3, [r3 + 50]
+
+.loop:
+    movu        [r0], m0
+    movu        [r0 + 16], m1
+    movu        [r0 + 32], m2
+    movu        [r0 + 48], m3
+
+    movu        [r0 + r1], m0
+    movu        [r0 + r1 + 16], m1
+    movu        [r0 + r1 + 32], m2
+    movu        [r0 + r1 + 48], m3
+
+    movu        [r0 + r2], m0
+    movu        [r0 + r2 + 16], m1
+    movu        [r0 + r2 + 32], m2
+    movu        [r0 + r2 + 48], m3
+
+    movu        [r0 + r4], m0
+    movu        [r0 + r4 + 16], m1
+    movu        [r0 + r4 + 32], m2
+    movu        [r0 + r4 + 48], m3
+
+    add         r0, r5
+
+    movu        [r0], m0
+    movu        [r0 + 16], m1
+    movu        [r0 + 32], m2
+    movu        [r0 + 48], m3
+
+    movu        [r0 + r1], m0
+    movu        [r0 + r1 + 16], m1
+    movu        [r0 + r1 + 32], m2
+    movu        [r0 + r1 + 48], m3
+
+    movu        [r0 + r2], m0
+    movu        [r0 + r2 + 16], m1
+    movu        [r0 + r2 + 32], m2
+    movu        [r0 + r2 + 48], m3
+
+    movu        [r0 + r4], m0
+    movu        [r0 + r4 + 16], m1
+    movu        [r0 + r4 + 32], m2
+    movu        [r0 + r4 + 48], m3
+
+    add         r0, r5
+    dec         r6d
+    jnz         .loop
+    RET
+
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_27(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
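+; Modes 27 through 33 use positive angles, so the above reference is read
+; directly with no projection.  Each mode drives the macro it shares with its
+; horizontal-family mirror (MODE_9_27 ... MODE_3_33) with argument 0, looping
+; eight times and advancing dst and the reference by four pixels per pass.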
+INIT_XMM sse4
+cglobal intra_pred_ang32_27, 4,7,8
+    xchg   r2, r3mp
+    lea    r3, [ang_table + 16 * 16]
+    add    r1, r1
+    lea    r5, [r1 * 3]
+    mov    r6, r0
+    mov    r4d, 8
+
+.loop:
+    MODE_9_27 0
+    add    r6, 8
+    mov    r0, r6
+    add    r2, 8
+    dec    r4
+    jnz    .loop
+    RET
+
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_28(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang32_28, 4,7,8
+    xchg   r2, r3mp
+    lea    r3, [ang_table + 16 * 16]
+    add    r1, r1
+    lea    r5, [r1 * 3]
+    mov    r6, r0
+    mov    r4d, 8
+
+.loop:
+    MODE_8_28 0
+    add    r6, 8
+    mov    r0, r6
+    add    r2, 8
+    dec    r4
+    jnz    .loop
+    RET
+
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_29(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang32_29, 4,7,8
+    xchg   r2, r3mp
+    lea    r3, [ang_table + 16 * 16]
+    add    r1, r1
+    lea    r5, [r1 * 3]
+    mov    r6, r0
+    mov    r4d, 8
+
+.loop:
+    MODE_7_29 0
+    add    r6, 8
+    mov    r0, r6
+    add    r2, 8
+    dec    r4
+    jnz    .loop
+    RET
+
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_30(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang32_30, 4,7,8
+    xchg   r2, r3mp
+    lea    r3, [ang_table + 16 * 16]
+    add    r1, r1
+    lea    r5, [r1 * 3]
+    mov    r6, r0
+    mov    r4d, 8
+
+.loop:
+    MODE_6_30 0
+    add    r6, 8
+    mov    r0, r6
+    add    r2, 8
+    dec    r4
+    jnz    .loop
+    RET
+
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_31(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang32_31, 4,7,8
+    xchg   r2, r3mp
+    lea    r3, [ang_table + 16 * 16]
+    add    r1, r1
+    lea    r5, [r1 * 3]
+    mov    r6, r0
+    mov    r4d, 8
+
+.loop:
+    MODE_5_31 0
+    add    r6, 8
+    mov    r0, r6
+    add    r2, 8
+    dec    r4
+    jnz    .loop
+    RET
+
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_32(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang32_32, 4,7,8
+    xchg   r2, r3mp
+    lea    r3, [ang_table + 16 * 16]
+    add    r1, r1
+    lea    r5, [r1 * 3]
+    mov    r6, r0
+    mov    r4d, 8
+
+.loop:
+    MODE_4_32 0
+    add    r6, 8
+    mov    r0, r6
+    add    r2, 8
+    dec    r4
+    jnz    .loop
+    RET
+
+;------------------------------------------------------------------------------------------------------------------
+; void intraPredAng32_33(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+;------------------------------------------------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal intra_pred_ang32_33, 4,7,8
+    xchg   r2, r3mp
+    lea    r3, [ang_table + 16 * 16]
+    add    r1, r1
+    lea    r5, [r1 * 3]
+    mov    r6, r0
+    mov    r4d, 8
+.loop:
+    MODE_3_33 0
+    add    r6, 8
+    mov    r0, r6
+    add    r2, 8
+    dec    r4
+    jnz    .loop
+    RET

