[x265] [PATCH] asm: intra_pred_ang8 assembly code for modes 3 and 33

yuvaraj at multicorewareinc.com
Fri Jan 10 13:21:32 CET 2014


# HG changeset patch
# User Yuvaraj Venkatesh <yuvaraj at multicorewareinc.com>
# Date 1389356193 -19800
#      Fri Jan 10 17:46:33 2014 +0530
# Node ID 2242d8d235d41e946fc02f0f185b562d6ef3849e
# Parent  80b63c3ee144e6edbafbbe281ad3d1d8505be1f6
asm: intra_pred_ang8 assembly code for modes 3 and 33
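
For reviewers less familiar with the angular modes, below is a minimal C sketch (not code from the x265 tree; the function name and parameters are illustrative) of what the new intra_pred_ang8_3 primitive computes. It assumes the HEVC intraPredAngle of 26 shared by modes 3 and 33, and that the ang_table rows hold the (32 - w, w) byte-pair weights consumed by pmaddubsw:

    /* Reference sketch only -- not taken from the x265 sources.
     * Modes 3 and 33 share intraPredAngle = 26; mode 33 predicts from the
     * above reference row, mode 3 from the left column with the result
     * transposed (the jz .store path in the asm skips the transpose for
     * mode 33). */
    static void intra_pred_ang8_26_c(uint8_t *dst, intptr_t dstStride,
                                     const uint8_t *ref, int bTranspose)
    {
        for (int y = 0; y < 8; y++)
        {
            int pos   = (y + 1) * 26;   /* accumulated angle                */
            int idx   = pos >> 5;       /* whole-sample offset              */
            int fract = pos & 31;       /* 26,20,14,8,2,28,22,16 for y=0..7 */

            for (int x = 0; x < 8; x++)
            {
                int val = ((32 - fract) * ref[x + idx + 1]
                           +      fract * ref[x + idx + 2] + 16) >> 5;
                if (bTranspose)
                    dst[x * dstStride + y] = (uint8_t)val;   /* mode 3  */
                else
                    dst[y * dstStride + x] = (uint8_t)val;   /* mode 33 */
            }
        }
    }

The per-row weights noted in the asm comments ([26], [20], [14], [8], [2], [28], [22], [16]) are exactly (y + 1) * 26 & 31, and pmulhrsw against pw_1024 implements the (+ 16) >> 5 rounding that follows the pmaddubsw multiply-add.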

diff -r 80b63c3ee144 -r 2242d8d235d4 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Thu Jan 09 12:50:16 2014 +0550
+++ b/source/common/x86/asm-primitives.cpp	Fri Jan 10 17:46:33 2014 +0530
@@ -1006,6 +1006,9 @@
         SETUP_INTRA_ANG4(32, 4, sse4);
         SETUP_INTRA_ANG4(33, 3, sse4);
 
+        SETUP_INTRA_ANG8(3, 3, sse4);
+        SETUP_INTRA_ANG8(33, 3, sse4);
+
         p.dct[DCT_8x8] = x265_dct8_sse4;
     }
     if (cpuMask & X265_CPU_AVX)
diff -r 80b63c3ee144 -r 2242d8d235d4 source/common/x86/intrapred8.asm
--- a/source/common/x86/intrapred8.asm	Thu Jan 09 12:50:16 2014 +0550
+++ b/source/common/x86/intrapred8.asm	Fri Jan 10 17:46:33 2014 +0530
@@ -1106,7 +1106,7 @@
     movd        [r0], m0
     RET
 ;-----------------------------------------------------------------------------
-; void intraPredAng(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
+; void intraPredAng8(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
 ;-----------------------------------------------------------------------------
 INIT_XMM ssse3
 cglobal intra_pred_ang8_2, 3,5,2
@@ -1133,6 +1133,98 @@
     movh        [r0 + r4],      m1
     RET
 
+INIT_XMM sse4
+cglobal intra_pred_ang8_3, 3,5,7
+    cmp         r4m,       byte 33
+    cmove       r2,        r3mp
+    lea         r3,        [ang_table + 20 * 16]
+
+    movu        m0,        [r2 + 1]                   ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+    palignr     m1,        m0, 1                      ; [x 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+
+    punpckhbw   m2,        m0, m1                     ; [x 16 16 15 15 14 14 13 13 12 12 11 11 10 10 9]
+    punpcklbw   m0,        m1                         ; [9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
+    palignr     m1,        m2, m0, 2                  ; [10 9 9 8 8 7 7 6 6 5 5 4 4 3 3 2]
+
+    movu        m3,        [r3 + 6 * 16]              ; [26]
+    movu        m4,        [r3]                       ; [20]
+
+    pmaddubsw   m6,        m0, m3
+    pmulhrsw    m6,        [pw_1024]
+    pmaddubsw   m1,        m4
+    pmulhrsw    m1,        [pw_1024]
+    packuswb    m4,        m6, m1
+
+    palignr     m1,        m2, m0, 4                  ; [11 10 10 9 9 8 8 7 7 6 6 5 5 4 4 3]
+
+    movu        m3,        [r3 - 6 * 16]              ; [14]
+    pmaddubsw   m1,        m3
+    pmulhrsw    m1,        [pw_1024]
+
+    palignr     m6,        m2, m0, 6                  ; [12 11 11 10 10 9 9 8 8 7 7 6 6 5 5 4]
+
+    movu        m3,        [r3 - 12 * 16]             ; [ 8]
+    pmaddubsw   m6,        m3
+    pmulhrsw    m6,        [pw_1024]
+    packuswb    m5,        m1, m6
+
+    palignr     m1,        m2, m0, 8                  ; [13 12 12 11 11 10 10 9 9 8 8 7 7 6 6 5]
+
+    movu        m3,        [r3 - 18 * 16]             ; [ 2]
+    pmaddubsw   m6,        m1, m3
+    pmulhrsw    m6,        [pw_1024]
+
+    movu        m3,        [r3 + 8 * 16]              ; [28]
+    pmaddubsw   m1,        m3
+    pmulhrsw    m1,        [pw_1024]
+    packuswb    m6,        m1
+
+    palignr     m1,        m2, m0, 10                 ; [14 13 13 12 12 11 11 10 10 9 9 8 8 7 7 6]
+
+    movu        m3,        [r3 + 2 * 16]              ; [22]
+    pmaddubsw   m1,        m3
+    pmulhrsw    m1,        [pw_1024]
+
+    palignr     m2,        m0, 12                     ; [15 14 14 13 13 12 12 11 11 10 10 9 9 8 8 7]
+
+    movu        m3,        [r3 - 4 * 16]              ; [16]
+    pmaddubsw   m2,        m3
+    pmulhrsw    m2,        [pw_1024]
+    packuswb    m1,        m2
+
+    jz         .store
+
+    ; transpose 8x8
+    punpckhbw   m0,        m4, m5
+    punpcklbw   m4,        m5
+    punpckhbw   m2,        m4, m0
+    punpcklbw   m4,        m0
+
+    punpckhbw   m0,        m6, m1
+    punpcklbw   m6,        m1
+    punpckhbw   m1,        m6, m0
+    punpcklbw   m6,        m0
+
+    punpckhdq   m5,        m4, m6
+    punpckldq   m4,        m6
+    punpckldq   m6,        m2, m1
+    punpckhdq   m2,        m1
+    mova        m1,        m2
+
+.store:
+    lea         r4,              [r1 * 3]
+    movh        [r0],            m4
+    movhps      [r0 + r1],       m4
+    movh        [r0 + r1 * 2],   m5
+    movhps      [r0 + r4],       m5
+    lea         r0,              [r0 + r1 * 4]
+    movh        [r0],            m6
+    movhps      [r0 + r1],       m6
+    movh        [r0 + r1 * 2],   m1
+    movhps      [r0 + r4],       m1
+
+    RET
+
 ;-----------------------------------------------------------------------------
 ; void intraPredAng16(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
 ;-----------------------------------------------------------------------------

