[x265] [PATCH] asm: intra_pred_ang8 assembly code for mode 3 and 33

chen chenm003 at 163.com
Fri Jan 10 12:38:32 CET 2014


>diff -r 80b63c3ee144 -r bb6278b7d332 source/common/x86/intrapred8.asm
>--- a/source/common/x86/intrapred8.asm Thu Jan 09 12:50:16 2014 +0530
>+++ b/source/common/x86/intrapred8.asm Fri Jan 10 16:23:59 2014 +0530
>@@ -1106,7 +1106,7 @@
>     movd        [r0], m0
>     RET
> ;-----------------------------------------------------------------------------
>-; void intraPredAng(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
>+; void intraPredAng8(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
> ;-----------------------------------------------------------------------------
> INIT_XMM ssse3
> cglobal intra_pred_ang8_2, 3,5,2
>@@ -1133,6 +1133,98 @@
>     movh        [r0 + r4],      m1
>     RET
> 
>+INIT_XMM sse4
>+cglobal intra_pred_ang8_3, 3,5,8
>+    cmp         r4m,       byte 33
>+    cmove       r2,        r3mp
>+    lea         r3,        [ang_table + 20 * 16]
>+
>+    movu        m0,        [r2 + 1]                   ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
>+    palignr     m1,        m0, 1                      ; [x 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
>+
>+    punpckhbw   m2,        m0, m1                     ; [x 16 16 15 15 14 14 13 13 12 12 11 11 10 10 9]
>+    punpcklbw   m0,        m1                         ; [9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1]
>+    palignr     m1,        m2, m0, 2                  ; [10 9 9 8 8 7 7 6 6 5 5 4 4 3 3 2]
>+
>+    movu        m3,        [r3 + 6 * 16]              ; [26]
>+    movu        m4,        [r3]                       ; [20]
>+
>+    pmaddubsw   m6,        m0, m3
>+    pmulhrsw    m6,        [pw_1024]
>+    pmaddubsw   m7,        m1, m4
>+    pmulhrsw    m7,        [pw_1024]
>+    packuswb    m4,        m6, m7
>+
>+    palignr     m1,        m2, m0, 4                  ; [11 10 10 9 9 8 8 7 7 6 6 5 5 4 4 3]
>+
>+    movu        m3,        [r3 - 6 * 16]              ; [14]
>+    pmaddubsw   m6,        m1, m3
You don't need m1 anymore at this point, so why use a reg-reg copy here and below?
 
>+    pmulhrsw    m6,        [pw_1024]
>+
>+    palignr     m1,        m2, m0, 6                  ; [12 11 11 10 10 9 9 8 8 7 7 6 6 5 5 4]
>+
>+    movu        m3,        [r3 - 12 * 16]             ; [ 8]
>+    pmaddubsw   m7,        m1, m3
>+    pmulhrsw    m7,        [pw_1024]
>+    packuswb    m5,        m6, m7
>+
>+    palignr     m1,        m2, m0, 8                  ; [13 12 12 11 11 10 10 9 9 8 8 7 7 6 6 5]
>+
>+    movu        m3,        [r3 - 18 * 16]             ; [ 2]
>+    pmaddubsw   m6,        m1, m3
>+    pmulhrsw    m6,        [pw_1024]
>+
>+    movu        m3,        [r3 + 8 * 16]              ; [28]
>+    pmaddubsw   m7,        m1, m3
>+    pmulhrsw    m7,        [pw_1024]
>+    packuswb    m6,        m7
>+
>+    palignr     m1,        m2, m0, 10                 ; [14 13 13 12 12 11 11 10 10 9 9 8 8 7 7 6]
>+
>+    movu        m3,        [r3 + 2 * 16]              ; [22]
>+    pmaddubsw   m7,        m1, m3
>+    pmulhrsw    m7,        [pw_1024]
>+
>+    palignr     m1,        m2, m0, 12                 ; [15 14 14 13 13 12 12 11 11 10 10 9 9 8 8 7]
>+
>+    movu        m3,        [r3 - 4 * 16]              ; [16]
>+    pmaddubsw   m0,        m1, m3
>+    pmulhrsw    m0,        [pw_1024]
>+    packuswb    m7,        m0
>+
>+    jz         .store
>+
>+    ; transpose 8x8
>+    punpckhbw   m0,        m4, m5
>+    punpcklbw   m4,        m5
>+    punpckhbw   m1,        m4, m0
>+    punpcklbw   m4,        m0
>+
>+    punpckhbw   m0,        m6, m7
>+    punpcklbw   m6,        m7
>+    punpckhbw   m7,        m6, m0
>+    punpcklbw   m6,        m0
>+
>+    punpckhdq   m5,        m4, m6
>+    punpckldq   m4,        m6
>+    punpckldq   m6,        m1, m7
>+    punpckhdq   m1,        m7
>+    mova        m7,        m1
>+
>+.store:
>+    lea         r4,              [r1 * 3]
>+    movh        [r0],            m4
>+    movhps      [r0 + r1],       m4
>+    movh        [r0 + r1 * 2],   m5
>+    movhps      [r0 + r4],       m5
>+    lea         r0,              [r0 + r1 * 4]
>+    movh        [r0],            m6
>+    movhps      [r0 + r1],       m6
>+    movh        [r0 + r1 * 2],   m7
>+    movhps      [r0 + r4],       m7
>+
>+    RET
>+
> ;-----------------------------------------------------------------------------
> ; void intraPredAng16(pixel* dst, intptr_t dstStride, pixel *refLeft, pixel *refAbove, int dirMode, int bFilter)
> ;-----------------------------------------------------------------------------
>_______________________________________________
>x265-devel mailing list
>x265-devel at videolan.org
>https://mailman.videolan.org/listinfo/x265-devel
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://mailman.videolan.org/pipermail/x265-devel/attachments/20140110/6f8c6e86/attachment.html>


More information about the x265-devel mailing list