[x265] [PATCH] asm: code for intra_pred[BLOCK_16x16] mode 3

Murugan Vairavel murugan at multicorewareinc.com
Mon Jan 20 09:04:26 CET 2014


Please ignore this patch; it needs modifications.
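For reference, the asm in the quoted patch below vectorizes the standard HEVC angular interpolation for mode 3 (intraPredAngle = 26). Here is a minimal C sketch of the per-pixel computation it implements; the function and argument names are illustrative only, not x265's primitive signature:

    #include <stdint.h>

    /* Reference sketch (not x265 code): 16x16 angular intra prediction, mode 3.
     * ref[] holds the neighbouring samples with ref[0] as the corner pixel,
     * matching the "movu m0, [r2 + 1]" load in the asm below. */
    static void intra_ang16_mode3_c(uint8_t *dst, intptr_t dstStride, const uint8_t *ref)
    {
        const int angle = 26;                 /* intraPredAngle for mode 3 */
        for (int k = 0; k < 16; k++)          /* one prediction line per k */
        {
            int pos  = (k + 1) * angle;
            int idx  = pos >> 5;              /* integer offset into ref[] */
            int frac = pos & 31;              /* 26, 20, 14, 8, 2, 28, ... as in the [..] comments */
            for (int i = 0; i < 16; i++)
            {
                int v = ((32 - frac) * ref[idx + i + 1] + frac * ref[idx + i + 2] + 16) >> 5;
                /* mode 3 is a horizontal mode, so the block is written transposed;
                 * the asm does this with TRANSPOSE_STORE_8x8 */
                dst[i * dstStride + k] = (uint8_t)v;
            }
        }
    }

The (32 - frac, frac) weight pairs are what the ang_table rows addressed via [r3 +/- n * 16] supply to pmaddubsw, and pmulhrsw against pw_1024 performs the final (+16) >> 5 rounding.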



On Mon, Jan 20, 2014 at 11:49 AM, chen <chenm003 at 163.com> wrote:

> right
>
> At 2014-01-20 13:24:20,murugan at multicorewareinc.com wrote:
>
> ># HG changeset patch
> ># User Murugan Vairavel <murugan at multicorewareinc.com>
> ># Date 1390195451 -19800
> >#      Mon Jan 20 10:54:11 2014 +0530
> ># Node ID 879feee2a43535ff490a3d82cebac64b03b3db8d
> ># Parent  c88314c4a1a1bd0182be180e048f3788de0c2108
> >asm: code for intra_pred[BLOCK_16x16] mode 3
> >
> >diff -r c88314c4a1a1 -r 879feee2a435 source/common/x86/asm-primitives.cpp
> >--- a/source/common/x86/asm-primitives.cpp Fri Jan 17 12:18:25 2014 +0530
> >+++ b/source/common/x86/asm-primitives.cpp Mon Jan 20 10:54:11 2014 +0530
> >@@ -1011,6 +1011,8 @@
> >         SETUP_INTRA_ANG4(32, 4, sse4);
> >         SETUP_INTRA_ANG4(33, 3, sse4);
> >
> >+        SETUP_INTRA_ANG16(3, 3, sse4);
> >+
> >         p.dct[DCT_8x8] = x265_dct8_sse4;
> >     }
> >     if (cpuMask & X265_CPU_AVX)
> >diff -r c88314c4a1a1 -r 879feee2a435 source/common/x86/intrapred8.asm
> >--- a/source/common/x86/intrapred8.asm Fri Jan 17 12:18:25 2014 +0530
> >+++ b/source/common/x86/intrapred8.asm Mon Jan 20 10:54:11 2014 +0530
> >@@ -1182,6 +1182,159 @@
> >     movu            [r0 + r1], m2
> >     RET
> >
> >+%macro TRANSPOSE_STORE_8x8 1
> >+    punpckhbw   m0,        m4, m5
> >+    punpcklbw   m4,        m5
> >+    punpckhbw   m2,        m4, m0
> >+    punpcklbw   m4,        m0
> >+
> >+    punpckhbw   m0,        m6, m1
> >+    punpcklbw   m6,        m1
> >+    punpckhbw   m1,        m6, m0
> >+    punpcklbw   m6,        m0
> >+
> >+    punpckhdq   m5,        m4, m6
> >+    punpckldq   m4,        m6
> >+    punpckldq   m6,        m2, m1
> >+    punpckhdq   m2,        m1
> >+
> >+    movh        [r0         + %1 * 8], m4
> >+    movhps      [r0 +  r1   + %1 * 8], m4
> >+    movh        [r0 +  r1*2 + %1 * 8], m5
> >+    movhps      [r0 +  r5   + %1 * 8], m5
> >+    movh        [r6         + %1 * 8], m6
> >+    movhps      [r6 +  r1   + %1 * 8], m6
> >+    movh        [r6 +  r1*2 + %1 * 8], m2
> >+    movhps      [r6 +  r5   + %1 * 8], m2
> >+%endmacro
> >+
> >+INIT_XMM sse4
> >+cglobal intra_pred_ang16_3, 3,7,8
> >+
> >+    lea         r3,        [ang_table + 16 * 16]
> >+    mov         r4d, 2
> >+    lea         r5, [r1 * 3]            ; r5 -> 3 * stride
> >+    lea         r6, [r0 + r1 * 4]       ; r6 -> 4 * stride
> >+    mova        m7,        [pw_1024]
> >+
> >+.loop:
> >+    movu        m0,        [r2 + 1]
> >+    palignr     m1,        m0, 1
> >+
> >+    punpckhbw   m2,        m0, m1
> >+    punpcklbw   m0,        m1
> >+    palignr     m1,        m2, m0, 2
> >+
> >+    movu        m3,        [r3 + 10 * 16]             ; [26]
> >+    movu        m6,        [r3 + 4 * 16]              ; [20]
> >+
> >+    pmaddubsw   m4,        m0, m3
> >+    pmulhrsw    m4,        m7
> >+    pmaddubsw   m1,        m6
> >+    pmulhrsw    m1,        m7
> >+    packuswb    m4,        m1
> >+
> >+    palignr     m5,        m2, m0, 4
> >+
> >+    movu        m3,        [r3 - 2 * 16]              ; [14]
> >+    pmaddubsw   m5,        m3
> >+    pmulhrsw    m5,        m7
> >+
> >+    palignr     m6,        m2, m0, 6
> >+
> >+    movu        m3,        [r3 - 8 * 16]              ; [ 8]
> >+    pmaddubsw   m6,        m3
> >+    pmulhrsw    m6,        m7
> >+    packuswb    m5,        m6
> >+
> >+    palignr     m1,        m2, m0, 8
> >+
> >+    movu        m3,        [r3 - 14 * 16]             ; [ 2]
> >+    pmaddubsw   m6,        m1, m3
> >+    pmulhrsw    m6,        m7
> >+
> >+    movu        m3,        [r3 + 12 * 16]             ; [28]
> >+    pmaddubsw   m1,        m3
> >+    pmulhrsw    m1,        m7
> >+    packuswb    m6,        m1
> >+
> >+    palignr     m1,        m2, m0, 10
> >+
> >+    movu        m3,        [r3 + 6 * 16]              ; [22]
> >+    pmaddubsw   m1,        m3
> >+    pmulhrsw    m1,        m7
> >+
> >+    palignr     m2,        m0, 12
> >+
> >+    movu        m3,        [r3]                       ; [16]
> >+    pmaddubsw   m2,        m3
> >+    pmulhrsw    m2,        m7
> >+    packuswb    m1,        m2
> >+
> >+    TRANSPOSE_STORE_8x8 0
> >+
> >+    movu        m0,        [r2 + 8]
> >+    palignr     m1,        m0, 1
> >+
> >+    punpckhbw   m2,        m0, m1
> >+    punpcklbw   m0,        m1
> >+    palignr     m5,        m2, m0, 2
> >+
> >+    movu        m3,        [r3 - 6 * 16]              ; [10]
> >+    movu        m6,        [r3 - 12 * 16]             ; [04]
> >+
> >+    pmaddubsw   m4,        m0, m3
> >+    pmulhrsw    m4,        m7
> >+    pmaddubsw   m1,        m5, m6
> >+    pmulhrsw    m1,        m7
> >+    packuswb    m4,        m1
> >+
> >+    movu        m3,        [r3 + 14 * 16]             ; [30]
> >+    pmaddubsw   m5,        m3
> >+    pmulhrsw    m5,        m7
> >+
> >+    palignr     m6,        m2, m0, 4
> >+
> >+    movu        m3,        [r3 + 8 * 16]              ; [24]
> >+    pmaddubsw   m6,        m3
> >+    pmulhrsw    m6,        m7
> >+    packuswb    m5,        m6
> >+
> >+    palignr     m1,        m2, m0, 6
> >+
> >+    movu        m3,        [r3 + 2 * 16]              ; [18]
> >+    pmaddubsw   m6,        m1, m3
> >+    pmulhrsw    m6,        m7
> >+
> >+    palignr     m1,        m2, m0, 8
> >+
> >+    movu        m3,        [r3 - 4 * 16]              ; [12]
> >+    pmaddubsw   m1,        m3
> >+    pmulhrsw    m1,        m7
> >+    packuswb    m6,        m1
> >+
> >+    palignr     m1,        m2, m0, 10
> >+
> >+    movu        m3,        [r3 - 10 * 16]             ; [06]
> >+    pmaddubsw   m1,        m3
> >+    pmulhrsw    m1,        m7
> >+
> >+    palignr     m2,        m0, 12
> >+
> >+    movu        m3,        [r3 - 16 * 16]             ; [0]
> >+    pmaddubsw   m2,        m3
> >+    pmulhrsw    m2,        m7
> >+    packuswb    m1,        m2
> >+
> >+    TRANSPOSE_STORE_8x8 1
> >+
> >+    lea         r0, [r6 + r1 * 4]
> >+    lea         r6, [r6 + r1 * 8]
> >+    add         r2, 8
> >+    dec         r4
> >+    jnz        .loop
> >+
> >+    RET
>
> _______________________________________________
> x265-devel mailing list
> x265-devel at videolan.org
> https://mailman.videolan.org/listinfo/x265-devel
>
>


-- 
With Regards,

Murugan. V
+919659287478