[x265] [PATCH] asm: code for intra_pred[BLOCK_32x32] mode 2 and 34

chen chenm003 at 163.com
Wed Jan 15 13:46:31 CET 2014


>+    palignr         m2, m1, m0, 16
this is equivalent to 'mova m2, m1'

>+    movu            [r0], m2
>+    movu            m0, [r2 + 50]
>+    palignr         m2, m3, m1, 16
>+    movu            [r0 + 16], m2
>+    palignr         m2, m3, m1, 1
>+    movu            [r0 + r1], m2
>+    palignr         m2, m0, m3, 1
>+    movu            [r0 + r1 + 16], m2
>+    palignr         m2, m3, m1, 2
>+    movu            [r0 + r1 * 2], m2
>+    palignr         m2, m0, m3, 2
>+    movu            [r0 + r1 * 2 + 16], m2
>+    palignr         m2, m3, m1, 3
>+    movu            [r0 + r3], m2
>+    palignr         m2, m0, m3, 3
>+    movu            [r0 + r3 + 16], m2
>+
>+    lea             r0, [r0 + r1 * 4]
>+
>+    palignr         m2, m3, m1, 4
>+    movu            [r0], m2
>+    palignr         m2, m0, m3, 4
>+    movu            [r0 + 16], m2
>+    palignr         m2, m3, m1, 5
>+    movu            [r0 + r1], m2
>+    palignr         m2, m0, m3, 5
>+    movu            [r0 + r1 + 16], m2
>+    palignr         m2, m3, m1, 6
>+    movu            [r0 + r1 * 2], m2
>+    palignr         m2, m0, m3, 6
>+    movu            [r0 + r1 * 2 + 16], m2
>+    palignr         m2, m3, m1, 7
>+    movu            [r0 + r3], m2
>+    palignr         m2, m0, m3, 7
>+    movu            [r0 + r3 + 16], m2
>+
>+    lea             r0, [r0 + r1 * 4]
>+
>+    palignr         m2, m3, m1, 8
>+    movu            [r0], m2
>+    palignr         m2, m0, m3, 8
>+    movu            [r0 + 16], m2
>+    palignr         m2, m3, m1, 9
>+    movu            [r0 + r1], m2
>+    palignr         m2, m0, m3, 9
>+    movu            [r0 + r1 + 16], m2
>+    palignr         m2, m3, m1, 10
>+    movu            [r0 + r1 * 2], m2
>+    palignr         m2, m0, m3, 10
>+    movu            [r0 + r1 * 2 + 16], m2
>+    palignr         m2, m3, m1, 11
>+    movu            [r0 + r3], m2
>+    palignr         m2, m0, m3, 11
>+    movu            [r0 + r3 + 16], m2
>+
>+    lea             r0, [r0 + r1 * 4]
>+
>+    palignr         m2, m3, m1, 12
>+    movu            [r0], m2
>+    palignr         m2, m0, m3, 12
>+    movu            [r0 + 16], m2
>+    palignr         m2, m3, m1, 13
>+    movu            [r0 + r1], m2
>+    palignr         m2, m0, m3, 13
>+    movu            [r0 + r1 + 16], m2
>+    palignr         m2, m3, m1, 14
>+    movu            [r0 + r1 * 2], m2
>+    palignr         m2, m0, m3, 14
>+    movu            [r0 + r1 * 2 + 16], m2
>+    palignr         m2, m3, m1, 15
>+    movu            [r0 + r3], m2
>+    palignr         m2, m0, m3, 15
>+    movu            [r0 + r3 + 16], m2
>+    RET
>+
> ;-----------------------------------------------------------------------------
> ; void all_angs_pred_4x4(pixel *dest, pixel *above0, pixel *left0, pixel *above1, pixel *left1, bool bLuma)
> ;-----------------------------------------------------------------------------
>_______________________________________________
>x265-devel mailing list
>x265-devel at videolan.org
>https://mailman.videolan.org/listinfo/x265-devel
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://mailman.videolan.org/pipermail/x265-devel/attachments/20140115/3629827e/attachment-0001.html>


More information about the x265-devel mailing list