[x265] [PATCH] asm: luma_vpp[8x8] in avx2: improve 701c->396c

chen chenm003 at 163.com
Mon Nov 17 17:16:55 CET 2014


 

At 2014-11-17 17:20:22, "Divya Manivannan" <divya at multicorewareinc.com> wrote:
># HG changeset patch
># User Divya Manivannan <divya at multicorewareinc.com>
># Date 1416215891 -19800
>#      Mon Nov 17 14:48:11 2014 +0530
># Node ID 5fec38f8c75606ddf4d5b7c9244dfd373892af5d
># Parent  27d36c4b4a27d2872430c6a6fc538fbddcf791e6
>asm: luma_vpp[8x8] in avx2: improve 701c->396c
>
>diff -r 27d36c4b4a27 -r 5fec38f8c756 source/common/x86/ipfilter8.asm
>--- a/source/common/x86/ipfilter8.asm	Mon Nov 17 01:30:26 2014 +0530
>+++ b/source/common/x86/ipfilter8.asm	Mon Nov 17 14:48:11 2014 +0530
>@@ -122,6 +122,26 @@
>                   times 8 db 58, -10
>                   times 8 db 4, -1
> 
>+tab_LumaCoeffVer_32: times 16 db 0, 0
>+                     times 16 db 0, 64
>+                     times 16 db 0, 0
>+                     times 16 db 0, 0
>+
>+                     times 16 db -1, 4
>+                     times 16 db -10, 58
>+                     times 16 db 17, -5
>+                     times 16 db 1, 0
>+
>+                     times 16 db -1, 4
>+                     times 16 db -11, 40
>+                     times 16 db 40, -11
>+                     times 16 db 4, -1
>+
>+                     times 16 db 0, 1
>+                     times 16 db -5, 17
>+                     times 16 db 58, -10
>+                     times 16 db 4, -1
>+
> tab_c_64_n64:   times 8 db 64, -64
> 
> 
>@@ -3532,6 +3552,80 @@
> ;-------------------------------------------------------------------------------------------------------------
> FILTER_VER_LUMA_4xN 4, 16, ps
> 
>+%macro PROCESS_LUMA_AVX2_W8_8R 0
>+    movq            xm1, [r0]                       ; m1 = row 0
>+    movq            xm2, [r0 + r1]                  ; m2 = row 1
>+    punpcklbw       xm1, xm2                        ; m1 = [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
>+    lea             r6, [r0 + r1 * 2]

why not r0? (advance r0 directly instead of copying it into r6; r0 is not read again after this macro in the 8x8 function, so r6 becomes unnecessary)
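i.e., a sketch, assuming the caller never reads r0 again (true for the 8x8 function below):

    lea             r0, [r0 + r1 * 2]               ; advance the source pointer itself
    movq            xm3, [r0]                       ; m3 = row 2

with the later lea r6, [r6 + r1 * 2] lines changed the same way, r6 is never needed and the cglobal line can drop to 6 GPRs.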
 
>+    movq            xm3, [r6]                       ; m3 = row 2
>+    punpcklbw       xm2, xm3                        ; m2 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10]
>+    vinserti128     m5, m1, xm2, 1                  ; m5 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10] - [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]

In this case your row layout [1 0] is no problem;
for other sizes I suggest the layout [2 0], which may save one vextracti128 later, because packuswb combines within 128-bit lanes.
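For illustration only (this is not what the patch builds): with a hypothetical layout m5 = rows [2 0] and m2 = rows [3 1], packuswb packs each 128-bit lane independently, so the packed rows come out already in memory order:

    packuswb        m5, m2                          ; lane 0 = rows [1 0], lane 1 = rows [3 2]
    movq            [r2], xm5                       ; row 0
    movhps          [r2 + r3], xm5                  ; row 1
    vextracti128    xm5, m5, 1
    lea             r2, [r2 + r3 * 2]
    movq            [r2], xm5                       ; row 2
    movhps          [r2 + r3], xm5                  ; row 3

For 8x8 the instruction count is the same either way; the saving appears at other sizes.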
 
>+    pmaddubsw       m5, [r5]
>+    movq            xm4, [r6 + r1]                  ; m4 = row 3
>+    punpcklbw       xm3, xm4                        ; m3 = [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
>+    lea             r6, [r6 + r1 * 2]
>+    movq            xm1, [r6]                       ; m1 = row 4
>+    punpcklbw       xm4, xm1                        ; m4 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30]
>+    vinserti128     m2, m3, xm4, 1                  ; m2 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30] - [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
>+    pmaddubsw       m0, m2, [r5 + 32]

32 -> 1 * mmsize (and likewise 64 -> 2 * mmsize, 96 -> 3 * mmsize below)
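i.e., a sketch that assembles to the same bytes (mmsize is 32 under INIT_YMM) but keeps the offset tied to the vector width:

    pmaddubsw       m0, m2, [r5 + 1 * mmsize]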
>+    paddw           m5, m0
>+    pmaddubsw       m2, [r5]
>+    movq            xm3, [r6 + r1]                  ; m3 = row 5
>+    punpcklbw       xm1, xm3                        ; m1 = [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
>+    lea             r6, [r6 + r1 * 2]
>+    movq            xm4, [r6]                       ; m4 = row 6
>+    punpcklbw       xm3, xm4                        ; m3 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50]
>+    vinserti128     m1, m1, xm3, 1                  ; m1 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50] - [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
>+    pmaddubsw       m3, m1, [r5 + 64]
>+    paddw           m5, m3
>+    pmaddubsw       m0, m1, [r5 + 32]
>+    paddw           m2, m0
>+    pmaddubsw       m1, [r5]
>+    movq            xm3, [r6 + r1]                  ; m3 = row 7
>+    punpcklbw       xm4, xm3                        ; m4 = [77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60]
>+    lea             r6, [r6 + r1 * 2]
>+    movq            xm0, [r6]                       ; m0 = row 8
>+    punpcklbw       xm3, xm0                        ; m3 = [87 77 86 76 85 75 84 74 83 73 82 72 81 71 80 70]
>+    vinserti128     m4, m4, xm3, 1                  ; m4 = [87 77 86 76 85 75 84 74 83 73 82 72 81 71 80 70] - [77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60]
>+    pmaddubsw       m3, m4, [r5 + 96]
>+    paddw           m5, m3
>+    pmaddubsw       m3, m4, [r5 + 64]
>+    paddw           m2, m3
>+    pmaddubsw       m3, m4, [r5 + 32]
>+    paddw           m1, m3
>+    pmaddubsw       m4, [r5]
>+    movq            xm3, [r6 + r1]                  ; m3 = row 9
>+    punpcklbw       xm0, xm3                        ; m0 = [97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80]
>+    lea             r6, [r6 + r1 * 2]
>+    movq            xm6, [r6]                       ; m6 = row 10
>+    punpcklbw       xm3, xm6                        ; m3 = [A7 97 A6 96 A5 95 A4 94 A3 93 A2 92 A1 91 A0 90]
>+    vinserti128     m0, m0, xm3, 1                  ; m0 = [A7 97 A6 96 A5 95 A4 94 A3 93 A2 92 A1 91 A0 90] - [97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80]
>+    pmaddubsw       m3, m0, [r5 + 96]
>+    paddw           m2, m3
>+    pmaddubsw       m3, m0, [r5 + 64]
>+    paddw           m1, m3
>+    pmaddubsw       m0, [r5 + 32]
>+    paddw           m4, m0
>+
>+    movq            xm3, [r6 + r1]                  ; m3 = row 11
>+    punpcklbw       xm6, xm3                        ; m6 = [B7 A7 B6 A6 B5 A5 B4 A4 B3 A3 B2 A2 B1 A1 B0 A0]
>+    lea             r6, [r6 + r1 * 2]
>+    movq            xm0, [r6]                       ; m0 = row 12
>+    punpcklbw       xm3, xm0                        ; m3 = [C7 B7 C6 B6 C5 B5 C4 B4 C3 B3 C2 B2 C1 B1 C0 B0]
>+    vinserti128     m6, m6, xm3, 1                  ; m6 = [C7 B7 C6 B6 C5 B5 C4 B4 C3 B3 C2 B2 C1 B1 C0 B0] - [B7 A7 B6 A6 B5 A5 B4 A4 B3 A3 B2 A2 B1 A1 B0 A0]
>+    pmaddubsw       m3, m6, [r5 + 96]
>+    paddw           m1, m3
>+    pmaddubsw       m6, [r5 + 64]
>+    paddw           m4, m6
>+    movq            xm3, [r6 + r1]                  ; m3 = row 13
>+    punpcklbw       xm0, xm3                        ; m0 = [D7 C7 D6 C6 D5 C5 D4 C4 D3 C3 D2 C2 D1 C1 D0 C0]
>+    movq            xm6, [r6 + r1 * 2]              ; m6 = row 14
>+    punpcklbw       xm3, xm6                        ; m3 = [E7 D7 E6 D6 E5 D5 E4 D4 E3 D3 E2 D2 E1 D1 E0 D0]
>+    vinserti128     m0, m0, xm3, 1                  ; m0 = [E7 D7 E6 D6 E5 D5 E4 D4 E3 D3 E2 D2 E1 D1 E0 D0] - [D7 C7 D6 C6 D5 C5 D4 C4 D3 C3 D2 C2 D1 C1 D0 C0]
>+    pmaddubsw       m0, [r5 + 96]
>+    paddw           m4, m0
>+%endmacro
>+
> ;-------------------------------------------------------------------------------------------------------------
> ; void interp_8tap_vert_%3_8x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
> ;-------------------------------------------------------------------------------------------------------------
>@@ -3601,6 +3695,42 @@
>     RET
> %endmacro
> 
>+INIT_YMM avx2
>+cglobal interp_8tap_vert_pp_8x8, 4,7,7
>+    mov             r4d, r4m
>+    shl             r4d, 7
>+    lea             r5, [r1 * 3]
>+    sub             r0, r5
>+
>+%ifdef PIC
>+    lea             r5, [tab_LumaCoeffVer_32]
>+    lea             r5, [r5 + r4]

add r5, r4 (a plain add is enough here; no need for a second lea)
 
>+%else
>+    lea             r5, [tab_LumaCoeffVer_32 + r4]
>+%endif
>+
>+    PROCESS_LUMA_AVX2_W8_8R
>+    pmulhrsw        m5, [pw_512]                    ; m5 = word: row 0, row 1
>+    pmulhrsw        m2, [pw_512]                    ; m2 = word: row 2, row 3
>+    pmulhrsw        m1, [pw_512]                    ; m1 = word: row 4, row 5
>+    pmulhrsw        m4, [pw_512]                    ; m4 = word: row 6, row 7

Here you have many free registers; buffer [pw_512] in one of them to get more performance.
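e.g., a sketch: m3 is dead once PROCESS_LUMA_AVX2_W8_8R finishes, so it can hold the constant, and four memory operands become one load:

    mova            m3, [pw_512]                    ; load the rounding constant once
    pmulhrsw        m5, m3                          ; m5 = word: row 0, row 1
    pmulhrsw        m2, m3                          ; m2 = word: row 2, row 3
    pmulhrsw        m1, m3                          ; m1 = word: row 4, row 5
    pmulhrsw        m4, m3                          ; m4 = word: row 6, row 7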
>+    packuswb        m5, m2
>+    packuswb        m1, m4
>+    vextracti128    xm2, m5, 1
>+    vextracti128    xm4, m1, 1
>+    movq            [r2], xm5
>+    movq            [r2 + r3], xm2
>+    lea             r2, [r2 + r3 * 2]

We have a free register; buffer r3 * 3 in it to reduce the LEAs.
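e.g., a sketch; r4 is free once coeffIdx has been consumed at the top of the function:

    lea             r4, [r3 * 3]                    ; dstStride * 3, computed once
    movq            [r2], xm5                       ; row 0
    movq            [r2 + r3], xm2                  ; row 1
    movhps          [r2 + r3 * 2], xm5              ; row 2
    movhps          [r2 + r4], xm2                  ; row 3
    lea             r2, [r2 + r3 * 4]
    movq            [r2], xm1                       ; row 4
    movq            [r2 + r3], xm4                  ; row 5
    movhps          [r2 + r3 * 2], xm1              ; row 6
    movhps          [r2 + r4], xm4                  ; row 7

so the three pointer-advance LEAs in the store sequence become one.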
>+    movhps          [r2], xm5
>+    movhps          [r2 + r3], xm2
>+    lea             r2, [r2 + r3 * 2]
>+    movq            [r2], xm1
>+    movq            [r2 + r3], xm4
>+    lea             r2, [r2 + r3 * 2]
>+    movhps          [r2], xm1
>+    movhps          [r2 + r3], xm4
>+    RET