[x265] [PATCH] asm: luma_vpp[16x16] in avx2 for 32 bit: improve 2141c->1488c

Divya Manivannan divya at multicorewareinc.com
Wed Nov 19 13:21:01 CET 2014


# HG changeset patch
# User Divya Manivannan <divya at multicorewareinc.com>
# Date 1416399611 -19800
#      Wed Nov 19 17:50:11 2014 +0530
# Node ID b9ad1bc1610c0941eff5df6c5c0eb6a42ac9fdca
# Parent  ce962434be14a13338286ce6f824e2dfbd127eb2
asm: luma_vpp[16x16] in avx2 for 32 bit: improve 2141c->1488c
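
Add a 32-bit (x86) AVX2 path for the 16x16 vertical luma pel-to-pel filter;
the 64-bit path already exists above it. The 2141c->1488c figure in the
subject is the testbench cycle count for this primitive before and after.

For reference, a minimal scalar sketch of the operation being vectorized,
with illustrative names and an assumed 8-bit pixel depth (the real C
primitive lives in source/common/ipfilter.cpp):

    #include <algorithm>
    #include <cstdint>

    typedef uint8_t pixel;

    // 8-tap vertical filter, pixel in / pixel out, 16x16 block
    static void luma_vpp_16x16_ref(const pixel* src, intptr_t srcStride,
                                   pixel* dst, intptr_t dstStride,
                                   const int8_t coeff[8])
    {
        const int shift  = 6;               // luma coefficients sum to 64
        const int offset = 1 << (shift - 1);

        src -= 3 * srcStride;               // taps span rows -3 .. +4

        for (int row = 0; row < 16; row++)
        {
            for (int col = 0; col < 16; col++)
            {
                int sum = 0;
                for (int t = 0; t < 8; t++)
                    sum += coeff[t] * src[col + t * srcStride];

                int val = (sum + offset) >> shift;
                dst[col] = (pixel)std::min(std::max(val, 0), 255);
            }
            src += srcStride;
            dst += dstStride;
        }
    }

The AVX2 code below computes the same sums with pmaddubsw on interleaved
row pairs and performs the (sum + 32) >> 6 rounding with pmulhrsw against
pw_512.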

diff -r ce962434be14 -r b9ad1bc1610c source/common/x86/ipfilter8.asm
--- a/source/common/x86/ipfilter8.asm	Wed Nov 19 17:07:02 2014 +0530
+++ b/source/common/x86/ipfilter8.asm	Wed Nov 19 17:50:11 2014 +0530
@@ -3882,6 +3882,80 @@
 ;-------------------------------------------------------------------------------------------------------------
 FILTER_VER_LUMA_12xN 12, 16, ps
 
+%macro PROCESS_LUMA_AVX2_16_W8_8R 0
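+    ; Filters one 8-pixel-wide column for 8 output rows of the 8-tap
+    ; vertical filter: rows 0..14 are loaded 8 bytes at a time, adjacent
+    ; rows are interleaved (punpcklbw) and paired across lanes
+    ; (vinserti128), then pmaddubsw against the coefficient pairs at
+    ; [r5], [r5 + 32], [r5 + 64], [r5 + 96] accumulates word results into
+    ; m5/m2/m1/m4 for output rows 0-1/2-3/4-5/6-7 respectively.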
+    movq            xm1, [r0]                       ; m1 = row 0
+    movq            xm2, [r0 + r1]                  ; m2 = row 1
+    punpcklbw       xm1, xm2                        ; m1 = [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
+    lea             r6, [r0 + r1 * 2]
+    movq            xm3, [r6]                       ; m3 = row 2
+    punpcklbw       xm2, xm3                        ; m2 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10]
+    vinserti128     m5, m1, xm2, 1                  ; m5 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10] - [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
+    pmaddubsw       m5, [r5]
+    movq            xm4, [r6 + r1]                  ; m4 = row 3
+    punpcklbw       xm3, xm4                        ; m3 = [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
+    lea             r6, [r6 + r1 * 2]
+    movq            xm1, [r6]                       ; m1 = row 4
+    punpcklbw       xm4, xm1                        ; m4 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30]
+    vinserti128     m2, m3, xm4, 1                  ; m2 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30] - [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
+    pmaddubsw       m0, m2, [r5 + 32]
+    paddw           m5, m0
+    pmaddubsw       m2, [r5]
+    movq            xm3, [r6 + r1]                  ; m3 = row 5
+    punpcklbw       xm1, xm3                        ; m1 = [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
+    lea             r6, [r6 + r1 * 2]
+    movq            xm4, [r6]                       ; m4 = row 6
+    punpcklbw       xm3, xm4                        ; m3 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50]
+    vinserti128     m1, m1, xm3, 1                  ; m1 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50] - [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
+    pmaddubsw       m3, m1, [r5 + 64]
+    paddw           m5, m3
+    pmaddubsw       m0, m1, [r5 + 32]
+    paddw           m2, m0
+    pmaddubsw       m1, [r5]
+    movq            xm3, [r6 + r1]                  ; m3 = row 7
+    punpcklbw       xm4, xm3                        ; m4 = [77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60]
+    lea             r6, [r6 + r1 * 2]
+    movq            xm0, [r6]                       ; m0 = row 8
+    punpcklbw       xm3, xm0                        ; m3 = [87 77 86 76 85 75 84 74 83 73 82 72 81 71 80 70]
+    vinserti128     m4, m4, xm3, 1                  ; m4 = [87 77 86 76 85 75 84 74 83 73 82 72 81 71 80 70] - [77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60]
+    pmaddubsw       m3, m4, [r5 + 96]
+    paddw           m5, m3
+    pmaddubsw       m3, m4, [r5 + 64]
+    paddw           m2, m3
+    pmaddubsw       m3, m4, [r5 + 32]
+    paddw           m1, m3
+    pmaddubsw       m4, [r5]
+    movq            xm3, [r6 + r1]                  ; m3 = row 9
+    punpcklbw       xm0, xm3                        ; m0 = [97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80]
+    lea             r6, [r6 + r1 * 2]
+    movq            xm6, [r6]                       ; m6 = row 10
+    punpcklbw       xm3, xm6                        ; m3 = [A7 97 A6 96 A5 95 A4 94 A3 93 A2 92 A1 91 A0 90]
+    vinserti128     m0, m0, xm3, 1                  ; m0 = [A7 97 A6 96 A5 95 A4 94 A3 93 A2 92 A1 91 A0 90] - [97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80]
+    pmaddubsw       m3, m0, [r5 + 96]
+    paddw           m2, m3
+    pmaddubsw       m3, m0, [r5 + 64]
+    paddw           m1, m3
+    pmaddubsw       m0, [r5 + 32]
+    paddw           m4, m0
+
+    movq            xm3, [r6 + r1]                  ; m3 = row 11
+    punpcklbw       xm6, xm3                        ; m6 = [B7 A7 B6 A6 B5 A5 B4 A4 B3 A3 B2 A2 B1 A1 B0 A0]
+    lea             r6, [r6 + r1 * 2]
+    movq            xm0, [r6]                       ; m0 = row 12
+    punpcklbw       xm3, xm0                        ; m3 = [C7 B7 C6 B6 C5 B5 C4 B4 C3 B3 C2 B2 C1 B1 C0 B0]
+    vinserti128     m6, m6, xm3, 1                  ; m6 = [C7 B7 C6 B6 C5 B5 C4 B4 C3 B3 C2 B2 C1 B1 C0 B0] - [B7 A7 B6 A6 B5 A5 B4 A4 B3 A3 B2 A2 B1 A1 B0 A0]
+    pmaddubsw       m3, m6, [r5 + 96]
+    paddw           m1, m3
+    pmaddubsw       m6, [r5 + 64]
+    paddw           m4, m6
+    movq            xm3, [r6 + r1]                  ; m3 = row 13
+    punpcklbw       xm0, xm3                        ; m0 = [D7 C7 D6 C6 D5 C5 D4 C4 D3 C3 D2 C2 D1 C1 D0 C0]
+    movq            xm6, [r6 + r1 * 2]              ; m6 = row 14
+    punpcklbw       xm3, xm6                        ; m3 = [E7 D7 E6 D6 E5 D5 E4 D4 E3 D3 E2 D2 E1 D1 E0 D0]
+    vinserti128     m0, m0, xm3, 1                  ; m0 = [E7 D7 E6 D6 E5 D5 E4 D4 E3 D3 E2 D2 E1 D1 E0 D0] - [D7 C7 D6 C6 D5 C5 D4 C4 D3 C3 D2 C2 D1 C1 D0 C0]
+    pmaddubsw       m0, [r5 + 96]
+    paddw           m4, m0
+%endmacro
+
 INIT_YMM avx2
 %if ARCH_X86_64 == 1
 cglobal interp_8tap_vert_pp_16x16, 4, 7, 15
@@ -4171,6 +4245,58 @@
     movu            [r2 + r3 * 2], xm0
     movu            [r2 + r6], xm1
     RET
+
+%else
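+; x86-32 path: only ymm0-ymm7 and eight general registers are available,
+; so the 16x16 block is filtered as two 8-row bands, each band as two
+; 8-pixel-wide columns using the macro above.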
+cglobal interp_8tap_vert_pp_16x16, 4, 7, 7, 0-gprsize
+    mov             r4d, r4m
+    shl             r4d, 7
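+    ; each filter index occupies 128 bytes (4 x 32-byte coefficient-pair
+    ; rows) in tab_LumaCoeffVer_32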
+    lea             r5, [r1 * 3]
+    sub             r0, r5
+
+%ifdef PIC
+    lea             r5, [tab_LumaCoeffVer_32]
+    add             r5, r4
+%else
+    lea             r5, [tab_LumaCoeffVer_32 + r4]
+%endif
+
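+    ; all seven GPRs are in use (src/dst pointers, strides, column counter,
+    ; coefficient pointer, scratch row pointer), so keep the 8-row band
+    ; counter in the stack word reserved via 0-gprsize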
+    mov             word [rsp], 2
+
+.loopH:
+    mov             r4d, 2
+.loopW:
+    PROCESS_LUMA_AVX2_16_W8_8R
+    lea             r6, [r2]
+    mova            m3, [pw_512]
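+    ; pmulhrsw against pw_512 gives (x * 512 + 0x4000) >> 15 = (x + 32) >> 6,
+    ; the rounded downshift from the filtered word sums back to pixel range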
+    pmulhrsw        m5, m3                          ; m5 = word: row 0, row 1
+    pmulhrsw        m2, m3                          ; m2 = word: row 2, row 3
+    pmulhrsw        m1, m3                          ; m1 = word: row 4, row 5
+    pmulhrsw        m4, m3                          ; m4 = word: row 6, row 7
+    packuswb        m5, m2
+    packuswb        m1, m4
+    vextracti128    xm2, m5, 1
+    vextracti128    xm4, m1, 1
+    movq            [r6], xm5
+    movq            [r6 + r3], xm2
+    lea             r6, [r6 + r3 * 2]
+    movhps          [r6], xm5
+    movhps          [r6 + r3], xm2
+    lea             r6, [r6 + r3 * 2]
+    movq            [r6], xm1
+    movq            [r6 + r3], xm4
+    lea             r6, [r6 + r3 * 2]
+    movhps          [r6], xm1
+    movhps          [r6 + r3], xm4
+
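+    ; step src/dst to the next 8-pixel column; the lea pair below the loop
+    ; undoes the +16 and advances both pointers down 8 rows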
+    add             r0, 8
+    add             r2, 8
+    dec             r4d
+    jnz             .loopW
+    lea             r2, [r2 + r3 * 8 - 16]
+    lea             r0, [r0 + r1 * 8 - 16]
+    dec             word [rsp]
+    jnz             .loopH
+    RET
 %endif
 
 ;-------------------------------------------------------------------------------------------------------------

