[x265] [PATCH 3 of 3] asm: AVX2 version luma_vpp[4x4], improve 391c -> 302c

Min Chen chenm003 at 163.com
Thu Oct 30 23:46:54 CET 2014


# HG changeset patch
# User Min Chen <chenm003 at 163.com>
# Date 1414709200 25200
# Node ID 5d0b20f6e4de0b59b8c3306793c7267e01b9a41b
# Parent  529ff7eca135838dc50c227d52db97725a79f0db
asm: AVX2 version luma_vpp[4x4], improve 391c -> 302c

diff -r 529ff7eca135 -r 5d0b20f6e4de source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Thu Oct 30 15:46:23 2014 -0700
+++ b/source/common/x86/asm-primitives.cpp	Thu Oct 30 15:46:40 2014 -0700
@@ -1799,6 +1799,7 @@
         p.transpose[BLOCK_64x64] = x265_transpose64_avx2;
 #endif
         p.luma_hpp[LUMA_4x4] = x265_interp_8tap_horiz_pp_4x4_avx2;
+        p.luma_vpp[LUMA_4x4] = x265_interp_8tap_vert_pp_4x4_avx2;
     }
 #endif // if HIGH_BIT_DEPTH
 }
diff -r 529ff7eca135 -r 5d0b20f6e4de source/common/x86/ipfilter8.asm
--- a/source/common/x86/ipfilter8.asm	Thu Oct 30 15:46:23 2014 -0700
+++ b/source/common/x86/ipfilter8.asm	Thu Oct 30 15:46:40 2014 -0700
@@ -3420,6 +3420,88 @@
     RET
 %endmacro
 
+
+INIT_YMM avx2
+cglobal interp_8tap_vert_pp_4x4, 4,6,8
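+    ; r0 = src, r1 = srcStride, r2 = dst, r3 = dstStride, r4m = coeffIdx
+    ; Load a 4-pixel row from each of the 11 source rows the 8-tap filter needs,
+    ; interleave them so every dword holds one column from 4 consecutive rows,
+    ; then let pmaddubsw/pmaddwd build all four output rows in parallel.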
+    mov             r4d, r4m
+    lea             r5, [r1 * 3]
+    sub             r0, r5
+
+    ; TODO: VPGATHERDD
+    movd            xm1, [r0]                       ; m1 = row0
+    movd            xm2, [r0 + r1]                  ; m2 = row1
+    punpcklbw       xm1, xm2                        ; m1 = [13 03 12 02 11 01 10 00]
+
+    movd            xm3, [r0 + r1 * 2]              ; m3 = row2
+    punpcklbw       xm2, xm3                        ; m2 = [23 13 22 12 21 11 20 10]
+    movd            xm4, [r0 + r5]
+    punpcklbw       xm3, xm4                        ; m3 = [33 23 32 22 31 21 30 20]
+    punpcklwd       xm1, xm3                        ; m1 = [33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00]
+
+    lea             r0, [r0 + r1 * 4]
+    movd            xm5, [r0]                       ; m5 = row4
+    punpcklbw       xm4, xm5                        ; m4 = [43 33 42 32 41 31 40 30]
+    punpcklwd       xm2, xm4                        ; m2 = [43 33 23 13 42 32 22 12 41 31 21 11 40 30 20 10]
+    vinserti128     m1, m1, xm2, 1                  ; m1 = [43 33 23 13 42 32 22 12 41 31 21 11 40 30 20 10] - [33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00]
+    movd            xm2, [r0 + r1]                  ; m2 = row5
+    punpcklbw       xm5, xm2                        ; m5 = [53 43 52 42 51 41 50 40]
+    punpcklwd       xm3, xm5                        ; m3 = [53 43 33 23 52 42 32 22 51 41 31 21 50 40 30 20]
+    movd            xm6, [r0 + r1 * 2]              ; m6 = row6
+    punpcklbw       xm2, xm6                        ; m2 = [63 53 62 52 61 51 60 50]
+    punpcklwd       xm4, xm2                        ; m4 = [63 53 43 33 62 52 42 32 61 51 41 31 60 50 40 30]
+    vinserti128     m3, m3, xm4, 1                  ; m3 = [63 53 43 33 62 52 42 32 61 51 41 31 60 50 40 30] - [53 43 33 23 52 42 32 22 51 41 31 21 50 40 30 20]
+    movd            xm4, [r0 + r5]                  ; m4 = row7
+    punpcklbw       xm6, xm4                        ; m6 = [73 63 72 62 71 61 70 60]
+    punpcklwd       xm5, xm6                        ; m5 = [73 63 53 43 72 62 52 42 71 61 51 41 70 60 50 40]
+
+    lea             r0, [r0 + r1 * 4]
+    movd            xm7, [r0]                       ; m7 = row8
+    punpcklbw       xm4, xm7                        ; m4 = [83 73 82 72 81 71 80 70]
+    punpcklwd       xm2, xm4                        ; m2 = [83 73 63 53 82 72 62 52 81 71 61 51 80 70 60 50]
+    vinserti128     m5, m5, xm2, 1                  ; m5 = [83 73 63 53 82 72 62 52 81 71 61 51 80 70 60 50] - [73 63 53 43 72 62 52 42 71 61 51 41 70 60 50 40]
+    movd            xm2, [r0 + r1]                  ; m2 = row9
+    punpcklbw       xm7, xm2                        ; m7 = [93 83 92 82 91 81 90 80]
+    punpcklwd       xm6, xm7                        ; m6 = [93 83 73 63 92 82 72 62 91 81 71 61 90 80 70 60]
+    movd            xm7, [r0 + r1 * 2]              ; m7 = rowA
+    punpcklbw       xm2, xm7                        ; m2 = [A3 93 A2 92 A1 91 A0 90]
+    punpcklwd       xm4, xm2                        ; m4 = [A3 93 83 73 A2 92 82 72 A1 91 81 71 A0 90 80 70]
+    vinserti128     m6, m6, xm4, 1                  ; m6 = [A3 93 83 73 A2 92 82 72 A1 91 81 71 A0 90 80 70] - [93 83 73 63 92 82 72 62 91 81 71 61 90 80 70 60]
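+    ; m1/m3 carry taps 0-3 and m5/m6 taps 4-7 of the vertical filter,
+    ; with output rows 0/1 in the lanes of m1/m5 and rows 2/3 in m3/m6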
+
+    ; load filter coeff
+%ifdef PIC
+    lea             r5, [tab_LumaCoeff]
+    vpbroadcastd    m0, [r5 + r4 * 8 + 0]
+    vpbroadcastd    m2, [r5 + r4 * 8 + 4]
+%else
+    vpbroadcastd    m0, [tab_LumaCoeff + r4 * 8 + 0]
+    vpbroadcastd    m2, [tab_LumaCoeff + r4 * 8 + 4]
+%endif
+
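+    ; m0 = taps 0-3 of the selected filter in every dword, m2 = taps 4-7;
+    ; pmaddubsw pairs them with the interleaved source bytes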
+    pmaddubsw       m1, m0
+    pmaddubsw       m3, m0
+    pmaddubsw       m5, m2
+    pmaddubsw       m6, m2
+    vbroadcasti128  m0, [pw_1]
+    pmaddwd         m1, m0
+    pmaddwd         m3, m0
+    pmaddwd         m5, m0
+    pmaddwd         m6, m0
+    paddd           m1, m5                          ; m1 = DQWORD ROW[1 0]
+    paddd           m3, m6                          ; m3 = DQWORD ROW[3 2]
+    packssdw        m1, m3                          ; m1 =  QWORD ROW[3 1 2 0]
+
+    ; TODO: does it overflow?
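+    ; pmulhrsw against pw_512 gives (x * 512 + 16384) >> 15 = (x + 32) >> 6,
+    ; the rounding shift that removes the filter's 64x gain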
+    pmulhrsw        m1, [pw_512]
+    vextracti128    xm2, m1, 1
+    packuswb        xm1, xm2                        ; m1 =  DWORD ROW[3 1 2 0]
+    movd            [r2], xm1
+    pextrd          [r2 + r3], xm1, 2
+    pextrd          [r2 + r3 * 2], xm1, 1
+    lea             r4, [r3 * 3]
+    pextrd          [r2 + r4], xm1, 3
+    RET
+
+
 ;-------------------------------------------------------------------------------------------------------------
 ; void interp_8tap_vert_pp_4x4(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
 ;-------------------------------------------------------------------------------------------------------------
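
For reference, the scalar operation this AVX2 routine vectorizes is roughly the
following minimal C sketch. It is illustrative only, not x265 code: the names are
made up, and the taps shown are the standard HEVC luma interpolation filters that
tab_LumaCoeff is expected to hold.

    #include <stdint.h>

    /* Illustrative scalar reference for the 4x4 vertical 8-tap luma filter. */
    static const int8_t lumaTaps[4][8] =
    {
        {  0, 0,   0, 64,  0,   0, 0,  0 },
        { -1, 4, -10, 58, 17,  -5, 1,  0 },
        { -1, 4, -11, 40, 40, -11, 4, -1 },
        {  0, 1,  -5, 17, 58, -10, 4, -1 },
    };

    static uint8_t clipByte(int v)
    {
        return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
    }

    static void interp8tapVertPP4x4Ref(const uint8_t *src, intptr_t srcStride,
                                       uint8_t *dst, intptr_t dstStride, int coeffIdx)
    {
        src -= 3 * srcStride;                  /* first tap sits 3 rows above the dst row */

        for (int y = 0; y < 4; y++)
            for (int x = 0; x < 4; x++)
            {
                int sum = 0;
                for (int k = 0; k < 8; k++)
                    sum += src[(y + k) * srcStride + x] * lumaTaps[coeffIdx][k];

                /* (sum + 32) >> 6 is what pmulhrsw against pw_512 computes above */
                dst[y * dstStride + x] = clipByte((sum + 32) >> 6);
            }
    }

In the asm, packuswb plays the role of the final clamp to [0, 255], and the four
output rows are produced together by keeping one row per 128-bit lane.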


