[x265] [PATCH] asm: interp_4tap_vert_X[12x16], [24x32] avx2 10bit code for i420

rajesh at multicorewareinc.com
Tue Jun 9 13:38:01 CEST 2015


# HG changeset patch
# User Rajesh Paulraj<rajesh at multicorewareinc.com>
# Date 1433842142 -19800
#      Tue Jun 09 14:59:02 2015 +0530
# Node ID 2c7b1c9c83d4e54128f3d0687c2548b28e17a4fd
# Parent  5994f794ff36e881e3f992e78b2167a0e4ad4768
asm: interp_4tap_vert_X[12x16], [24x32] avx2 10bit code for i420

avx2 (speedup over C, asm cycles, C cycles):
chroma_vpp[12x16]       10.69x   2580.09         27573.60
chroma_vps[12x16]       8.84x    2287.66         20215.64
chroma_vsp[12x16]       10.29x   2586.21         26613.50
chroma_vss[12x16]       9.33x    2144.93         20014.79
chroma_vpp[24x32]       13.27x   7917.02         105030.23
chroma_vps[24x32]       11.14x   7022.05         78229.66
chroma_vsp[24x32]       12.84x   8023.87         102988.77
chroma_vss[24x32]       11.13x   6895.48         76760.37

sse4 (speedup over C, asm cycles, C cycles):
chroma_vpp[12x16]       5.98x    4382.46         26223.65
chroma_vps[12x16]       5.08x    4015.30         20378.18
chroma_vsp[12x16]       6.79x    3936.60         26712.20
chroma_vss[12x16]       5.61x    3563.51         19997.63
chroma_vpp[24x32]       7.36x    14440.38        106316.66
chroma_vps[24x32]       5.70x    13725.61        78266.04
chroma_vsp[24x32]       7.36x    14560.77        107151.01
chroma_vss[24x32]       6.37x    12103.02        77121.60
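
For reference, the scalar operation these kernels vectorize is sketched below.
This is a minimal illustration of the ss ("short to short") variant only,
assuming the standard HEVC 4-tap chroma coefficients; the function name, the
explicit width/height parameters, and the coefficient table here are
illustrative, not x265's actual C reference (which specializes per block size
and reads tab_ChromaCoeffV from the asm source).

#include <cstdint>

// Standard HEVC 4-tap chroma interpolation coefficients (each row sums
// to 64), indexed by the fractional sample position (coeffIdx).
static const int16_t chromaFilter[8][4] =
{
    {  0, 64,  0,  0 }, { -2, 58, 10, -2 }, { -4, 54, 16, -2 },
    { -6, 46, 28, -4 }, { -4, 36, 36, -4 }, { -4, 28, 46, -6 },
    { -2, 16, 54, -4 }, { -2, 10, 58, -2 }
};

// ss variant: 16-bit intermediates in, 16-bit intermediates out.
// Mirrors the asm's "sub r0, r1" (start one row above the block) and
// "psrad 6" (shift only, no rounding offset and no clipping).
static void interp_4tap_vert_ss_c(const int16_t* src, intptr_t srcStride,
                                  int16_t* dst, intptr_t dstStride,
                                  int coeffIdx, int width, int height)
{
    const int16_t* c = chromaFilter[coeffIdx];
    src -= srcStride;
    for (int y = 0; y < height; y++)
    {
        for (int x = 0; x < width; x++)
        {
            int sum = src[x]                 * c[0]
                    + src[x + srcStride]     * c[1]
                    + src[x + 2 * srcStride] * c[2]
                    + src[x + 3 * srcStride] * c[3];
            dst[x] = (int16_t)(sum >> 6);
        }
        src += srcStride;
        dst += dstStride;
    }
}

The pp/ps/sp variants differ only in the rounding constant, the shift amount,
and whether the result is clipped to [0, pw_pixel_max], as the per-variant
branches in the macros below show.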

diff -r 5994f794ff36 -r 2c7b1c9c83d4 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Tue Jun 09 14:39:01 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Tue Jun 09 14:59:02 2015 +0530
@@ -1714,6 +1714,10 @@
         p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].filter_vss = x265_interp_4tap_vert_ss_8x64_avx2;
 
 
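+        // 10-bit AVX2 vertical 4-tap chroma filters for the I420 12x16 partition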
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vpp = x265_interp_4tap_vert_pp_12x16_avx2;
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vps = x265_interp_4tap_vert_ps_12x16_avx2;
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vss = x265_interp_4tap_vert_ss_12x16_avx2;
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vsp = x265_interp_4tap_vert_sp_12x16_avx2;
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_vpp = x265_interp_4tap_vert_pp_16x4_avx2;
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_vpp = x265_interp_4tap_vert_pp_16x8_avx2;
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].filter_vpp = x265_interp_4tap_vert_pp_16x12_avx2;
@@ -1734,6 +1738,10 @@
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].filter_vsp = x265_interp_4tap_vert_sp_16x12_avx2;
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_vsp = x265_interp_4tap_vert_sp_16x16_avx2;
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].filter_vsp = x265_interp_4tap_vert_sp_16x32_avx2;
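+        // 10-bit AVX2 vertical 4-tap chroma filters for the I420 24x32 partition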
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_vpp = x265_interp_4tap_vert_pp_24x32_avx2;
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_vps = x265_interp_4tap_vert_ps_24x32_avx2;
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_vss = x265_interp_4tap_vert_ss_24x32_avx2;
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_vsp = x265_interp_4tap_vert_sp_24x32_avx2;
         p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].filter_vpp = x265_interp_4tap_vert_pp_32x8_avx2;
         p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].filter_vpp = x265_interp_4tap_vert_pp_32x16_avx2;
         p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].filter_vpp = x265_interp_4tap_vert_pp_32x24_avx2;
diff -r 5994f794ff36 -r 2c7b1c9c83d4 source/common/x86/ipfilter16.asm
--- a/source/common/x86/ipfilter16.asm	Tue Jun 09 14:39:01 2015 +0530
+++ b/source/common/x86/ipfilter16.asm	Tue Jun 09 14:59:02 2015 +0530
@@ -5116,6 +5116,274 @@
     FILTER_VER_CHROMA_W16_32xN_avx2 24, sp, 15
     FILTER_VER_CHROMA_W16_32xN_avx2 32, sp, 15
 
+;-----------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_W16_12xN_avx2 3
+INIT_YMM avx2
+cglobal interp_4tap_vert_%2_12x%1, 5, 8, %3
+    add       r1d, r1d
+    add       r3d, r3d
+    sub       r0, r1
+    shl       r4d, 6
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r5, [r5 + r4]
+%else
+    lea       r5, [tab_ChromaCoeffV + r4]
+%endif
+    mov       r4d, %1/2
+
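+    ; per-variant rounding/offset constant, matching the shifts in the loop:
+    ; pp adds 32 then shifts >>6, sp adds 524800 then >>10, ps adds -32768 then >>2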
+%ifidn %2, pp
+    mova      m7, [tab_c_32]
+%elifidn %2, sp
+    mova      m7, [pd_524800]
+%elifidn %2, ps
+    mova      m7, [tab_c_n32768]
+%endif
+
+.loopH:
+    PROCESS_CHROMA_VERT_W16_2R
+%ifidn %2, ss
+    psrad     m0, 6
+    psrad     m1, 6
+    psrad     m2, 6
+    psrad     m3, 6
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+%elifidn %2, ps
+    paddd     m0, m7
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+    psrad     m0, 2
+    psrad     m1, 2
+    psrad     m2, 2
+    psrad     m3, 2
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+%else
+    paddd     m0, m7
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+%ifidn %2, pp
+    psrad     m0, 6
+    psrad     m1, 6
+    psrad     m2, 6
+    psrad     m3, 6
+%else
+    psrad     m0, 10
+    psrad     m1, 10
+    psrad     m2, 10
+    psrad     m3, 10
+%endif
+    packssdw  m0, m1
+    packssdw  m2, m3
+    pxor      m5, m5
+    CLIPW2    m0, m2, m5, [pw_pixel_max]
+%endif
+
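+    ; store 12 pixels per row: 8 from the low xmm lane, 4 (movq) from the high lane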
+    movu      [r2], xm0
+    movu      [r2 + r3], xm2
+    vextracti128 xm0, m0, 1
+    vextracti128 xm2, m2, 1
+    movq      [r2 + 16], xm0
+    movq      [r2 + r3 + 16], xm2
+    lea       r2, [r2 + 2 * r3]
+    dec       r4d
+    jnz       .loopH
+    RET
+%endmacro
+    FILTER_VER_CHROMA_W16_12xN_avx2 16, ss, 7
+    FILTER_VER_CHROMA_W16_12xN_avx2 16, sp, 8
+    FILTER_VER_CHROMA_W16_12xN_avx2 16, ps, 8
+    FILTER_VER_CHROMA_W16_12xN_avx2 16, pp, 8
+
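+; Processes two rows of a 24-pixel-wide block: the full-width ymm registers
+; m0-m3 accumulate columns 0-15, while the xmm registers xm8-xm11 handle the
+; remaining columns 16-23.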
+%macro PROCESS_CHROMA_VERT_W24_2R 0
+    movu       m1, [r0]
+    movu       m3, [r0 + r1]
+    punpcklwd  m0, m1, m3
+    pmaddwd    m0, [r5 + 0 * mmsize]
+    punpckhwd  m1, m3
+    pmaddwd    m1, [r5 + 0 * mmsize]
+
+    movu       xm9, [r0 + mmsize]
+    movu       xm11, [r0 + r1 + mmsize]
+    punpcklwd  xm8, xm9, xm11
+    pmaddwd    xm8, [r5 + 0 * mmsize]
+    punpckhwd  xm9, xm11
+    pmaddwd    xm9, [r5 + 0 * mmsize]
+
+    movu       m4, [r0 + 2 * r1]
+    punpcklwd  m2, m3, m4
+    pmaddwd    m2, [r5 + 0 * mmsize]
+    punpckhwd  m3, m4
+    pmaddwd    m3, [r5 + 0 * mmsize]
+
+    movu       xm12, [r0 + 2 * r1 + mmsize]
+    punpcklwd  xm10, xm11, xm12
+    pmaddwd    xm10, [r5 + 0 * mmsize]
+    punpckhwd  xm11, xm12
+    pmaddwd    xm11, [r5 + 0 * mmsize]
+
+    lea        r6, [r0 + 2 * r1]
+    movu       m5, [r6 + r1]
+    punpcklwd  m6, m4, m5
+    pmaddwd    m6, [r5 + 1 * mmsize]
+    paddd      m0, m6
+    punpckhwd  m4, m5
+    pmaddwd    m4, [r5 + 1 * mmsize]
+    paddd      m1, m4
+
+    movu       xm13, [r6 + r1 + mmsize]
+    punpcklwd  xm14, xm12, xm13
+    pmaddwd    xm14, [r5 + 1 * mmsize]
+    paddd      xm8, xm14
+    punpckhwd  xm12, xm13
+    pmaddwd    xm12, [r5 + 1 * mmsize]
+    paddd      xm9, xm12
+
+    movu       m4, [r6 + 2 * r1]
+    punpcklwd  m6, m5, m4
+    pmaddwd    m6, [r5 + 1 * mmsize]
+    paddd      m2, m6
+    punpckhwd  m5, m4
+    pmaddwd    m5, [r5 + 1 * mmsize]
+    paddd      m3, m5
+
+    movu       xm12, [r6 + 2 * r1 + mmsize]
+    punpcklwd  xm14, xm13, xm12
+    pmaddwd    xm14, [r5 + 1 * mmsize]
+    paddd      xm10, xm14
+    punpckhwd  xm13, xm12
+    pmaddwd    xm13, [r5 + 1 * mmsize]
+    paddd      xm11, xm13
+%endmacro
+
+;-----------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_CHROMA_W16_24xN_avx2 3
+INIT_YMM avx2
+%if ARCH_X86_64
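+; uses m8-m14, which are only available on x86-64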
+cglobal interp_4tap_vert_%2_24x%1, 5, 7, %3
+    add       r1d, r1d
+    add       r3d, r3d
+    sub       r0, r1
+    shl       r4d, 6
+
+%ifdef PIC
+    lea       r5, [tab_ChromaCoeffV]
+    lea       r5, [r5 + r4]
+%else
+    lea       r5, [tab_ChromaCoeffV + r4]
+%endif
+    mov       r4d, %1/2
+
+%ifidn %2, pp
+    mova      m7, [tab_c_32]
+%elifidn %2, sp
+    mova      m7, [pd_524800]
+%elifidn %2, ps
+    mova      m7, [tab_c_n32768]
+%endif
+
+.loopH:
+    PROCESS_CHROMA_VERT_W24_2R
+%ifidn %2, ss
+    psrad     m0, 6
+    psrad     m1, 6
+    psrad     m2, 6
+    psrad     m3, 6
+
+    psrad     m8, 6
+    psrad     m9, 6
+    psrad     m10, 6
+    psrad     m11, 6
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+    packssdw  m8, m9
+    packssdw  m10, m11
+%elifidn %2, ps
+    paddd     m0, m7
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+    psrad     m0, 2
+    psrad     m1, 2
+    psrad     m2, 2
+    psrad     m3, 2
+    paddd     m8, m7
+    paddd     m9, m7
+    paddd     m10, m7
+    paddd     m11, m7
+    psrad     m8, 2
+    psrad     m9, 2
+    psrad     m10, 2
+    psrad     m11, 2
+
+    packssdw  m0, m1
+    packssdw  m2, m3
+    packssdw  m8, m9
+    packssdw  m10, m11
+%else
+    paddd     m0, m7
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+    paddd     m8, m7
+    paddd     m9, m7
+    paddd     m10, m7
+    paddd     m11, m7
+%ifidn %2, pp
+    psrad     m0, 6
+    psrad     m1, 6
+    psrad     m2, 6
+    psrad     m3, 6
+    psrad     m8, 6
+    psrad     m9, 6
+    psrad     m10, 6
+    psrad     m11, 6
+%else
+    psrad     m0, 10
+    psrad     m1, 10
+    psrad     m2, 10
+    psrad     m3, 10
+    psrad     m8, 10
+    psrad     m9, 10
+    psrad     m10, 10
+    psrad     m11, 10
+%endif
+    packssdw  m0, m1
+    packssdw  m2, m3
+    packssdw  m8, m9
+    packssdw  m10, m11
+    pxor      m5, m5
+    CLIPW2    m0, m2, m5, [pw_pixel_max]
+    CLIPW2    m8, m10, m5, [pw_pixel_max]
+%endif
+
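+    ; store 24 pixels per row: 16 from the full ymm registers, 8 from the xmm halves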
+    movu      [r2], m0
+    movu      [r2 + r3], m2
+    movu      [r2 + mmsize], xm8
+    movu      [r2 + r3 + mmsize], xm10
+    lea       r2, [r2 + 2 * r3]
+    lea       r0, [r0 + 2 * r1]
+    dec       r4d
+    jnz       .loopH
+    RET
+%endif
+%endmacro
+    FILTER_VER_CHROMA_W16_24xN_avx2 32, ss, 15
+    FILTER_VER_CHROMA_W16_24xN_avx2 32, sp, 15
+    FILTER_VER_CHROMA_W16_24xN_avx2 32, ps, 15
+    FILTER_VER_CHROMA_W16_24xN_avx2 32, pp, 15
+
 INIT_XMM sse2
 cglobal chroma_p2s, 3, 7, 3
 

