[x265] [PATCH] asm: psyCost_ss avx2 code for all sizes (4x4, 8x8, 16x16, 32x32, 64x64)

dnyaneshwar at multicorewareinc.com
Wed Mar 25 08:38:32 CET 2015


# HG changeset patch
# User Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
# Date 1427268038 -19800
#      Wed Mar 25 12:50:38 2015 +0530
# Node ID af5172cb915de56d0eb57fcfea3b41361c1f76b1
# Parent  e637273e2ae6391d43a037d9a4298a8be8e178ad
asm: psyCost_ss avx2 code for all sizes (4x4, 8x8, 16x16, 32x32, 64x64)

AVX2 (speedup over C, AVX2 cycles, C cycles):
psy_cost_ss[4x4]       6.53x    336.42          2195.55
psy_cost_ss[8x8]       6.10x    1422.97         8678.92
psy_cost_ss[16x16]     6.23x    5639.05         35154.69
psy_cost_ss[32x32]     6.19x    23208.20        143647.30
psy_cost_ss[64x64]     6.28x    89826.32        564206.44

SSE4 (speedup over C, SSE4 cycles, C cycles):
psy_cost_ss[4x4]       4.52x    514.43          2322.86
psy_cost_ss[8x8]       3.48x    2579.79         8978.54
psy_cost_ss[16x16]     3.52x    10234.08        36056.70
psy_cost_ss[32x32]     3.46x    44220.05        152957.89
psy_cost_ss[64x64]     3.49x    159862.55       557929.25
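
For context, psy_cost_ss compares the AC energy of the source and reconstructed
int16_t residual blocks: per block the energy is an sa8d (satd for 4x4) minus a
quarter of the SAD taken against an all-zero block, and the cost is the absolute
difference of the two energies, which is what the kernels below compute with the
source block in the low lane and the recon block in the high lane of each ymm
register. The snippet is only an illustrative plain-C++ sketch of the 4x4 case,
not the x265 source: the helper names, exact rounding and test values are
assumptions. A matching stand-alone correctness-check sketch for the new AVX2
entry points follows the diff.

// Illustrative sketch (NOT x265 source) of the 4x4 psy_cost_ss reference:
// energy = SATD - (SAD against a zero block >> 2), cost = |E_src - E_rec|.
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// 4x4 SATD: sum of |coefficients| of the 2-D Hadamard transform, halved.
static int satd_4x4(const int16_t* blk, intptr_t stride)
{
    int m[4][4];
    for (int i = 0; i < 4; i++)                  // horizontal 4-point Hadamard
    {
        int a0 = blk[i * stride + 0], a1 = blk[i * stride + 1];
        int a2 = blk[i * stride + 2], a3 = blk[i * stride + 3];
        int s01 = a0 + a1, d01 = a0 - a1;
        int s23 = a2 + a3, d23 = a2 - a3;
        m[i][0] = s01 + s23; m[i][1] = s01 - s23;
        m[i][2] = d01 + d23; m[i][3] = d01 - d23;
    }
    int sum = 0;
    for (int j = 0; j < 4; j++)                  // vertical 4-point Hadamard + abs-sum
    {
        int s01 = m[0][j] + m[1][j], d01 = m[0][j] - m[1][j];
        int s23 = m[2][j] + m[3][j], d23 = m[2][j] - m[3][j];
        sum += abs(s01 + s23) + abs(s01 - s23) + abs(d01 + d23) + abs(d01 - d23);
    }
    return sum >> 1;                             // conventional SATD scaling
}

// SAD of a 4x4 block against an all-zero block: sum of |coefficient| (the DC part).
static int sad_4x4_vs_zero(const int16_t* blk, intptr_t stride)
{
    int sum = 0;
    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 4; j++)
            sum += abs(blk[i * stride + j]);
    return sum;
}

static int psyCost_ss_4x4_ref(const int16_t* source, intptr_t sstride,
                              const int16_t* recon, intptr_t rstride)
{
    int sEnergy = satd_4x4(source, sstride) - (sad_4x4_vs_zero(source, sstride) >> 2);
    int rEnergy = satd_4x4(recon,  rstride) - (sad_4x4_vs_zero(recon,  rstride) >> 2);
    return abs(sEnergy - rEnergy);
}

int main()
{
    int16_t src[16], rec[16];
    for (int i = 0; i < 16; i++) { src[i] = int16_t(i * 7 - 40); rec[i] = int16_t(i * 5 - 30); }
    printf("psy_cost_ss[4x4] = %d\n", psyCost_ss_4x4_ref(src, 4, rec, 4));
    return 0;
}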

diff -r e637273e2ae6 -r af5172cb915d source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Tue Mar 24 15:31:05 2015 -0500
+++ b/source/common/x86/asm-primitives.cpp	Wed Mar 25 12:50:38 2015 +0530
@@ -1426,6 +1426,12 @@
 #if X86_64
     if (cpuMask & X265_CPU_AVX2)
     {
+        p.cu[BLOCK_4x4].psy_cost_ss = x265_psyCost_ss_4x4_avx2;
+        p.cu[BLOCK_8x8].psy_cost_ss = x265_psyCost_ss_8x8_avx2;
+        p.cu[BLOCK_16x16].psy_cost_ss = x265_psyCost_ss_16x16_avx2;
+        p.cu[BLOCK_32x32].psy_cost_ss = x265_psyCost_ss_32x32_avx2;
+        p.cu[BLOCK_64x64].psy_cost_ss = x265_psyCost_ss_64x64_avx2;
+
         p.cu[BLOCK_4x4].psy_cost_pp = x265_psyCost_pp_4x4_avx2;
         p.cu[BLOCK_8x8].psy_cost_pp = x265_psyCost_pp_8x8_avx2;
         p.cu[BLOCK_16x16].psy_cost_pp = x265_psyCost_pp_16x16_avx2;
diff -r e637273e2ae6 -r af5172cb915d source/common/x86/const-a.asm
--- a/source/common/x86/const-a.asm	Tue Mar 24 15:31:05 2015 -0500
+++ b/source/common/x86/const-a.asm	Wed Mar 25 12:50:38 2015 +0530
@@ -83,7 +83,7 @@
 const pw_pmpmpmpm, dw 1,-1,1,-1,1,-1,1,-1
 const pw_pmmpzzzz, dw 1,-1,-1,1,0,0,0,0
 const pd_1,        times 8 dd 1
-const pd_2,        times 4 dd 2
+const pd_2,        times 8 dd 2
 const pd_4,        times 4 dd 4
 const pd_8,        times 4 dd 8
 const pd_16,       times 4 dd 16
diff -r e637273e2ae6 -r af5172cb915d source/common/x86/pixel-a.asm
--- a/source/common/x86/pixel-a.asm	Tue Mar 24 15:31:05 2015 -0500
+++ b/source/common/x86/pixel-a.asm	Wed Mar 25 12:50:38 2015 +0530
@@ -43,8 +43,10 @@
 mask_1100: times 2 dd 0, -1
 hmul_8w:   times 4 dw 1
            times 2 dw 1, -1
+           times 4 dw 1
+           times 2 dw 1, -1
 ALIGN 32
-hmul_w:    dw 1, -1, 1, -1, 1, -1, 1, -1
+hmul_w:    times 2 dw 1, -1, 1, -1, 1, -1, 1, -1
 ALIGN 32
 transd_shuf1: SHUFFLE_MASK_W 0, 8, 2, 10, 4, 12, 6, 14
 transd_shuf2: SHUFFLE_MASK_W 1, 9, 3, 11, 5, 13, 7, 15
@@ -9863,3 +9865,533 @@
     movd            eax, m15
     RET
 %endif
+
+INIT_YMM avx2
+cglobal psyCost_ss_4x4, 4, 5, 8
+    add             r1, r1
+    add             r3, r3
+    lea             r4, [3 * r1]
+    movddup         m0, [r0]
+    movddup         m1, [r0 + r1]
+    movddup         m2, [r0 + r1 * 2]
+    movddup         m3, [r0 + r4]
+
+    lea             r4, [3 * r3]
+    movddup         m4, [r2]
+    movddup         m5, [r2 + r3]
+    movddup         m6, [r2 + r3 * 2]
+    movddup         m7, [r2 + r4]
+
+    vinserti128     m0, m0, xm4, 1
+    vinserti128     m1, m1, xm5, 1
+    vinserti128     m2, m2, xm6, 1
+    vinserti128     m3, m3, xm7, 1
+
+    pabsw           m4, m0
+    pabsw           m5, m1
+    paddw           m5, m4
+    pabsw           m4, m2
+    paddw           m5, m4
+    pabsw           m4, m3
+    paddw           m5, m4
+    pmaddwd         m5, [pw_1]
+    psrldq          m4, m5, 4
+    paddd           m5, m4
+    psrld           m6, m5, 2
+
+    mova            m4, [hmul_8w]
+    pmaddwd         m0, m4
+    pmaddwd         m1, m4
+    pmaddwd         m2, m4
+    pmaddwd         m3, m4
+
+    psrldq          m4, m0, 4
+    psubd           m5, m0, m4
+    paddd           m0, m4
+    shufps          m0, m0, m5, 10001000b
+
+    psrldq          m4, m1, 4
+    psubd           m5, m1, m4
+    paddd           m1, m4
+    shufps          m1, m1, m5, 10001000b
+
+    psrldq          m4, m2, 4
+    psubd           m5, m2, m4
+    paddd           m2, m4
+    shufps          m2, m2, m5, 10001000b
+
+    psrldq          m4, m3, 4
+    psubd           m5, m3, m4
+    paddd           m3, m4
+    shufps          m3, m3, m5, 10001000b
+
+    mova            m4, m0
+    paddd           m0, m1
+    psubd           m1, m4
+    mova            m4, m2
+    paddd           m2, m3
+    psubd           m3, m4
+    mova            m4, m0
+    paddd           m0, m2
+    psubd           m2, m4
+    mova            m4, m1
+    paddd           m1, m3
+    psubd           m3, m4
+
+    pabsd           m0, m0
+    pabsd           m2, m2
+    pabsd           m1, m1
+    pabsd           m3, m3
+    paddd           m0, m2
+    paddd           m1, m3
+    paddd           m0, m1
+    psrldq          m1, m0, 8
+    paddd           m0, m1
+    psrldq          m1, m0, 4
+    paddd           m0, m1
+    psrld           m0, 1
+    psubd           m0, m6
+    vextracti128    xm1, m0, 1
+    psubd           m0, m1
+    pabsd           m0, m0
+    movd            eax, xm0
+    RET
+
+%macro PSY_SS_8x8 0
+    lea             r4, [3 * r1]
+    lea             r6, [r0 + r1 * 4]
+    movu            xm0, [r0]
+    movu            xm1, [r0 + r1]
+    movu            xm2, [r0 + r1 * 2]
+    movu            xm3, [r0 + r4]
+    movu            xm4, [r6]
+    movu            xm5, [r6 + r1]
+    movu            xm6, [r6 + r1 * 2]
+    movu            xm7, [r6 + r4]
+
+    lea             r4, [3 * r3]
+    lea             r6, [r2 + r3 * 4]
+    movu            xm8, [r2]
+    movu            xm9, [r2 + r3]
+    movu            xm10, [r2 + r3 * 2]
+    movu            xm11, [r2 + r4]
+    vinserti128     m0, m0, xm8, 1
+    vinserti128     m1, m1, xm9, 1
+    vinserti128     m2, m2, xm10, 1
+    vinserti128     m3, m3, xm11, 1
+    movu            xm8, [r6]
+    movu            xm9, [r6 + r3]
+    movu            xm10, [r6 + r3 * 2]
+    movu            xm11, [r6 + r4]
+    vinserti128     m4, m4, xm8, 1
+    vinserti128     m5, m5, xm9, 1
+    vinserti128     m6, m6, xm10, 1
+    vinserti128     m7, m7, xm11, 1
+
+    ;; store on stack to use later
+    mova            [rsp + 0 * mmsize], m0
+    mova            [rsp + 1 * mmsize], m1
+    mova            [rsp + 2 * mmsize], m2
+    mova            [rsp + 3 * mmsize], m3
+    mova            [rsp + 4 * mmsize], m4
+    mova            [rsp + 5 * mmsize], m5
+    mova            [rsp + 6 * mmsize], m6
+    mova            [rsp + 7 * mmsize], m7
+
+    pabsw           m8, m0
+    pabsw           m9, m1
+    paddw           m8, m9
+    pabsw           m10, m2
+    pabsw           m11, m3
+    paddw           m10, m11
+    paddw           m8, m10
+    pabsw           m9, m4
+    pabsw           m10, m5
+    paddw           m9, m10
+    pabsw           m11, m6
+    pabsw           m10, m7
+    paddw           m11, m10
+    paddw           m9, m11
+    paddw           m8, m9
+    psrldq          m9, m8, 8
+
+    vextracti128    xm10, m8, 1
+    vextracti128    xm11, m9, 1
+
+    vpmovzxwd       m8, xm8
+    vpmovzxwd       m9, xm9
+    vpmovzxwd       m10, xm10
+    vpmovzxwd       m11, xm11
+
+    vinserti128     m8, m8, xm10, 1
+    vinserti128     m9, m9, xm11, 1
+
+    paddd           m8, m9
+    psrldq          m9, m8, 8
+    paddd           m8, m9
+    psrldq          m9, m8, 4
+    paddd           m8, m9
+    psrld           m8, 2       ; sad_4x4
+
+    pmaddwd         m0, m13
+    pmaddwd         m1, m13
+    pmaddwd         m2, m13
+    pmaddwd         m3, m13
+
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    vshufps         m0, m0, m10, 10001000b
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    vshufps         m0, m0, m10, 10001000b
+
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    vshufps         m1, m1, m10, 10001000b
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    vshufps         m1, m1, m10, 10001000b
+
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    vshufps         m2, m2, m10, 10001000b
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    vshufps         m2, m2, m10, 10001000b
+
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    vshufps         m3, m3, m10, 10001000b
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    vshufps         m3, m3, m10, 10001000b
+
+    SUMSUB_BA d, 0, 1, 9
+    SUMSUB_BA d, 2, 3, 9
+    SUMSUB_BA d, 0, 2, 9
+    SUMSUB_BA d, 1, 3, 9
+
+    pmaddwd         m4, m13
+    pmaddwd         m5, m13
+    pmaddwd         m6, m13
+    pmaddwd         m7, m13
+
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    vshufps         m4, m4, m10, 10001000b
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    vshufps         m4, m4, m10, 10001000b
+
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    vshufps         m5, m5, m10, 10001000b
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    vshufps         m5, m5, m10, 10001000b
+
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    vshufps         m6, m6, m10, 10001000b
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    vshufps         m6, m6, m10, 10001000b
+
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    vshufps         m7, m7, m10, 10001000b
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    vshufps         m7, m7, m10, 10001000b
+
+    SUMSUB_BA d, 4, 5, 9
+    SUMSUB_BA d, 6, 7, 9
+    SUMSUB_BA d, 4, 6, 9
+    SUMSUB_BA d, 5, 7, 9
+
+    SUMSUB_BA d, 0, 4, 9
+    SUMSUB_BA d, 1, 5, 9
+    SUMSUB_BA d, 2, 6, 9
+    SUMSUB_BA d, 3, 7, 9
+
+    pabsd           m0, m0
+    pabsd           m2, m2
+    pabsd           m1, m1
+    pabsd           m3, m3
+    pabsd           m4, m4
+    pabsd           m5, m5
+    pabsd           m6, m6
+    pabsd           m7, m7
+
+    paddd           m0, m2
+    paddd           m1, m3
+    paddd           m0, m1
+    paddd           m5, m4
+    paddd           m0, m5
+    paddd           m7, m6
+    paddd           m11, m0, m7
+
+    pmaddwd         m0, m12, [rsp + 0 * mmsize]
+    pmaddwd         m1, m12, [rsp + 1 * mmsize]
+    pmaddwd         m2, m12, [rsp + 2 * mmsize]
+    pmaddwd         m3, m12, [rsp + 3 * mmsize]
+
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    vshufps         m0, m0, m10, 10001000b
+    psrldq          m9, m0, 4
+    psubd           m10, m0, m9
+    paddd           m0, m9
+    vshufps         m0, m0, m10, 10001000b
+
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    vshufps         m1, m1, m10, 10001000b
+    psrldq          m9, m1, 4
+    psubd           m10, m1, m9
+    paddd           m1, m9
+    vshufps         m1, m1, m10, 10001000b
+
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    vshufps         m2, m2, m10, 10001000b
+    psrldq          m9, m2, 4
+    psubd           m10, m2, m9
+    paddd           m2, m9
+    vshufps         m2, m2, m10, 10001000b
+
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    vshufps         m3, m3, m10, 10001000b
+    psrldq          m9, m3, 4
+    psubd           m10, m3, m9
+    paddd           m3, m9
+    vshufps         m3, m3, m10, 10001000b
+
+    SUMSUB_BA d, 0, 1, 9
+    SUMSUB_BA d, 2, 3, 9
+    SUMSUB_BA d, 0, 2, 9
+    SUMSUB_BA d, 1, 3, 9
+
+    pmaddwd         m4, m12, [rsp + 4 * mmsize]
+    pmaddwd         m5, m12, [rsp + 5 * mmsize]
+    pmaddwd         m6, m12, [rsp + 6 * mmsize]
+    pmaddwd         m7, m12, [rsp + 7 * mmsize]
+
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    vshufps         m4, m4, m10, 10001000b
+    psrldq          m9, m4, 4
+    psubd           m10, m4, m9
+    paddd           m4, m9
+    vshufps         m4, m4, m10, 10001000b
+
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    vshufps         m5, m5, m10, 10001000b
+    psrldq          m9, m5, 4
+    psubd           m10, m5, m9
+    paddd           m5, m9
+    vshufps         m5, m5, m10, 10001000b
+
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    vshufps         m6, m6, m10, 10001000b
+    psrldq          m9, m6, 4
+    psubd           m10, m6, m9
+    paddd           m6, m9
+    vshufps         m6, m6, m10, 10001000b
+
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    vshufps         m7, m7, m10, 10001000b
+    psrldq          m9, m7, 4
+    psubd           m10, m7, m9
+    paddd           m7, m9
+    vshufps         m7, m7, m10, 10001000b
+
+    SUMSUB_BA d, 4, 5, 9
+    SUMSUB_BA d, 6, 7, 9
+    SUMSUB_BA d, 4, 6, 9
+    SUMSUB_BA d, 5, 7, 9
+
+    SUMSUB_BA d, 0, 4, 9
+    SUMSUB_BA d, 1, 5, 9
+    SUMSUB_BA d, 2, 6, 9
+    SUMSUB_BA d, 3, 7, 9
+
+    pabsd           m0, m0
+    pabsd           m2, m2
+    pabsd           m1, m1
+    pabsd           m3, m3
+    pabsd           m4, m4
+    pabsd           m5, m5
+    pabsd           m6, m6
+    pabsd           m7, m7
+
+    paddd           m0, m2
+    paddd           m1, m3
+    paddd           m0, m1
+    paddd           m5, m4
+    paddd           m0, m5
+    paddd           m7, m6
+    paddd           m0, m7
+    paddd           m0, m11
+
+    psrldq          m1, m0, 8
+    paddd           m0, m1
+    psrldq          m1, m0, 4
+    paddd           m0, m1
+    paddd           m0, [pd_2]
+    psrld           m0, 2
+    psubd           m0, m8
+    vextracti128    xm1, m0, 1
+    psubd           m0, m1
+    pabsd           m0, m0
+%endmacro
+
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal psyCost_ss_8x8, 4, 7, 14
+    ; NOTE: align stack to 64 bytes, so all of local data in same cache line
+    mov             r5, rsp
+    sub             rsp, 8*mmsize
+    and             rsp, ~63
+
+    mova            m12, [pw_1]
+    mova            m13, [hmul_w]
+    add             r1, r1
+    add             r3, r3
+
+    PSY_SS_8x8
+
+    movd            eax, xm0
+    mov             rsp, r5
+    RET
+%endif
+
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal psyCost_ss_16x16, 4, 9, 15
+    ; NOTE: align stack to 64 bytes, so all of local data in same cache line
+    mov             r5, rsp
+    sub             rsp, 8*mmsize
+    and             rsp, ~63
+
+    mova            m12, [pw_1]
+    mova            m13, [hmul_w]
+    add             r1, r1
+    add             r3, r3
+    pxor            m14, m14
+
+    mov             r7d, 2
+.loopH:
+    mov             r8d, 2
+.loopW:
+    PSY_SS_8x8
+
+    paddd           m14, m0
+    add             r0, 16
+    add             r2, 16
+    dec             r8d
+    jnz             .loopW
+    lea             r0, [r0 + r1 * 8 - 32]
+    lea             r2, [r2 + r3 * 8 - 32]
+    dec             r7d
+    jnz             .loopH
+    movd            eax, xm14
+    mov             rsp, r5
+    RET
+%endif
+
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal psyCost_ss_32x32, 4, 9, 15
+    ; NOTE: align stack to 64 bytes, so all of local data in same cache line
+    mov             r5, rsp
+    sub             rsp, 8*mmsize
+    and             rsp, ~63
+
+    mova            m12, [pw_1]
+    mova            m13, [hmul_w]
+    add             r1, r1
+    add             r3, r3
+    pxor            m14, m14
+
+    mov             r7d, 4
+.loopH:
+    mov             r8d, 4
+.loopW:
+    PSY_SS_8x8
+
+    paddd           m14, m0
+    add             r0, 16
+    add             r2, 16
+    dec             r8d
+    jnz             .loopW
+    lea             r0, [r0 + r1 * 8 - 64]
+    lea             r2, [r2 + r3 * 8 - 64]
+    dec             r7d
+    jnz             .loopH
+    movd            eax, xm14
+    mov             rsp, r5
+    RET
+%endif
+
+%if ARCH_X86_64
+INIT_YMM avx2
+cglobal psyCost_ss_64x64, 4, 9, 15
+    ; NOTE: align stack to 64 bytes, so all of local data in same cache line
+    mov             r5, rsp
+    sub             rsp, 8*mmsize
+    and             rsp, ~63
+
+    mova            m12, [pw_1]
+    mova            m13, [hmul_w]
+    add             r1, r1
+    add             r3, r3
+    pxor            m14, m14
+
+    mov             r7d, 8
+.loopH:
+    mov             r8d, 8
+.loopW:
+    PSY_SS_8x8
+
+    paddd           m14, m0
+    add             r0, 16
+    add             r2, 16
+    dec             r8d
+    jnz             .loopW
+    lea             r0, [r0 + r1 * 8 - 128]
+    lea             r2, [r2 + r3 * 8 - 128]
+    dec             r7d
+    jnz             .loopH
+    movd            eax, xm14
+    mov             rsp, r5
+    RET
+%endif
diff -r e637273e2ae6 -r af5172cb915d source/common/x86/pixel.h
--- a/source/common/x86/pixel.h	Tue Mar 24 15:31:05 2015 -0500
+++ b/source/common/x86/pixel.h	Wed Mar 25 12:50:38 2015 +0530
@@ -267,6 +267,12 @@
 int x265_psyCost_pp_32x32_avx2(const pixel* source, intptr_t sstride, const pixel* recon, intptr_t rstride);
 int x265_psyCost_pp_64x64_avx2(const pixel* source, intptr_t sstride, const pixel* recon, intptr_t rstride);
 
+int x265_psyCost_ss_4x4_avx2(const int16_t* source, intptr_t sstride, const int16_t* recon, intptr_t rstride);
+int x265_psyCost_ss_8x8_avx2(const int16_t* source, intptr_t sstride, const int16_t* recon, intptr_t rstride);
+int x265_psyCost_ss_16x16_avx2(const int16_t* source, intptr_t sstride, const int16_t* recon, intptr_t rstride);
+int x265_psyCost_ss_32x32_avx2(const int16_t* source, intptr_t sstride, const int16_t* recon, intptr_t rstride);
+int x265_psyCost_ss_64x64_avx2(const int16_t* source, intptr_t sstride, const int16_t* recon, intptr_t rstride);
+
 #undef DECL_PIXELS
 #undef DECL_HEVC_SSD
 #undef DECL_X1
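
As a usage note, the entry points declared above in pixel.h take int16_t data with
strides given in int16_t units (the asm doubles r1/r3 to get byte strides), and the
function table entries are only populated for x86-64 builds when AVX2 is detected.
The fragment below is a hypothetical stand-alone check, not part of this patch or of
the x265 test bench: psyCost_ss_8x8_ref stands for a scalar reference such as the
sketch earlier in this message, and the program has to be linked against the x265
asm objects and run on an AVX2-capable machine.

#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Prototype from source/common/x86/pixel.h (asm symbol, hence extern "C").
extern "C" int x265_psyCost_ss_8x8_avx2(const int16_t* source, intptr_t sstride,
                                        const int16_t* recon, intptr_t rstride);

// Assumed scalar reference, e.g. an 8x8 variant of the sketch shown earlier.
int psyCost_ss_8x8_ref(const int16_t* source, intptr_t sstride,
                       const int16_t* recon, intptr_t rstride);

int main()
{
    const intptr_t stride = 64;                  // stride in int16_t elements
    static int16_t src[8 * 64], rec[8 * 64];
    srand(0);
    for (int iter = 0; iter < 1000; iter++)
    {
        for (int j = 0; j < 8 * 64; j++)
        {
            src[j] = int16_t((rand() & 0x1fff) - 0x1000);   // plausible residual range
            rec[j] = int16_t((rand() & 0x1fff) - 0x1000);
        }
        int c    = psyCost_ss_8x8_ref(src, stride, rec, stride);
        int avx2 = x265_psyCost_ss_8x8_avx2(src, stride, rec, stride);
        if (c != avx2)
        {
            printf("mismatch at iteration %d: C=%d avx2=%d\n", iter, c, avx2);
            return 1;
        }
    }
    printf("psyCost_ss_8x8: scalar and AVX2 agree on 1000 random blocks\n");
    return 0;
}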

