[x265] [PATCH 3 of 5] asm: avx2 code for high_bit_depth satd_32xN, improved by over ~50% compared to the previous asm

dnyaneshwar at multicorewareinc.com dnyaneshwar at multicorewareinc.com
Fri May 8 07:16:44 CEST 2015


# HG changeset patch
# User Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
# Date 1430989736 -19800
#      Thu May 07 14:38:56 2015 +0530
# Node ID 9fc3850626b8d2f2b7e28c2ddf70dd043c0fc165
# Parent  0ea3afb105ecb96db2fe5a2f8a6e19d718a794b4
asm: avx2 code for high_bit_depth satd_32xN, improved by over ~50% compared to the previous asm

diff -r 0ea3afb105ec -r 9fc3850626b8 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Thu May 07 14:20:01 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Thu May 07 14:38:56 2015 +0530
@@ -1181,6 +1181,12 @@
     }
     if (cpuMask & X265_CPU_AVX2)
     {
+        p.pu[LUMA_32x8].satd = x265_pixel_satd_32x8_avx2;
+        p.pu[LUMA_32x16].satd = x265_pixel_satd_32x16_avx2;
+        p.pu[LUMA_32x24].satd = x265_pixel_satd_32x24_avx2;
+        p.pu[LUMA_32x32].satd = x265_pixel_satd_32x32_avx2;
+        p.pu[LUMA_32x64].satd = x265_pixel_satd_32x64_avx2;
+
         p.pu[LUMA_16x4].satd = x265_pixel_satd_16x4_avx2;
         p.pu[LUMA_16x8].satd = x265_pixel_satd_16x8_avx2;
         p.pu[LUMA_16x12].satd = x265_pixel_satd_16x12_avx2;
diff -r 0ea3afb105ec -r 9fc3850626b8 source/common/x86/pixel-a.asm
--- a/source/common/x86/pixel-a.asm	Thu May 07 14:20:01 2015 +0530
+++ b/source/common/x86/pixel-a.asm	Thu May 07 14:38:56 2015 +0530
@@ -11458,4 +11458,160 @@
     paddd           xm6, xm7
     movd            eax, xm6
     RET
+
+cglobal pixel_satd_32x8, 4,8,8          ; int satd(pixel *src, intptr_t sstride, pixel *ref, intptr_t rstride); 8 GPRs, 8 vector regs
+    add             r1d, r1d            ; HIGH_BIT_DEPTH: strides are in 16-bit pixels -> convert to bytes
+    add             r3d, r3d
+    lea             r4, [3 * r1]        ; r4 = 3*src byte stride (row addressing helper for calc_satd_16x8 — assumed; body not in view)
+    lea             r5, [3 * r3]        ; r5 = 3*ref byte stride
+    pxor            m6, m6              ; m6 = running SATD accumulator (dword lanes, reduced at the end)
+    mov             r6, r0              ; save src base so the right 16-pixel half can restart from row 0
+    mov             r7, r2              ; save ref base
+
+    call            calc_satd_16x8      ; left 16x8 half; accumulates into m6 and advances r0/r2 (assumed from repeated-call pattern)
+
+    lea             r0, [r6 + 32]       ; right half: +16 pixels = +32 bytes at 16-bit depth
+    lea             r2, [r7 + 32]
+
+    call            calc_satd_16x8      ; right 16x8 half
+
+    vextracti128    xm7, m6, 1          ; horizontal reduction: fold upper 128-bit lane into lower
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6            ; fold high 64 bits onto low 64
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1         ; fold remaining second dword
+    paddd           xm6, xm7
+    movd            eax, xm6            ; total 32x8 SATD returned in eax
+    RET
+
+cglobal pixel_satd_32x16, 4,8,8         ; int satd(pixel *src, intptr_t sstride, pixel *ref, intptr_t rstride); 8 GPRs, 8 vector regs
+    add             r1d, r1d            ; HIGH_BIT_DEPTH: strides are in 16-bit pixels -> convert to bytes
+    add             r3d, r3d
+    lea             r4, [3 * r1]        ; r4 = 3*src byte stride (row addressing helper for calc_satd_16x8 — assumed; body not in view)
+    lea             r5, [3 * r3]        ; r5 = 3*ref byte stride
+    pxor            m6, m6              ; m6 = running SATD accumulator (dword lanes)
+    mov             r6, r0              ; save src base so the right 16-pixel half can restart from row 0
+    mov             r7, r2              ; save ref base
+
+    call            calc_satd_16x8      ; left 16x16 half in two 16x8 passes; each call accumulates into m6 and advances r0/r2 (assumed)
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 32]       ; right half: +16 pixels = +32 bytes at 16-bit depth
+    lea             r2, [r7 + 32]
+
+    call            calc_satd_16x8      ; right 16x16 half, two 16x8 passes
+    call            calc_satd_16x8
+
+    vextracti128    xm7, m6, 1          ; horizontal reduction: fold upper 128-bit lane into lower
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6            ; fold high 64 bits onto low 64
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1         ; fold remaining second dword
+    paddd           xm6, xm7
+    movd            eax, xm6            ; total 32x16 SATD returned in eax
+    RET
+
+cglobal pixel_satd_32x24, 4,8,8         ; int satd(pixel *src, intptr_t sstride, pixel *ref, intptr_t rstride); 8 GPRs, 8 vector regs
+    add             r1d, r1d            ; HIGH_BIT_DEPTH: strides are in 16-bit pixels -> convert to bytes
+    add             r3d, r3d
+    lea             r4, [3 * r1]        ; r4 = 3*src byte stride (row addressing helper for calc_satd_16x8 — assumed; body not in view)
+    lea             r5, [3 * r3]        ; r5 = 3*ref byte stride
+    pxor            m6, m6              ; m6 = running SATD accumulator (dword lanes)
+    mov             r6, r0              ; save src base so the right 16-pixel half can restart from row 0
+    mov             r7, r2              ; save ref base
+
+    call            calc_satd_16x8      ; left 16x24 half in three 16x8 passes; each call accumulates into m6 and advances r0/r2 (assumed)
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 32]       ; right half: +16 pixels = +32 bytes at 16-bit depth
+    lea             r2, [r7 + 32]
+
+    call            calc_satd_16x8      ; right 16x24 half, three 16x8 passes
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    vextracti128    xm7, m6, 1          ; horizontal reduction: fold upper 128-bit lane into lower
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6            ; fold high 64 bits onto low 64
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1         ; fold remaining second dword
+    paddd           xm6, xm7
+    movd            eax, xm6            ; total 32x24 SATD returned in eax
+    RET
+
+cglobal pixel_satd_32x32, 4,8,8         ; int satd(pixel *src, intptr_t sstride, pixel *ref, intptr_t rstride); 8 GPRs, 8 vector regs
+    add             r1d, r1d            ; HIGH_BIT_DEPTH: strides are in 16-bit pixels -> convert to bytes
+    add             r3d, r3d
+    lea             r4, [3 * r1]        ; r4 = 3*src byte stride (row addressing helper for calc_satd_16x8 — assumed; body not in view)
+    lea             r5, [3 * r3]        ; r5 = 3*ref byte stride
+    pxor            m6, m6              ; m6 = running SATD accumulator (dword lanes)
+    mov             r6, r0              ; save src base so the right 16-pixel half can restart from row 0
+    mov             r7, r2              ; save ref base
+
+    call            calc_satd_16x8      ; left 16x32 half in four 16x8 passes; each call accumulates into m6 and advances r0/r2 (assumed)
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 32]       ; right half: +16 pixels = +32 bytes at 16-bit depth
+    lea             r2, [r7 + 32]
+
+    call            calc_satd_16x8      ; right 16x32 half, four 16x8 passes
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    vextracti128    xm7, m6, 1          ; horizontal reduction: fold upper 128-bit lane into lower
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6            ; fold high 64 bits onto low 64
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1         ; fold remaining second dword
+    paddd           xm6, xm7
+    movd            eax, xm6            ; total 32x32 SATD returned in eax
+    RET
+
+cglobal pixel_satd_32x64, 4,8,8         ; int satd(pixel *src, intptr_t sstride, pixel *ref, intptr_t rstride); 8 GPRs, 8 vector regs
+    add             r1d, r1d            ; HIGH_BIT_DEPTH: strides are in 16-bit pixels -> convert to bytes
+    add             r3d, r3d
+    lea             r4, [3 * r1]        ; r4 = 3*src byte stride (row addressing helper for calc_satd_16x8 — assumed; body not in view)
+    lea             r5, [3 * r3]        ; r5 = 3*ref byte stride
+    pxor            m6, m6              ; m6 = running SATD accumulator (dword lanes)
+    mov             r6, r0              ; save src base so the right 16-pixel half can restart from row 0
+    mov             r7, r2              ; save ref base
+
+    call            calc_satd_16x8      ; left 16x64 half in eight 16x8 passes; each call accumulates into m6 and advances r0/r2 (assumed)
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    lea             r0, [r6 + 32]       ; right half: +16 pixels = +32 bytes at 16-bit depth
+    lea             r2, [r7 + 32]
+
+    call            calc_satd_16x8      ; right 16x64 half, eight 16x8 passes
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+    call            calc_satd_16x8
+
+    vextracti128    xm7, m6, 1          ; horizontal reduction: fold upper 128-bit lane into lower
+    paddd           xm6, xm7
+    pxor            xm7, xm7
+    movhlps         xm7, xm6            ; fold high 64 bits onto low 64
+    paddd           xm6, xm7
+    pshufd          xm7, xm6, 1         ; fold remaining second dword
+    paddd           xm6, xm7
+    movd            eax, xm6            ; total 32x64 SATD returned in eax
+    RET
 %endif ; ARCH_X86_64 == 1 && HIGH_BIT_DEPTH == 1


More information about the x265-devel mailing list