[x265] [PATCH 1 of 5] asm: avx2 code for high_bit_depth satd_16x8

dnyaneshwar at multicorewareinc.com dnyaneshwar at multicorewareinc.com
Fri May 8 07:16:42 CEST 2015


# HG changeset patch
# User Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
# Date 1430986298 -19800
#      Thu May 07 13:41:38 2015 +0530
# Node ID 948636c0bbabd45320b451834471d0976cce947b
# Parent  7a1fd70739410f874f8ccd51ad688a29364d5e72
asm: avx2 code for high_bit_depth satd_16x8

AVX2:
satd[ 16x8] 8.92x    500.34          4461.95

AVX:
satd[ 16x8] 4.35x    1039.88         4521.10

diff -r 7a1fd7073941 -r 948636c0bbab source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Tue May 05 14:44:19 2015 -0700
+++ b/source/common/x86/asm-primitives.cpp	Thu May 07 13:41:38 2015 +0530
@@ -1181,6 +1181,8 @@
     }
     if (cpuMask & X265_CPU_AVX2)
     {
+        p.pu[LUMA_16x8].satd = x265_pixel_satd_16x8_avx2;
+
         p.cu[BLOCK_32x32].ssd_s = x265_pixel_ssd_s_32_avx2;
         p.cu[BLOCK_16x16].sse_ss = x265_pixel_ssd_ss_16x16_avx2;
 
diff -r 7a1fd7073941 -r 948636c0bbab source/common/x86/pixel-a.asm
--- a/source/common/x86/pixel-a.asm	Tue May 05 14:44:19 2015 -0700
+++ b/source/common/x86/pixel-a.asm	Thu May 07 13:41:38 2015 +0530
@@ -10516,7 +10516,7 @@
 ;; r2   - pix1
 ;; r3   - pix1Stride
 
-%if ARCH_X86_64 == 1
+%if ARCH_X86_64 == 1 && HIGH_BIT_DEPTH == 0
 INIT_YMM avx2
 cglobal calc_satd_16x8    ; function to compute satd cost for 16 columns, 8 rows
     pxor                m6, m6
@@ -11146,5 +11146,147 @@
     paddd           xm0, xm1
     movd            eax, xm0
     RET
-
-%endif  ; if ARCH_X86_64 == 1
+%endif ; ARCH_X86_64 == 1 && HIGH_BIT_DEPTH == 0
+
+%if ARCH_X86_64 == 1 && HIGH_BIT_DEPTH == 1
+INIT_YMM avx2
+cglobal calc_satd_16x8    ; function to compute satd cost for 16 columns, 8 rows (HBD path: 16-bit samples; r0/r2 = the two pixel buffers, r1/r3 = byte strides, r4/r5 = 3*stride; dword sums accumulate into m6, which the caller zeroes)
+    ; rows 0-3
+    movu            m0, [r0]                  ; row 0 of buf A: 16 x 16-bit samples
+    movu            m4, [r2]                  ; row 0 of buf B
+    psubw           m0, m4                    ; m0 = diff, row 0
+    movu            m1, [r0 + r1]
+    movu            m5, [r2 + r3]
+    psubw           m1, m5                    ; m1 = diff, row 1
+    movu            m2, [r0 + r1 * 2]
+    movu            m4, [r2 + r3 * 2]
+    psubw           m2, m4                    ; m2 = diff, row 2
+    movu            m3, [r0 + r4]             ; r4 = 3 * r1, precomputed by caller
+    movu            m5, [r2 + r5]             ; r5 = 3 * r3
+    psubw           m3, m5                    ; m3 = diff, row 3
+    lea             r0, [r0 + r1 * 4]         ; advance both buffers by 4 rows
+    lea             r2, [r2 + r3 * 4]
+    paddw           m4, m0, m1                ; vertical butterfly: row sums...
+    psubw           m1, m0                    ; ...and row differences
+    paddw           m0, m2, m3
+    psubw           m3, m2
+    punpckhwd       m2, m4, m1                ; word interleave: transpose step 1
+    punpcklwd       m4, m1
+    punpckhwd       m1, m0, m3
+    punpcklwd       m0, m3
+    paddw           m3, m4, m0                ; butterfly stage 2
+    psubw           m0, m4
+    paddw           m4, m2, m1
+    psubw           m1, m2
+    punpckhdq       m2, m3, m0                ; dword interleave: transpose step 2
+    punpckldq       m3, m0
+    paddw           m0, m3, m2                ; butterfly stage 3
+    psubw           m2, m3
+    punpckhdq       m3, m4, m1
+    punpckldq       m4, m1
+    paddw           m1, m4, m3
+    psubw           m3, m4
+    punpckhqdq      m4, m0, m1                ; qword interleave: final transpose step
+    punpcklqdq      m0, m1
+    pabsw           m0, m0
+    pabsw           m4, m4
+    pmaxsw          m0, m0, m4                ; last stage as max(|a|,|b|) (satd idiom; NOTE(review): 3-operand form relies on x86inc AVX_INSTR; confirm scaling vs non-HBD path)
+    punpckhqdq      m1, m2, m3
+    punpcklqdq      m2, m3
+    pabsw           m2, m2
+    pabsw           m1, m1
+    pmaxsw          m2, m1                    ; NOTE(review): 2-operand form here vs 3-operand above -- inconsistent but equivalent under x86inc
+    pxor            m7, m7                    ; m7 = 0, used to widen words to dwords
+    mova            m1, m0
+    punpcklwd       m1, m7                    ; zero-extend low 8 words to u32
+    paddd           m6, m1                    ; accumulate into m6
+    mova            m1, m0
+    punpckhwd       m1, m7                    ; zero-extend high 8 words
+    paddd           m6, m1
+    pxor            m7, m7                    ; NOTE(review): redundant -- m7 is still zero from above
+    mova            m1, m2
+    punpcklwd       m1, m7
+    paddd           m6, m1
+    mova            m1, m2
+    punpckhwd       m1, m7
+    paddd           m6, m1
+    ; rows 4-7 (identical transform to rows 0-3 on the next 4 rows)
+    movu            m0, [r0]
+    movu            m4, [r2]
+    psubw           m0, m4
+    movu            m1, [r0 + r1]
+    movu            m5, [r2 + r3]
+    psubw           m1, m5
+    movu            m2, [r0 + r1 * 2]
+    movu            m4, [r2 + r3 * 2]
+    psubw           m2, m4
+    movu            m3, [r0 + r4]
+    movu            m5, [r2 + r5]
+    psubw           m3, m5
+    lea             r0, [r0 + r1 * 4]         ; leave pointers past row 7 for chained callers
+    lea             r2, [r2 + r3 * 4]
+    paddw           m4, m0, m1
+    psubw           m1, m0
+    paddw           m0, m2, m3
+    psubw           m3, m2
+    punpckhwd       m2, m4, m1
+    punpcklwd       m4, m1
+    punpckhwd       m1, m0, m3
+    punpcklwd       m0, m3
+    paddw           m3, m4, m0
+    psubw           m0, m4
+    paddw           m4, m2, m1
+    psubw           m1, m2
+    punpckhdq       m2, m3, m0
+    punpckldq       m3, m0
+    paddw           m0, m3, m2
+    psubw           m2, m3
+    punpckhdq       m3, m4, m1
+    punpckldq       m4, m1
+    paddw           m1, m4, m3
+    psubw           m3, m4
+    punpckhqdq      m4, m0, m1
+    punpcklqdq      m0, m1
+    pabsw           m0, m0
+    pabsw           m4, m4
+    pmaxsw          m0, m0, m4
+    punpckhqdq      m1, m2, m3
+    punpcklqdq      m2, m3
+    pabsw           m2, m2
+    pabsw           m1, m1
+    pmaxsw          m2, m1
+    pxor            m7, m7
+    mova            m1, m0
+    punpcklwd       m1, m7
+    paddd           m6, m1
+    mova            m1, m0
+    punpckhwd       m1, m7
+    paddd           m6, m1
+    pxor            m7, m7                    ; NOTE(review): redundant, as above
+    mova            m1, m2
+    punpcklwd       m1, m7
+    paddd           m6, m1
+    mova            m1, m2
+    punpckhwd       m1, m7
+    paddd           m6, m1
+    ret                                       ; plain ret: internal helper; caller's RET macro handles epilogue/vzeroupper
+
+cglobal pixel_satd_16x8, 4,6,8               ; satd of a 16x8 block: args (pixA, strideA, pixB, strideB); 4 args, 6 GPRs, 8 vector regs
+    add             r1d, r1d                 ; strides arrive in pixel units; double for 16-bit samples
+    add             r3d, r3d
+    lea             r4, [3 * r1]             ; precompute 3*stride for row-3 loads in the helper
+    lea             r5, [3 * r3]
+    pxor            m6, m6                   ; m6 = dword satd accumulator, summed by calc_satd_16x8
+
+    call            calc_satd_16x8           ; one call covers all 8 rows (two 4-row passes)
+
+    vextracti128    xm7, m6, 1               ; horizontal reduction: fold high 128 bits into low
+    paddd           xm6, xm7                 ; 8 dwords -> 4
+    pxor            xm7, xm7
+    movhlps         xm7, xm6
+    paddd           xm6, xm7                 ; 4 dwords -> 2
+    pshufd          xm7, xm6, 1
+    paddd           xm6, xm7                 ; 2 dwords -> 1
+    movd            eax, xm6                 ; return satd in eax
+    RET
+%endif ; ARCH_X86_64 == 1 && HIGH_BIT_DEPTH == 1


More information about the x265-devel mailing list