[x265] [PATCH] asm: Add sse_ss for [16x16], [32x32] & [64x64] for 8bpp avx2

ramya at multicorewareinc.com
Wed Sep 30 07:53:18 CEST 2015


# HG changeset patch
# User Ramya Sriraman <ramya at multicorewareinc.com>
# Date 1443592336 -19800
#      Wed Sep 30 11:22:16 2015 +0530
# Node ID 29b61906162c657da241aecee9012e3f2da34b6d
# Parent  5f1451e5842252b31442e8b6519138d8033bbb2b
asm: Add sse_ss for [16x16], [32x32] & [64x64] for 8bpp avx2
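
For reference, the computation these kernels implement is a straight sum of
squared differences over int16_t samples. A minimal C sketch, assuming the
usual x265 sse_ss convention (int16_t sources, strides passed in element
units -- which is why the asm doubles r1/r3 to get byte strides -- and
32-bit accumulation); the helper name is illustrative, not part of x265:

    #include <stdint.h>

    /* Scalar reference for pixel_ssd_ss_NxN: sum of squared differences
     * between two int16_t blocks, accumulated in 32 bits to match the
     * dword (paddd) accumulation in the asm below. */
    static uint32_t ssd_ss_ref(const int16_t *a, intptr_t stride_a,
                               const int16_t *b, intptr_t stride_b, int size)
    {
        uint32_t sum = 0;
        for (int y = 0; y < size; y++)
        {
            for (int x = 0; x < size; x++)
            {
                int d = a[x] - b[x];
                sum += (uint32_t)(d * d);
            }
            a += stride_a;
            b += stride_b;
        }
        return sum;
    }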

diff -r 5f1451e58422 -r 29b61906162c source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Mon Sep 28 16:43:47 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Wed Sep 30 11:22:16 2015 +0530
@@ -2678,6 +2678,10 @@
 #if X86_64
     if (cpuMask & X265_CPU_AVX2)
     {
+        p.cu[BLOCK_16x16].sse_ss = (pixel_sse_ss_t)PFX(pixel_ssd_ss_16x16_avx2);
+        p.cu[BLOCK_32x32].sse_ss = (pixel_sse_ss_t)PFX(pixel_ssd_ss_32x32_avx2);
+        p.cu[BLOCK_64x64].sse_ss = (pixel_sse_ss_t)PFX(pixel_ssd_ss_64x64_avx2);
+
         p.cu[BLOCK_16x16].var = PFX(pixel_var_16x16_avx2);
         p.cu[BLOCK_32x32].var = PFX(pixel_var_32x32_avx2);
         p.cu[BLOCK_64x64].var = PFX(pixel_var_64x64_avx2);
diff -r 5f1451e58422 -r 29b61906162c source/common/x86/ssd-a.asm
--- a/source/common/x86/ssd-a.asm	Mon Sep 28 16:43:47 2015 +0530
+++ b/source/common/x86/ssd-a.asm	Wed Sep 30 11:22:16 2015 +0530
@@ -1100,8 +1100,195 @@
 SSD_SS_32xN
 SSD_SS_48
 SSD_SS_64xN
+
+INIT_YMM avx2
+cglobal pixel_ssd_ss_16x16, 4,4,5
+    add         r1d, r1d
+    add         r3d, r3d
+    pxor        m4, m4
+
+    movu        m0, [r0]
+    movu        m1, [r0+r1]
+    movu        m2, [r2]
+    movu        m3, [r2+r3]
+    psubw       m0, m2
+    psubw       m1, m3
+    lea         r0, [r0+2*r1]
+    lea         r2, [r2+2*r3]
+    pmaddwd     m0, m0
+    pmaddwd     m1, m1
+    paddd       m0, m1
+    paddd       m4, m0
+
+    movu        m0, [r0]
+    movu        m1, [r0+r1]
+    movu        m2, [r2]
+    movu        m3, [r2+r3]
+    psubw       m0, m2
+    psubw       m1, m3
+    lea         r0, [r0+2*r1]
+    lea         r2, [r2+2*r3]
+    pmaddwd     m0, m0
+    pmaddwd     m1, m1
+    paddd       m0, m1
+    paddd       m4, m0
+
+    movu        m0, [r0]
+    movu        m1, [r0+r1]
+    movu        m2, [r2]
+    movu        m3, [r2+r3]
+    psubw       m0, m2
+    psubw       m1, m3
+    lea         r0, [r0+2*r1]
+    lea         r2, [r2+2*r3]
+    pmaddwd     m0, m0
+    pmaddwd     m1, m1
+    paddd       m0, m1
+    paddd       m4, m0
+
+    movu        m0, [r0]
+    movu        m1, [r0+r1]
+    movu        m2, [r2]
+    movu        m3, [r2+r3]
+    psubw       m0, m2
+    psubw       m1, m3
+    lea         r0, [r0+2*r1]
+    lea         r2, [r2+2*r3]
+    pmaddwd     m0, m0
+    pmaddwd     m1, m1
+    paddd       m0, m1
+    paddd       m4, m0
+
+    movu        m0, [r0]
+    movu        m1, [r0+r1]
+    movu        m2, [r2]
+    movu        m3, [r2+r3]
+    psubw       m0, m2
+    psubw       m1, m3
+    lea         r0, [r0+2*r1]
+    lea         r2, [r2+2*r3]
+    pmaddwd     m0, m0
+    pmaddwd     m1, m1
+    paddd       m0, m1
+    paddd       m4, m0
+
+    movu        m0, [r0]
+    movu        m1, [r0+r1]
+    movu        m2, [r2]
+    movu        m3, [r2+r3]
+    psubw       m0, m2
+    psubw       m1, m3
+    lea         r0, [r0+2*r1]
+    lea         r2, [r2+2*r3]
+    pmaddwd     m0, m0
+    pmaddwd     m1, m1
+    paddd       m0, m1
+    paddd       m4, m0
+
+    movu        m0, [r0]
+    movu        m1, [r0+r1]
+    movu        m2, [r2]
+    movu        m3, [r2+r3]
+    psubw       m0, m2
+    psubw       m1, m3
+    lea         r0, [r0+2*r1]
+    lea         r2, [r2+2*r3]
+    pmaddwd     m0, m0
+    pmaddwd     m1, m1
+    paddd       m0, m1
+    paddd       m4, m0
+
+    movu        m0, [r0]
+    movu        m1, [r0+r1]
+    movu        m2, [r2]
+    movu        m3, [r2+r3]
+    psubw       m0, m2
+    psubw       m1, m3
+    lea         r0, [r0+2*r1]
+    lea         r2, [r2+2*r3]
+    pmaddwd     m0, m0
+    pmaddwd     m1, m1
+    paddd       m0, m1
+    paddd       m4, m0
+
+    HADDD        m4, m0
+    movd         eax, xm4
+    RET
+
+INIT_YMM avx2
+cglobal pixel_ssd_ss_32x32, 4,5,5
+    add         r1d, r1d
+    add         r3d, r3d
+    pxor        m4, m4
+    mov         r4d, 16
+.loop:
+    movu        m0, [r0]
+    movu        m1, [r0+mmsize]
+    movu        m2, [r2]
+    movu        m3, [r2+mmsize]
+    psubw       m0, m2
+    psubw       m1, m3
+    pmaddwd     m0, m0
+    pmaddwd     m1, m1
+    paddd       m4, m0
+    paddd       m4, m1
+    movu        m0, [r0+r1]
+    movu        m1, [r0+r1+mmsize]
+    movu        m2, [r2+r3]
+    movu        m3, [r2+r3+mmsize]
+    psubw       m0, m2
+    psubw       m1, m3
+    pmaddwd     m0, m0
+    pmaddwd     m1, m1
+    paddd       m4, m0
+    paddd       m4, m1
+    lea         r0, [r0+2*r1]
+    lea         r2, [r2+2*r3]
+    dec         r4d
+    jne         .loop
+
+    HADDD        m4, m0
+    movd         eax, xm4
+    RET
+INIT_YMM avx2
+cglobal pixel_ssd_ss_64x64, 4,5,5
+    add         r1d, r1d
+    add         r3d, r3d
+    pxor        m4, m4
+    mov         r4d, 64
+.loop:
+    movu        m0, [r0]
+    movu        m1, [r0+mmsize]
+    movu        m2, [r2]
+    movu        m3, [r2+mmsize]
+    psubw       m0, m2
+    psubw       m1, m3
+    pmaddwd     m0, m0
+    pmaddwd     m1, m1
+    paddd       m4, m0
+    paddd       m4, m1
+    movu        m0, [r0+2*mmsize]
+    movu        m1, [r0+3*mmsize]
+    movu        m2, [r2+2*mmsize]
+    movu        m3, [r2+3*mmsize]
+    psubw       m0, m2
+    psubw       m1, m3
+    pmaddwd     m0, m0
+    pmaddwd     m1, m1
+    paddd       m4, m0
+    paddd       m4, m1
+
+    add         r0, r1
+    add         r2, r3
+
+    dec         r4d
+    jne         .loop
+
+    HADDD        m4, m0
+    movd         eax, xm4
+    RET
+
 %endif ; !HIGH_BIT_DEPTH
-
 %if HIGH_BIT_DEPTH == 0
 %macro SSD_LOAD_FULL 5
     movu      m1, [t0+%1]

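The three kernels trade code size against loop overhead: 16x16 is fully
unrolled (eight two-row steps, each 16-sample int16 row filling exactly one
32-byte ymm load), 32x32 loops 16 times over row pairs (two ymm loads per
row), and 64x64 loops 64 times over single rows (four ymm loads). Each row
uses the same subtract/multiply-accumulate idiom, with HADDD reducing the
eight dword lanes into eax at the end. A minimal intrinsics sketch of one
16-sample step, mirroring the asm (the helper name is illustrative, not
part of the patch):

    #include <immintrin.h>
    #include <stdint.h>

    /* One 16 x int16 step of the AVX2 kernels: load, subtract, then
     * square-and-pairwise-add with pmaddwd, accumulating 8 x int32. */
    static inline __m256i ssd_ss_step(__m256i acc, const int16_t *a,
                                      const int16_t *b)
    {
        __m256i va = _mm256_loadu_si256((const __m256i *)a); /* movu m0, [r0] */
        __m256i vb = _mm256_loadu_si256((const __m256i *)b); /* movu m2, [r2] */
        __m256i d  = _mm256_sub_epi16(va, vb);               /* psubw m0, m2 */
        __m256i sq = _mm256_madd_epi16(d, d);                /* pmaddwd m0, m0 */
        return _mm256_add_epi32(acc, sq);                    /* paddd m4, m0 */
    }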
