[x265] [PATCH] asm: avx2 8bpp code for sub_ps[16x16](8.06x), sub_ps[32x32](10.20x), sub_ps[64x64](8.28x)

rajesh at multicorewareinc.com
Tue Mar 17 05:53:27 CET 2015


# HG changeset patch
# User Rajesh Paulraj<rajesh at multicorewareinc.com>
# Date 1426567915 -19800
#      Tue Mar 17 10:21:55 2015 +0530
# Node ID 77dfe9f386a25900d9d5bd83e7325aeaf9b08774
# Parent  b9948752d5516a72eeaf824e3ee6f0feb097381c
asm: avx2 8bpp code for sub_ps[16x16](8.06x), sub_ps[32x32](10.20x),
                        sub_ps[64x64](8.28x)
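
For reference, the scalar operation these routines vectorize is roughly the
sketch below (illustrative only: the function name and the blockSize parameter
are not the exact x265 C primitive, and pixel is assumed to be uint8_t as in
8bpp builds). The AVX2 code zero-extends each byte row to words with pmovzxbw,
subtracts with psubw and stores the 16-bit residual:

#include <stdint.h>   /* int16_t, intptr_t */

/* Subtract two pixel blocks into a 16-bit residual block.
 * destride is in int16_t units; the asm converts it to a byte
 * stride up front ("add r1, r1"). */
static void sub_ps_ref(int16_t* dest, intptr_t destride,
                       const uint8_t* src0, const uint8_t* src1,
                       intptr_t srcstride0, intptr_t srcstride1,
                       int blockSize)
{
    for (int y = 0; y < blockSize; y++)
    {
        for (int x = 0; x < blockSize; x++)
            dest[x] = (int16_t)(src0[x] - src1[x]);

        dest += destride;
        src0 += srcstride0;
        src1 += srcstride1;
    }
}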

diff -r b9948752d551 -r 77dfe9f386a2 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Mon Mar 16 20:40:12 2015 -0500
+++ b/source/common/x86/asm-primitives.cpp	Tue Mar 17 10:21:55 2015 +0530
@@ -1421,6 +1421,10 @@
         p.cu[BLOCK_32x32].add_ps = x265_pixel_add_ps_32x32_avx2;
         p.cu[BLOCK_64x64].add_ps = x265_pixel_add_ps_64x64_avx2;
 
+        p.cu[BLOCK_16x16].sub_ps = x265_pixel_sub_ps_16x16_avx2;
+        p.cu[BLOCK_32x32].sub_ps = x265_pixel_sub_ps_32x32_avx2;
+        p.cu[BLOCK_64x64].sub_ps = x265_pixel_sub_ps_64x64_avx2;
+
         p.pu[LUMA_16x4].pixelavg_pp = x265_pixel_avg_16x4_avx2;
         p.pu[LUMA_16x8].pixelavg_pp = x265_pixel_avg_16x8_avx2;
         p.pu[LUMA_16x12].pixelavg_pp = x265_pixel_avg_16x12_avx2;
diff -r b9948752d551 -r 77dfe9f386a2 source/common/x86/pixel-util8.asm
--- a/source/common/x86/pixel-util8.asm	Mon Mar 16 20:40:12 2015 -0500
+++ b/source/common/x86/pixel-util8.asm	Tue Mar 17 10:21:55 2015 +0530
@@ -4335,6 +4335,44 @@
 
 
 ;-----------------------------------------------------------------------------
+; void pixel_sub_ps_16x16(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal pixel_sub_ps_16x16, 6, 7, 4, dest, deststride, src0, src1, srcstride0, srcstride1
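+    ; dest is int16_t, so convert deststride to a byte stride; r6 = 3 * byte stride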
+    add         r1,     r1
+    lea         r6,     [r1 * 3]
+
+%rep 4
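+    ; each iteration below produces four rows of residual (4 x 4 = 16 rows)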
+    pmovzxbw    m0,     [r2]
+    pmovzxbw    m1,     [r3]
+    pmovzxbw    m2,     [r2 + r4]
+    pmovzxbw    m3,     [r3 + r5]
+    lea         r2,     [r2 + r4 * 2]
+    lea         r3,     [r3 + r5 * 2]
+
+    psubw       m0,     m1
+    psubw       m2,     m3
+
+    movu        [r0],            m0
+    movu        [r0 + r1],       m2
+
+    pmovzxbw    m0,     [r2]
+    pmovzxbw    m1,     [r3]
+    pmovzxbw    m2,     [r2 + r4]
+    pmovzxbw    m3,     [r3 + r5]
+
+    psubw       m0,     m1
+    psubw       m2,     m3
+
+    movu        [r0 + r1 * 2],   m0
+    movu        [r0 + r6],       m2
+
+    lea         r0,     [r0 + r1 * 4]
+    lea         r2,     [r2 + r4 * 2]
+    lea         r3,     [r3 + r5 * 2]
+%endrep
+    RET
+;-----------------------------------------------------------------------------
 ; void pixel_sub_ps_32x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
 ;-----------------------------------------------------------------------------
 %macro PIXELSUB_PS_W32_H2 2
@@ -4469,6 +4507,132 @@
 
 
 ;-----------------------------------------------------------------------------
+; void pixel_sub_ps_32x32(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal pixel_sub_ps_32x32, 6, 6, 4, dest, deststride, src0, src1, srcstride0, srcstride1
+    add         r1,     r1
+
+%rep 4
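+    ; each iteration below produces eight rows of residual (4 x 8 = 32 rows)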
+    pmovzxbw    m0,     [r2]
+    pmovzxbw    m1,     [r2 + 16]
+    pmovzxbw    m2,     [r3]
+    pmovzxbw    m3,     [r3 + 16]
+
+    psubw       m0,     m2
+    psubw       m1,     m3
+
+    movu        [r0],            m0
+    movu        [r0 + 32],       m1
+
+    pmovzxbw    m0,     [r2 + r4]
+    pmovzxbw    m1,     [r2 + r4 + 16]
+    pmovzxbw    m2,     [r3 + r5]
+    pmovzxbw    m3,     [r3 + r5 + 16]
+
+    psubw       m0,     m2
+    psubw       m1,     m3
+
+    movu        [r0 + r1],       m0
+    movu        [r0 + r1 + 32],  m1
+
+    add         r2,     r4
+    add         r3,     r5
+
+    pmovzxbw    m0,     [r2 + r4]
+    pmovzxbw    m1,     [r2 + r4 + 16]
+    pmovzxbw    m2,     [r3 + r5]
+    pmovzxbw    m3,     [r3 + r5 + 16]
+
+    psubw       m0,     m2
+    psubw       m1,     m3
+    lea         r0,     [r0 + r1 * 2]
+
+    movu        [r0],            m0
+    movu        [r0 + 32],       m1
+
+    add         r2,     r4
+    add         r3,     r5
+
+    pmovzxbw    m0,     [r2 + r4]
+    pmovzxbw    m1,     [r2 + r4 + 16]
+    pmovzxbw    m2,     [r3 + r5]
+    pmovzxbw    m3,     [r3 + r5 + 16]
+
+    psubw       m0,     m2
+    psubw       m1,     m3
+    add         r0,     r1
+
+    movu        [r0],            m0
+    movu        [r0 + 32],       m1
+
+    add         r2,     r4
+    add         r3,     r5
+
+    pmovzxbw    m0,     [r2 + r4]
+    pmovzxbw    m1,     [r2 + r4 + 16]
+    pmovzxbw    m2,     [r3 + r5]
+    pmovzxbw    m3,     [r3 + r5 + 16]
+
+    psubw       m0,     m2
+    psubw       m1,     m3
+    add         r0,     r1
+
+    movu        [r0],            m0
+    movu        [r0 + 32],       m1
+
+    add         r2,     r4
+    add         r3,     r5
+
+    pmovzxbw    m0,     [r2 + r4]
+    pmovzxbw    m1,     [r2 + r4 + 16]
+    pmovzxbw    m2,     [r3 + r5]
+    pmovzxbw    m3,     [r3 + r5 + 16]
+
+    psubw       m0,     m2
+    psubw       m1,     m3
+    add         r0,     r1
+
+    movu        [r0],            m0
+    movu        [r0 + 32],       m1
+
+    add         r2,     r4
+    add         r3,     r5
+
+    pmovzxbw    m0,     [r2 + r4]
+    pmovzxbw    m1,     [r2 + r4 + 16]
+    pmovzxbw    m2,     [r3 + r5]
+    pmovzxbw    m3,     [r3 + r5 + 16]
+
+    psubw       m0,     m2
+    psubw       m1,     m3
+    add         r0,     r1
+
+    movu        [r0],            m0
+    movu        [r0 + 32],       m1
+
+    add         r2,     r4
+    add         r3,     r5
+
+    pmovzxbw    m0,     [r2 + r4]
+    pmovzxbw    m1,     [r2 + r4 + 16]
+    pmovzxbw    m2,     [r3 + r5]
+    pmovzxbw    m3,     [r3 + r5 + 16]
+
+    psubw       m0,     m2
+    psubw       m1,     m3
+    add         r0,     r1
+
+    movu        [r0],            m0
+    movu        [r0 + 32],       m1
+
+    lea         r0,     [r0 + r1]
+    lea         r2,     [r2 + r4 * 2]
+    lea         r3,     [r3 + r5 * 2]
+%endrep
+    RET
+;-----------------------------------------------------------------------------
 ; void pixel_sub_ps_64x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
 ;-----------------------------------------------------------------------------
 %macro PIXELSUB_PS_W64_H2 2
@@ -4687,6 +4851,111 @@
 %endif
 
 
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_64x64(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal pixel_sub_ps_64x64, 6, 6, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+    add         r1,     r1
+
+%rep 16
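+    ; each iteration below produces four 64-pixel rows (16 x 4 = 64 rows)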
+    pmovzxbw    m0,     [r2]
+    pmovzxbw    m1,     [r2 + 16]
+    pmovzxbw    m2,     [r2 + 32]
+    pmovzxbw    m3,     [r2 + 48]
+
+    pmovzxbw    m4,     [r3]
+    pmovzxbw    m5,     [r3 + 16]
+    pmovzxbw    m6,     [r3 + 32]
+    pmovzxbw    m7,     [r3 + 48]
+
+    psubw       m0,     m4
+    psubw       m1,     m5
+    psubw       m2,     m6
+    psubw       m3,     m7
+
+    movu        [r0],         m0
+    movu        [r0 + 32],    m1
+    movu        [r0 + 64],    m2
+    movu        [r0 + 96],    m3
+
+    add         r0,     r1
+    add         r2,     r4
+    add         r3,     r5
+
+    pmovzxbw    m0,     [r2]
+    pmovzxbw    m1,     [r2 + 16]
+    pmovzxbw    m2,     [r2 + 32]
+    pmovzxbw    m3,     [r2 + 48]
+
+    pmovzxbw    m4,     [r3]
+    pmovzxbw    m5,     [r3 + 16]
+    pmovzxbw    m6,     [r3 + 32]
+    pmovzxbw    m7,     [r3 + 48]
+
+    psubw       m0,     m4
+    psubw       m1,     m5
+    psubw       m2,     m6
+    psubw       m3,     m7
+
+    movu        [r0],         m0
+    movu        [r0 + 32],    m1
+    movu        [r0 + 64],    m2
+    movu        [r0 + 96],    m3
+
+    add         r0,     r1
+    add         r2,     r4
+    add         r3,     r5
+
+    pmovzxbw    m0,     [r2]
+    pmovzxbw    m1,     [r2 + 16]
+    pmovzxbw    m2,     [r2 + 32]
+    pmovzxbw    m3,     [r2 + 48]
+
+    pmovzxbw    m4,     [r3]
+    pmovzxbw    m5,     [r3 + 16]
+    pmovzxbw    m6,     [r3 + 32]
+    pmovzxbw    m7,     [r3 + 48]
+
+    psubw       m0,     m4
+    psubw       m1,     m5
+    psubw       m2,     m6
+    psubw       m3,     m7
+
+    movu        [r0],         m0
+    movu        [r0 + 32],    m1
+    movu        [r0 + 64],    m2
+    movu        [r0 + 96],    m3
+
+    add         r0,     r1
+    add         r2,     r4
+    add         r3,     r5
+
+    pmovzxbw    m0,     [r2]
+    pmovzxbw    m1,     [r2 + 16]
+    pmovzxbw    m2,     [r2 + 32]
+    pmovzxbw    m3,     [r2 + 48]
+
+    pmovzxbw    m4,     [r3]
+    pmovzxbw    m5,     [r3 + 16]
+    pmovzxbw    m6,     [r3 + 32]
+    pmovzxbw    m7,     [r3 + 48]
+
+    psubw       m0,     m4
+    psubw       m1,     m5
+    psubw       m2,     m6
+    psubw       m3,     m7
+
+    movu        [r0],         m0
+    movu        [r0 + 32],    m1
+    movu        [r0 + 64],    m2
+    movu        [r0 + 96],    m3
+
+    add         r0,     r1
+    add         r2,     r4
+    add         r3,     r5
+%endrep
+    RET
 ;=============================================================================
 ; variance
 ;=============================================================================
diff -r b9948752d551 -r 77dfe9f386a2 source/common/x86/pixel.h
--- a/source/common/x86/pixel.h	Mon Mar 16 20:40:12 2015 -0500
+++ b/source/common/x86/pixel.h	Tue Mar 17 10:21:55 2015 +0530
@@ -255,6 +255,10 @@
 void x265_pixel_add_ps_32x32_avx2(pixel* a, intptr_t dstride, const pixel* b0, const int16_t* b1, intptr_t sstride0, intptr_t sstride1);
 void x265_pixel_add_ps_64x64_avx2(pixel* a, intptr_t dstride, const pixel* b0, const int16_t* b1, intptr_t sstride0, intptr_t sstride1);
 
+void x265_pixel_sub_ps_16x16_avx2(int16_t* a, intptr_t dstride, const pixel* b0, const pixel* b1, intptr_t sstride0, intptr_t sstride1);
+void x265_pixel_sub_ps_32x32_avx2(int16_t* a, intptr_t dstride, const pixel* b0, const pixel* b1, intptr_t sstride0, intptr_t sstride1);
+void x265_pixel_sub_ps_64x64_avx2(int16_t* a, intptr_t dstride, const pixel* b0, const pixel* b1, intptr_t sstride0, intptr_t sstride1);
+
 #undef DECL_PIXELS
 #undef DECL_HEVC_SSD
 #undef DECL_X1

