[x265] [PATCH] arm: Implement cpy2Dto1D_shr ARM NEON asm

dnyaneshwar at multicorewareinc.com dnyaneshwar at multicorewareinc.com
Thu Feb 25 09:28:56 CET 2016


# HG changeset patch
# User Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
# Date 1456382877 -19800
#      Thu Feb 25 12:17:57 2016 +0530
# Node ID 01782e7f0a8cb93efbe4ff1534602ff9055c8565
# Parent  ed3dd1a26cb5801e306db8f1d4a52cd1f4d6620b
arm: Implement cpy2Dto1D_shr ARM NEON asm

diff -r ed3dd1a26cb5 -r 01782e7f0a8c source/common/arm/asm-primitives.cpp
--- a/source/common/arm/asm-primitives.cpp	Mon Feb 22 15:58:14 2016 +0530
+++ b/source/common/arm/asm-primitives.cpp	Thu Feb 25 12:17:57 2016 +0530
@@ -42,6 +42,12 @@
 {
     if (cpuMask & X265_CPU_NEON)
     {
+        // cpy2Dto1D_shr
+        p.cu[BLOCK_4x4].cpy2Dto1D_shr   = PFX(cpy2Dto1D_shr_4x4_neon);
+        p.cu[BLOCK_8x8].cpy2Dto1D_shr   = PFX(cpy2Dto1D_shr_8x8_neon);
+        p.cu[BLOCK_16x16].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_16x16_neon);
+        p.cu[BLOCK_32x32].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_32x32_neon);
+
         // ssd_s
         p.cu[BLOCK_4x4].ssd_s   = PFX(pixel_ssd_s_4x4_neon);
         p.cu[BLOCK_8x8].ssd_s   = PFX(pixel_ssd_s_8x8_neon);
diff -r ed3dd1a26cb5 -r 01782e7f0a8c source/common/arm/blockcopy8.h
--- a/source/common/arm/blockcopy8.h	Mon Feb 22 15:58:14 2016 +0530
+++ b/source/common/arm/blockcopy8.h	Thu Feb 25 12:17:57 2016 +0530
@@ -52,4 +52,9 @@
 void x265_blockcopy_pp_64x48_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
 void x265_blockcopy_pp_64x64_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
 
+void x265_cpy2Dto1D_shr_4x4_neon(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+void x265_cpy2Dto1D_shr_8x8_neon(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+void x265_cpy2Dto1D_shr_16x16_neon(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+void x265_cpy2Dto1D_shr_32x32_neon(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
+
 #endif // ifndef X265_I386_PIXEL_ARM_H
diff -r ed3dd1a26cb5 -r 01782e7f0a8c source/common/arm/mc-a.S
--- a/source/common/arm/mc-a.S	Mon Feb 22 15:58:14 2016 +0530
+++ b/source/common/arm/mc-a.S	Thu Feb 25 12:17:57 2016 +0530
@@ -443,3 +443,93 @@
 pixel_avg_pp_64xN_neon 32 8
 pixel_avg_pp_64xN_neon 48 12
 pixel_avg_pp_64xN_neon 64 16
+
+// void x265_cpy2Dto1D_shr_4x4_neon(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift)
+// Copies a 4x4 strided block to a contiguous buffer with rounding right shift:
+// dst[i] = (src[i] + (1 << (shift - 1))) >> shift.
+// NOTE(review): bias construction assumes shift >= 1; shift == 0 would add a stray +1 — confirm callers.
+function x265_cpy2Dto1D_shr_4x4_neon
+    add             r2, r2                  // r2 = srcStride * sizeof(int16_t) (stride in bytes)
+    vdup.16         q0, r3                  // q0 = shift broadcast to all lanes
+    vceq.s16        q1, q1                  // q1 = all ones (-1 per lane)
+    vshl.s16        q1, q0                  // q1 = -1 << shift = -(1 << shift)
+    vsri.s16        q1, #1                  // shift-right-insert: q1 = -(1 << (shift - 1)) = -round
+    vneg.s16        q0, q0                  // q0 = -shift (vshl by negative = arithmetic shift right)
+    vld1.s16        {d4}, [r1], r2          // row 0 (4 coeffs)
+    vld1.s16        {d5}, [r1], r2          // row 1
+    vld1.s16        {d6}, [r1], r2          // row 2
+    vld1.s16        {d7}, [r1], r2          // row 3
+    vsub.s16        q2, q1                  // q2 = src + round   (x - (-round))
+    vsub.s16        q3, q1
+    vshl.s16        q2, q0                  // q2 >>= shift (arithmetic, via negative vshl)
+    vshl.s16        q3, q0
+    vst1.16         {q2-q3}, [r0]           // store all 16 results contiguously
+    bx              lr
+endfunc
+
+// void x265_cpy2Dto1D_shr_8x8_neon(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift)
+// 8x8 variant: dst[i] = (src[i] + (1 << (shift - 1))) >> shift, two rows per iteration.
+function x265_cpy2Dto1D_shr_8x8_neon
+    add             r2, r2                  // r2 = srcStride in bytes
+    vdup.16         q0, r3                  // q0 = shift
+    vceq.s16        q1, q1                  // q1 = -1 in every lane
+    vshl.s16        q1, q0                  // q1 = -(1 << shift)
+    vsri.s16        q1, #1                  // q1 = -(1 << (shift - 1)) = -round
+    vneg.s16        q0, q0                  // q0 = -shift, for right shift via vshl
+.rept 4                                     // 4 x 2 rows = 8 rows
+    vld1.s16        {q2}, [r1], r2          // one row of 8 coeffs
+    vld1.s16        {q3}, [r1], r2          // next row
+    vsub.s16        q2, q1                  // add rounding bias
+    vsub.s16        q3, q1
+    vshl.s16        q2, q0                  // arithmetic shift right by 'shift'
+    vshl.s16        q3, q0
+    vst1.16         {q2-q3}, [r0]!          // append 16 results to dst
+.endr
+    bx              lr
+endfunc
+
+// void x265_cpy2Dto1D_shr_16x16_neon(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift)
+// 16x16 variant: one full row (16 coeffs) per q2-q3 load.
+function x265_cpy2Dto1D_shr_16x16_neon
+    add             r2, r2                  // r2 = srcStride in bytes
+    vdup.16         q0, r3                  // q0 = shift
+    vceq.s16        q1, q1                  // q1 = -1 in every lane
+    vshl.s16        q1, q0                  // q1 = -(1 << shift)
+    vsri.s16        q1, #1                  // q1 = -(1 << (shift - 1)) = -round
+    vneg.s16        q0, q0                  // q0 = -shift, for right shift via vshl
+    mov             r3, #4                  // 4 outer iterations x 4 rows = 16 rows
+.loop_cpy2Dto1D_shr_16:
+    subs            r3, #1
+.rept 4
+    vld1.s16        {q2-q3}, [r1], r2       // one row of 16 coeffs, advance to next row
+    vsub.s16        q2, q1                  // add rounding bias
+    vsub.s16        q3, q1
+    vshl.s16        q2, q0                  // arithmetic shift right by 'shift'
+    vshl.s16        q3, q0
+    vst1.16         {q2-q3}, [r0]!          // append row to contiguous dst
+.endr
+    bgt             .loop_cpy2Dto1D_shr_16
+    bx              lr
+endfunc
+
+// void x265_cpy2Dto1D_shr_32x32_neon(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift)
+// 32x32 variant: each row (32 coeffs = 64 bytes) is loaded in two halves;
+// the first load post-increments by 32 bytes, so the row stride is pre-adjusted by -32.
+function x265_cpy2Dto1D_shr_32x32_neon
+    add             r2, r2                  // r2 = srcStride in bytes
+    sub             r2, #32                 // compensate for the 32-byte advance of the first half-row load
+    vdup.16         q0, r3                  // q0 = shift
+    vceq.s16        q1, q1                  // q1 = -1 in every lane
+    vshl.s16        q1, q0                  // q1 = -(1 << shift)
+    vsri.s16        q1, #1                  // q1 = -(1 << (shift - 1)) = -round
+    vneg.s16        q0, q0                  // q0 = -shift, for right shift via vshl
+    mov             r3, #16                 // '#' added: immediate prefix, consistent with 'mov r3, #4' above
+.loop_cpy2Dto1D_shr_32:
+    subs            r3, #1                  // 16 iterations x 2 rows = 32 rows
+.rept 2
+    vld1.s16        {q2-q3}, [r1]!          // first 16 coeffs of the row (+32 bytes)
+    vld1.s16        {q8-q9}, [r1], r2       // last 16 coeffs, then step to next row
+    vsub.s16        q2, q1                  // add rounding bias to all four vectors
+    vsub.s16        q3, q1
+    vsub.s16        q8, q1
+    vsub.s16        q9, q1
+    vshl.s16        q2, q0                  // arithmetic shift right by 'shift'
+    vshl.s16        q3, q0
+    vshl.s16        q8, q0
+    vshl.s16        q9, q0
+    vst1.16         {q2-q3}, [r0]!          // append the full row to contiguous dst
+    vst1.16         {q8-q9}, [r0]!
+.endr
+    bgt             .loop_cpy2Dto1D_shr_32
+    bx              lr
+endfunc


More information about the x265-devel mailing list