[x265] [PATCH] arm: Implement blockcopy_pp_NxN_neon

radhakrishnan at multicorewareinc.com radhakrishnan at multicorewareinc.com
Thu Feb 11 10:54:45 CET 2016


# HG changeset patch
# User radhakrishnan at multicorewareinc.com
# Date 1455183020 -19800
#      Thu Feb 11 15:00:20 2016 +0530
# Node ID 4f5720ccaf1aa04868054636f14dce8ea65390ad
# Parent  a2ff6747eaf7b25102f27f808cf5526f441df488
arm: Implement blockcopy_pp_NxN_neon

diff -r a2ff6747eaf7 -r 4f5720ccaf1a source/common/arm/asm-primitives.cpp
--- a/source/common/arm/asm-primitives.cpp	Tue Feb 02 16:29:04 2016 +0530
+++ b/source/common/arm/asm-primitives.cpp	Thu Feb 11 15:00:20 2016 +0530
@@ -41,6 +41,30 @@
     if (cpuMask & X265_CPU_NEON)
     {
         p.pu[LUMA_16x16].copy_pp = PFX(blockcopy_pp_16x16_neon);
+        p.pu[LUMA_8x4].copy_pp = PFX(blockcopy_pp_8x4_neon);
+        p.pu[LUMA_8x8].copy_pp = PFX(blockcopy_pp_8x8_neon);
+        p.pu[LUMA_8x16].copy_pp = PFX(blockcopy_pp_8x16_neon);
+        p.pu[LUMA_8x32].copy_pp = PFX(blockcopy_pp_8x32_neon);
+        p.pu[LUMA_12x16].copy_pp = PFX(blockcopy_pp_12x16_neon); 
+        p.pu[LUMA_4x4].copy_pp = PFX(blockcopy_pp_4x4_neon);
+        p.pu[LUMA_4x8].copy_pp = PFX(blockcopy_pp_4x8_neon);
+        p.pu[LUMA_4x16].copy_pp = PFX(blockcopy_pp_4x16_neon);
+        p.pu[LUMA_16x4].copy_pp = PFX(blockcopy_pp_16x4_neon);
+        p.pu[LUMA_16x8].copy_pp = PFX(blockcopy_pp_16x8_neon);
+        p.pu[LUMA_16x12].copy_pp = PFX(blockcopy_pp_16x12_neon);
+        p.pu[LUMA_16x32].copy_pp = PFX(blockcopy_pp_16x32_neon);        
+        p.pu[LUMA_16x64].copy_pp = PFX(blockcopy_pp_16x64_neon);
+        p.pu[LUMA_24x32].copy_pp = PFX(blockcopy_pp_24x32_neon);
+        p.pu[LUMA_32x8].copy_pp = PFX(blockcopy_pp_32x8_neon);
+        p.pu[LUMA_32x16].copy_pp = PFX(blockcopy_pp_32x16_neon);
+        p.pu[LUMA_32x24].copy_pp = PFX(blockcopy_pp_32x24_neon);
+        p.pu[LUMA_32x32].copy_pp = PFX(blockcopy_pp_32x32_neon);
+        p.pu[LUMA_32x64].copy_pp = PFX(blockcopy_pp_32x64_neon);
+        p.pu[LUMA_48x64].copy_pp = PFX(blockcopy_pp_48x64_neon);
+        p.pu[LUMA_64x16].copy_pp = PFX(blockcopy_pp_64x16_neon);
+        p.pu[LUMA_64x32].copy_pp = PFX(blockcopy_pp_64x32_neon);
+        p.pu[LUMA_64x48].copy_pp = PFX(blockcopy_pp_64x48_neon);
+        p.pu[LUMA_64x64].copy_pp = PFX(blockcopy_pp_64x64_neon);
     }
 }
 } // namespace X265_NS
diff -r a2ff6747eaf7 -r 4f5720ccaf1a source/common/arm/blockcopy8.h
--- a/source/common/arm/blockcopy8.h	Tue Feb 02 16:29:04 2016 +0530
+++ b/source/common/arm/blockcopy8.h	Thu Feb 11 15:00:20 2016 +0530
@@ -27,5 +27,29 @@
 #define X265_BLOCKCOPY8_ARM_H
 
 void x265_blockcopy_pp_16x16_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_8x4_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_8x8_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_8x16_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_8x32_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_12x16_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_4x4_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_4x8_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_4x16_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_16x4_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_16x8_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_16x12_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_16x32_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_16x64_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_24x32_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_32x8_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_32x16_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_32x24_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_32x32_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_32x64_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_48x64_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_64x16_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_64x32_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_64x48_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
+void x265_blockcopy_pp_64x64_neon(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
 
 #endif // ifndef X265_I386_PIXEL_ARM_H
diff -r a2ff6747eaf7 -r 4f5720ccaf1a source/common/arm/mc-a.S
--- a/source/common/arm/mc-a.S	Tue Feb 02 16:29:04 2016 +0530
+++ b/source/common/arm/mc-a.S	Thu Feb 11 15:00:20 2016 +0530
@@ -100,3 +100,170 @@
     vst1.8          {q0}, [r0]
     bx              lr
 endfunc
+
+.macro blockcopy_pp_4xN_neon h          @ emit x265_blockcopy_pp_4x\h()_neon: copy a 4x\h 8-bit pixel block
+function x265_blockcopy_pp_4x\h\()_neon @ r0=dst, r1=dstStride, r2=src, r3=srcStride (strides in bytes)
+    push            {r4}                @ r4 is callee-saved under AAPCS, so preserve it
+.rept \h
+    ldr             r4, [r2], r3        @ load one 4-byte row, post-increment src by stride
+    str             r4, [r0], r1        @ store the row, post-increment dst by stride
+.endr
+    pop             {r4}    
+    bx              lr
+endfunc
+.endm
+
+blockcopy_pp_4xN_neon 4
+blockcopy_pp_4xN_neon 8
+blockcopy_pp_4xN_neon 16
+
+.macro blockcopy_pp_16xN_neon h          @ emit x265_blockcopy_pp_16x\h()_neon: copy a 16x\h 8-bit pixel block
+function x265_blockcopy_pp_16x\h\()_neon @ r0=dst, r1=dstStride, r2=src, r3=srcStride
+.rept \h    
+    vld1.8          {q0}, [r2], r3       @ load one 16-byte row, post-increment src by stride
+    vst1.8          {q0}, [r0], r1       @ store the row, post-increment dst by stride
+.endr
+    bx              lr
+endfunc
+.endm
+
+blockcopy_pp_16xN_neon 4
+blockcopy_pp_16xN_neon 8
+blockcopy_pp_16xN_neon 12
+
+.macro blockcopy_pp_16xN1_neon h i       @ looped 16-wide copy for tall blocks: h = height, i = h/8 loop count
+function x265_blockcopy_pp_16x\h\()_neon @ r0=dst, r1=dstStride, r2=src, r3=srcStride
+    push            {r4}                 @ r4 (loop counter) is callee-saved under AAPCS
+    mov             r4, #\i
+loop_16x\h\():
+.rept 8
+    vld1.8          {q0}, [r2], r3       @ copy one 16-byte row per iteration, 8 rows unrolled
+    vst1.8          {q0}, [r0], r1
+.endr
+    subs            r4, r4, #1
+    bne             loop_16x\h    
+    pop             {r4}
+    bx              lr
+endfunc
+.endm
+
+blockcopy_pp_16xN1_neon 32 4
+blockcopy_pp_16xN1_neon 64 8
+
+.macro blockcopy_pp_8xN_neon h          @ emit x265_blockcopy_pp_8x\h()_neon: copy an 8x\h 8-bit pixel block
+function x265_blockcopy_pp_8x\h\()_neon @ r0=dst, r1=dstStride, r2=src, r3=srcStride
+.rept \h    
+    vld1.8          {d0}, [r2], r3      @ load one 8-byte row, post-increment src by stride
+    vst1.8          {d0}, [r0], r1      @ store the row, post-increment dst by stride
+.endr
+    bx              lr
+endfunc
+.endm
+
+blockcopy_pp_8xN_neon 4
+blockcopy_pp_8xN_neon 8
+blockcopy_pp_8xN_neon 16
+blockcopy_pp_8xN_neon 32
+
+function x265_blockcopy_pp_12x16_neon   @ copy a 12x16 block as 8 NEON bytes + 4 scalar bytes per row
+    push            {r4, r5}            @ r4/r5 are callee-saved under AAPCS
+    mov             r5, #8
+    sub             r3, r5              @ srcStride -= 8: first load post-increments src by 8
+    sub             r1, r5              @ dstStride -= 8: same compensation on dst
+.rept 16
+    vld1.8          {d0}, [r2]!    
+    ldr             r4, [r2], r3        @ remaining 4 bytes; post-increment lands src on next row
+    vst1.8          {d0}, [r0]!
+    str             r4, [r0], r1        @ likewise dst advances to its next row
+.endr
+    pop            {r4, r5}    
+    bx              lr
+endfunc
+
+function x265_blockcopy_pp_24x32_neon   @ copy a 24x32 block, 24 bytes (d0-d2) per row
+    push            {r4}                @ r4 (loop counter) is callee-saved under AAPCS
+    mov             r4, #4              @ 4 iterations x 8 unrolled rows = 32 rows
+loop_24x32:
+.rept 8
+    vld1.8          {d0, d1, d2}, [r2], r3  @ load one 24-byte row, advance src by stride
+    vst1.8          {d0, d1, d2}, [r0], r1  @ store the row, advance dst by stride
+.endr
+    subs            r4, r4, #1
+    bne             loop_24x32    
+    pop             {r4}
+    bx              lr
+endfunc
+
+function x265_blockcopy_pp_32x8_neon    @ copy a 32x8 block, 32 bytes (q0-q1) per row, fully unrolled
+.rept 8
+    vld1.8          {q0, q1}, [r2], r3  @ load one 32-byte row, advance src by stride
+    vst1.8          {q0, q1}, [r0], r1  @ store the row, advance dst by stride
+.endr 
+    bx              lr
+endfunc
+
+.macro blockcopy_pp_32xN_neon h i        @ looped 32-wide copy: h = height, i = h/8 loop count
+function x265_blockcopy_pp_32x\h\()_neon @ r0=dst, r1=dstStride, r2=src, r3=srcStride
+    push            {r4}                 @ r4 (loop counter) is callee-saved under AAPCS
+    mov             r4, #\i
+loop_32x\h\():
+.rept 8
+    vld1.8          {q0, q1}, [r2], r3   @ copy one 32-byte row per iteration, 8 rows unrolled
+    vst1.8          {q0, q1}, [r0], r1
+.endr
+    subs            r4, r4, #1
+    bne             loop_32x\h    
+    pop             {r4}
+    bx              lr
+endfunc
+.endm
+
+blockcopy_pp_32xN_neon 16 2
+blockcopy_pp_32xN_neon 24 3
+blockcopy_pp_32xN_neon 32 4
+blockcopy_pp_32xN_neon 64 8
+
+function x265_blockcopy_pp_48x64_neon   @ copy a 48x64 block as 32 + 16 bytes per row
+    push            {r4, r5}            @ r4 (loop counter), r5 (scratch) are callee-saved
+    mov             r4, #8              @ 8 iterations x 8 unrolled rows = 64 rows
+    mov             r5, #32
+    sub             r3, r5              @ srcStride -= 32: first load post-increments src by 32
+    sub             r1, r5              @ dstStride -= 32: same compensation on dst
+loop_48x64:
+.rept 8
+    vld1.8          {q0, q1}, [r2]!     @ first 32 bytes of the row
+    vld1.8          {q2}, [r2], r3      @ last 16 bytes; post-increment lands src on next row
+    vst1.8          {q0, q1}, [r0]!
+    vst1.8          {q2}, [r0], r1      @ likewise dst advances to its next row
+.endr
+    subs            r4, r4, #1
+    bne             loop_48x64    
+    pop             {r4, r5}
+    bx              lr
+endfunc
+
+.macro blockcopy_pp_64xN_neon h i        @ 64-wide copy as 32 + 32 bytes per row: h = height, i = h/4 loop count
+function x265_blockcopy_pp_64x\h\()_neon @ r0=dst, r1=dstStride, r2=src, r3=srcStride
+    push            {r4, r5}             @ r4 (loop counter), r5 (scratch) are callee-saved
+    mov             r4, #\i
+    mov             r5, #32
+    sub             r3, r5               @ srcStride -= 32: first load post-increments src by 32
+    sub             r1, r5               @ dstStride -= 32: same compensation on dst
+loop_64x\h\():
+.rept 4
+    vld1.8          {q0, q1}, [r2]!      @ first 32 bytes of the row
+    vld1.8          {q2, q3}, [r2], r3   @ last 32 bytes; post-increment lands src on next row
+    vst1.8          {q0, q1}, [r0]!
+    vst1.8          {q2, q3}, [r0], r1   @ likewise dst advances to its next row
+.endr
+    subs            r4, r4, #1
+    bne             loop_64x\h    
+    pop             {r4, r5}
+    bx              lr
+endfunc
+.endm
+
+blockcopy_pp_64xN_neon 16 4
+blockcopy_pp_64xN_neon 32 8
+blockcopy_pp_64xN_neon 48 12
+blockcopy_pp_64xN_neon 64 16


More information about the x265-devel mailing list