<div dir="ltr">Doesnt apply at the tip.</div><div class="gmail_extra"><br><div class="gmail_quote">On Tue, Mar 22, 2016 at 6:57 PM,  <span dir="ltr"><<a href="mailto:ramya@multicorewareinc.com" target="_blank">ramya@multicorewareinc.com</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex"># HG changeset patch<br>
# User Ramya Sriraman <ramya@multicorewareinc.com>
# Date 1458625243 -19800
#      Tue Mar 22 11:10:43 2016 +0530
# Node ID a9014e51d47ee5cdfe381d02526b1c94082cd4bf
# Parent  e5c08206b8bfcce3a808b8f14848953c9cf51ce7
arm: Implement interp_8tap_vert_sp_NxN NEON

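For reviewers, here is a rough scalar model of what the new vsp primitives compute: an 8-tap vertical filter from the 16-bit intermediate samples produced by the horizontal pass down to clipped 8-bit pixels. This is a sketch, not x265's actual C reference; the names lumaFilter and vert_sp_ref are illustrative, and the constants assume the usual 8-bit build precisions (filter precision 6, internal precision 14), which agree with the (1 << 19) + 2048 offset and the #12 shift used in the assembly below.

    /* Rough scalar model of the 8-bit luma "vsp" filter. lumaFilter and
     * vert_sp_ref are illustrative names, not x265's internals. */
    #include <stddef.h>
    #include <stdint.h>

    static const int16_t lumaFilter[4][8] = {
        {  0, 0,   0, 64,  0,   0, 0,  0 },
        { -1, 4, -10, 58, 17,  -5, 1,  0 },
        { -1, 4, -11, 40, 40, -11, 4, -1 },
        {  0, 1,  -5, 17, 58, -10, 4, -1 }
    };

    static void vert_sp_ref(const int16_t* src, ptrdiff_t srcStride,
                            uint8_t* dst, ptrdiff_t dstStride,
                            int width, int height, int coeffIdx)
    {
        const int shift  = 6 + 14 - 8;       /* filter prec + internal prec - bit depth */
        const int offset = (1 << 19) + 2048; /* undo the intermediate bias, plus rounding */

        src -= 3 * srcStride;                /* taps start three rows above the output row */
        for (int y = 0; y < height; y++)
        {
            for (int x = 0; x < width; x++)
            {
                int sum = 0;
                for (int t = 0; t < 8; t++)
                    sum += src[x + t * srcStride] * lumaFilter[coeffIdx][t];
                sum = (sum + offset) >> shift;
                /* saturate exactly like vqshrun.s32 + vqmovn.u16 */
                dst[x] = (uint8_t)(sum < 0 ? 0 : (sum > 255 ? 255 : sum));
            }
            src += srcStride;
            dst += dstStride;
        }
    }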
diff -r e5c08206b8bf -r a9014e51d47e source/common/arm/asm-primitives.cpp
--- a/source/common/arm/asm-primitives.cpp      Wed Mar 16 16:52:23 2016 +0530
+++ b/source/common/arm/asm-primitives.cpp      Tue Mar 22 11:10:43 2016 +0530
@@ -328,6 +328,32 @@
         p.pu[LUMA_24x32].luma_vpp   = PFX(interp_8tap_vert_pp_24x32_neon);
         p.pu[LUMA_48x64].luma_vpp   = PFX(interp_8tap_vert_pp_48x64_neon);
         p.pu[LUMA_12x16].luma_vpp   = PFX(interp_8tap_vert_pp_12x16_neon);
+
+        p.pu[LUMA_4x4].luma_vsp     = PFX(interp_8tap_vert_sp_4x4_neon);
+        p.pu[LUMA_4x8].luma_vsp     = PFX(interp_8tap_vert_sp_4x8_neon);
+        p.pu[LUMA_4x16].luma_vsp    = PFX(interp_8tap_vert_sp_4x16_neon);
+        p.pu[LUMA_8x4].luma_vsp     = PFX(interp_8tap_vert_sp_8x4_neon);
+        p.pu[LUMA_8x8].luma_vsp     = PFX(interp_8tap_vert_sp_8x8_neon);
+        p.pu[LUMA_8x16].luma_vsp    = PFX(interp_8tap_vert_sp_8x16_neon);
+        p.pu[LUMA_8x32].luma_vsp    = PFX(interp_8tap_vert_sp_8x32_neon);
+        p.pu[LUMA_16x4].luma_vsp    = PFX(interp_8tap_vert_sp_16x4_neon);
+        p.pu[LUMA_16x8].luma_vsp    = PFX(interp_8tap_vert_sp_16x8_neon);
+        p.pu[LUMA_16x16].luma_vsp   = PFX(interp_8tap_vert_sp_16x16_neon);
+        p.pu[LUMA_16x32].luma_vsp   = PFX(interp_8tap_vert_sp_16x32_neon);
+        p.pu[LUMA_16x64].luma_vsp   = PFX(interp_8tap_vert_sp_16x64_neon);
+        p.pu[LUMA_16x12].luma_vsp   = PFX(interp_8tap_vert_sp_16x12_neon);
+        p.pu[LUMA_32x8].luma_vsp    = PFX(interp_8tap_vert_sp_32x8_neon);
+        p.pu[LUMA_32x16].luma_vsp   = PFX(interp_8tap_vert_sp_32x16_neon);
+        p.pu[LUMA_32x32].luma_vsp   = PFX(interp_8tap_vert_sp_32x32_neon);
+        p.pu[LUMA_32x64].luma_vsp   = PFX(interp_8tap_vert_sp_32x64_neon);
+        p.pu[LUMA_32x24].luma_vsp   = PFX(interp_8tap_vert_sp_32x24_neon);
+        p.pu[LUMA_64x16].luma_vsp   = PFX(interp_8tap_vert_sp_64x16_neon);
+        p.pu[LUMA_64x32].luma_vsp   = PFX(interp_8tap_vert_sp_64x32_neon);
+        p.pu[LUMA_64x64].luma_vsp   = PFX(interp_8tap_vert_sp_64x64_neon);
+        p.pu[LUMA_64x48].luma_vsp   = PFX(interp_8tap_vert_sp_64x48_neon);
+        p.pu[LUMA_24x32].luma_vsp   = PFX(interp_8tap_vert_sp_24x32_neon);
+        p.pu[LUMA_48x64].luma_vsp   = PFX(interp_8tap_vert_sp_48x64_neon);
+        p.pu[LUMA_12x16].luma_vsp   = PFX(interp_8tap_vert_sp_12x16_neon);
     }
     if (cpuMask & X265_CPU_ARMV6)
     {
diff -r e5c08206b8bf -r a9014e51d47e source/common/arm/ipfilter8.S
--- a/source/common/arm/ipfilter8.S     Wed Mar 16 16:52:23 2016 +0530
+++ b/source/common/arm/ipfilter8.S     Tue Mar 22 11:10:43 2016 +0530
@@ -32,6 +32,7 @@
 .word -1,-1,4,4,-11,-11,40,40,40,40,-11,-11,4,4,-1,-1
 .word 0,0,1,1,-5,-5,17,17,58,58,-10,-10,4,4,-1,-1

+
 .text

 // filterPixelToShort(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride)
@@ -698,6 +699,102 @@
     bx          lr
 endfunc

+.macro LUMA_VPP_4xN h
+function x265_interp_8tap_vert_pp_4x\h\()_neon
+    push           {r4, r5, r6}
+    ldr             r4, [sp, #4 * 3]
+    mov             r5, r4, lsl #6
+    mov             r4, r1, lsl #2
+    sub             r4, r1
+    sub             r0, r4
+
+    mov             r4, #32
+    vdup.32         q8, r4
+    mov             r4, #\h
+
+.loop_4x\h:
+    movrel          r12, g_lumaFilter
+    add             r12, r5
+    mov             r6, r0
+
+    pld [r6]
+    vld1.u32        d0[0], [r6], r1
+    pld [r6]
+    vld1.u32        d0[1], [r6], r1
+    pld [r6]
+    vld1.u32        d1[0], [r6], r1
+    pld [r6]
+    vld1.u32        d1[1], [r6], r1
+    pld [r6]
+    vld1.u32        d2[0], [r6], r1
+    pld [r6]
+    vld1.u32        d2[1], [r6], r1
+    pld [r6]
+    vld1.u32        d3[0], [r6], r1
+    pld [r6]
+    vld1.u32        d3[1], [r6], r1
+
+    veor.u8         q9, q9
+
+    vmovl.u8        q11, d0
+    vmovl.u16       q12, d22
+    vmovl.u16       q13, d23
+    vld1.s32        d20, [r12]!
+    vmov.s32        d21, d20
+    vmla.s32        q9, q12, q10
+    vld1.s32        d20, [r12]!
+    vmov.s32        d21, d20
+    vmla.s32        q9, q13, q10
+
+    vmovl.u8        q11, d1
+    vmovl.u16       q12, d22
+    vmovl.u16       q13, d23
+    vld1.s32        d20, [r12]!
+    vmov.s32        d21, d20
+    vmla.s32        q9, q12, q10
+    vld1.s32        d20, [r12]!
+    vmov.s32        d21, d20
+    vmla.s32        q9, q13, q10
+
+    vmovl.u8        q11, d2
+    vmovl.u16       q12, d22
+    vmovl.u16       q13, d23
+    vld1.s32        d20, [r12]!
+    vmov.s32        d21, d20
+    vmla.s32        q9, q12, q10
+    vld1.s32        d20, [r12]!
+    vmov.s32        d21, d20
+    vmla.s32        q9, q13, q10
+
+    vmovl.u8        q11, d3
+    vmovl.u16       q12, d22
+    vmovl.u16       q13, d23
+    vld1.s32        d20, [r12]!
+    vmov.s32        d21, d20
+    vmla.s32        q9, q12, q10
+    vld1.s32        d20, [r12]!
+    vmov.s32        d21, d20
+    vmla.s32        q9, q13, q10
+
+    vadd.s32        q9, q8
+    vqshrun.s32     d0, q9, #6
+    vqmovn.u16      d0, q0
+    vst1.u32        d0[0], [r2], r3
+
+    add             r0, r1
+    subs            r4, #1
+    bne             .loop_4x\h
+
+    pop             {r4, r5, r6}
+    bx              lr
+    .ltorg
+endfunc
+.endm
+
+LUMA_VPP_4xN 4
+LUMA_VPP_4xN 8
+LUMA_VPP_4xN 16
+
 .macro qpel_filter_0_32b
     vmov.i16        d17, #64
     vmovl.u8        q11, d3
@@ -947,101 +1044,6 @@
 LUMA_VPP 24 32
 LUMA_VPP 48 64

-.macro LUMA_VPP_4xN h
-function x265_interp_8tap_vert_pp_4x\h\()_neon
-    push           {r4, r5, r6}
-    ldr             r4, [sp, #4 * 3]
-    mov             r5, r4, lsl #6
-    mov             r4, r1, lsl #2
-    sub             r4, r1
-    sub             r0, r4
-
-    mov             r4, #32
-    vdup.32         q8, r4
-    mov             r4, #\h
-
-.loop_4x\h:
-    movrel          r12, g_lumaFilter
-    add             r12, r5
-    mov             r6, r0
-
-    pld [r6]
-    vld1.u32        d0[0], [r6], r1
-    pld [r6]
-    vld1.u32        d0[1], [r6], r1
-    pld [r6]
-    vld1.u32        d1[0], [r6], r1
-    pld [r6]
-    vld1.u32        d1[1], [r6], r1
-    pld [r6]
-    vld1.u32        d2[0], [r6], r1
-    pld [r6]
-    vld1.u32        d2[1], [r6], r1
-    pld [r6]
-    vld1.u32        d3[0], [r6], r1
-    pld [r6]
-    vld1.u32        d3[1], [r6], r1
-
-    veor.u8         q9, q9
-
-    vmovl.u8        q11, d0
-    vmovl.u16       q12, d22
-    vmovl.u16       q13, d23
-    vld1.s32        d20, [r12]!
-    vmov.s32        d21, d20
-    vmla.s32        q9, q12, q10
-    vld1.s32        d20, [r12]!
-    vmov.s32        d21, d20
-    vmla.s32        q9, q13, q10
-
-    vmovl.u8        q11, d1
-    vmovl.u16       q12, d22
-    vmovl.u16       q13, d23
-    vld1.s32        d20, [r12]!
-    vmov.s32        d21, d20
-    vmla.s32        q9, q12, q10
-    vld1.s32        d20, [r12]!
-    vmov.s32        d21, d20
-    vmla.s32        q9, q13, q10
-
-    vmovl.u8        q11, d2
-    vmovl.u16       q12, d22
-    vmovl.u16       q13, d23
-    vld1.s32        d20, [r12]!
-    vmov.s32        d21, d20
-    vmla.s32        q9, q12, q10
-    vld1.s32        d20, [r12]!
-    vmov.s32        d21, d20
-    vmla.s32        q9, q13, q10
-
-    vmovl.u8        q11, d3
-    vmovl.u16       q12, d22
-    vmovl.u16       q13, d23
-    vld1.s32        d20, [r12]!
-    vmov.s32        d21, d20
-    vmla.s32        q9, q12, q10
-    vld1.s32        d20, [r12]!
-    vmov.s32        d21, d20
-    vmla.s32        q9, q13, q10
-
-    vadd.s32        q9, q8
-    vqshrun.s32     d0, q9, #6
-    vqmovn.u16      d0, q0
-    vst1.u32        d0[0], [r2], r3
-
-    add             r0, r1
-    subs            r4, #1
-    bne             .loop_4x\h
-
-    pop             {r4, r5, r6}
-    bx              lr
-endfunc
-.endm
-
-LUMA_VPP_4xN 4
-LUMA_VPP_4xN 8
-LUMA_VPP_4xN 16
-
 function x265_interp_8tap_vert_pp_12x16_neon
     push            {r4, r5, r6, r7}
     ldr             r5, [sp, #4 * 4]
@@ -1050,7 +1052,7 @@
     sub             r0, r4

     mov             r4, #16
-.loop_12x16:
+.loop_vpp_12x16:

     mov             r6, r0
     mov             r7, r2
@@ -1083,6 +1085,9 @@
     beq              2f
     cmp             r5,#3
     beq              3f
+0:
+    qpel_filter_0_32b
+    b            5f
 1:
     qpel_filter_1_32b
     b            5f
@@ -1092,11 +1097,6 @@
 3:
     qpel_filter_3_32b
     b            5f
-0:
-    vmov.i16        d17, #64
-    vmovl.u8        q11, d3
-    vmull.s16       q9, d22, d17    // 64*d0
-    vmull.s16       q10, d23, d17   // 64*d1
 5:
     mov             r12,#32
     vdup.32         q8, r12
@@ -1137,6 +1137,9 @@
     beq              2f
     cmp             r5,#3
     beq              3f
+0:
+    qpel_filter_0_32b
+    b            5f
 1:
     qpel_filter_1_32b
     b            5f
@@ -1146,11 +1149,6 @@
 3:
     qpel_filter_3_32b
     b            5f
-0:
-    vmov.i16        d17, #64
-    vmovl.u8        q11, d3
-    vmull.s16       q9, d22, d17    // 64*d0
-    vmull.s16       q10, d23, d17   // 64*d1
 5:
     mov             r12,#32
     vdup.32         q8, r12
@@ -1164,8 +1162,448 @@
     add             r0, r1
     add             r2, r3
     subs            r4, #1
-    bne             .loop_12x16
+    bne             .loop_vpp_12x16

     pop             {r4, r5, r6, r7}
     bx              lr
 endfunc
+//**************luma_vsp************
+.macro LUMA_VSP_4xN h
+function x265_interp_8tap_vert_sp_4x\h\()_neon
+    push            {r4, r5, r6}
+    ldr             r4, [sp, #4 * 3]
+    mov             r5, r4, lsl #6
+    lsl             r1, #1
+    mov             r4, r1, lsl #2
+    sub             r4, r1
+    sub             r0, r4
+
+    mov             r12, #1
+    lsl             r12, #19
+    add             r12, #2048
+    vdup.32         q8, r12
+    mov             r4, #\h
+.loop_vsp_4x\h:
+    movrel          r12, g_lumaFilter
+    add             r12, r5
+    mov             r6, r0
+
+    pld [r6]
+    vld1.u16         d0, [r6], r1
+    pld [r6]
+    vld1.u16         d1, [r6], r1
+    pld [r6]
+    vld1.u16         d2, [r6], r1
+    pld [r6]
+    vld1.u16         d3, [r6], r1
+    pld [r6]
+    vld1.u16         d4, [r6], r1
+    pld [r6]
+    vld1.u16         d5, [r6], r1
+    pld [r6]
+    vld1.u16         d6, [r6], r1
+    pld [r6]
+    vld1.u16         d7, [r6], r1
+
+    veor.u8         q9, q9
+
+    vmovl.s16       q11, d0
+    vld1.s32        d24, [r12]!
+    vmov.s32        d25, d24
+    vmla.s32        q9, q12, q11
+
+    vmovl.s16       q11, d1
+    vld1.s32        d24, [r12]!
+    vmov.s32        d25, d24
+    vmla.s32        q9, q12, q11
+
+    vmovl.s16       q11, d2
+    vld1.s32        d24, [r12]!
+    vmov.s32        d25, d24
+    vmla.s32        q9, q12, q11
+
+    vmovl.s16       q11, d3
+    vld1.s32        d24, [r12]!
+    vmov.s32        d25, d24
+    vmla.s32        q9, q12, q11
+
+    vmovl.s16       q11, d4
+    vld1.s32        d24, [r12]!
+    vmov.s32        d25, d24
+    vmla.s32        q9, q12, q11
+
+    vmovl.s16       q11, d5
+    vld1.s32        d24, [r12]!
+    vmov.s32        d25, d24
+    vmla.s32        q9, q12, q11
+
+    vmovl.s16       q11, d6
+    vld1.s32        d24, [r12]!
+    vmov.s32        d25, d24
+    vmla.s32        q9, q12, q11
+
+    vmovl.s16       q11, d7
+    vld1.s32        d24, [r12]!
+    vmov.s32        d25, d24
+    vmla.s32        q9, q12, q11
+
+
+    vadd.s32        q9, q8
+    vqshrun.s32     d0, q9, #12
+    vqmovn.u16      d0, q0
+    vst1.u32        d0[0], [r2], r3
+
+    add             r0, r1
+    subs            r4, #1
+    bne             .loop_vsp_4x\h
+    pop             {r4, r5, r6}
+    bx              lr
+    .ltorg
+endfunc
+.endm
+
+LUMA_VSP_4xN 4
+LUMA_VSP_4xN 8
+LUMA_VSP_4xN 16
+
+.macro qpel_filter_0_32b_1
+    vmov.i16        d17, #64
+    vmull.s16       q9, d6, d17    // 64*d0
+    vmull.s16       q10, d7, d17   // 64*d1
+.endm
+
+.macro qpel_filter_1_32b_1
+    vmov.i16        d16, #58
+    vmov.i16        d17, #10
+    vmull.s16       q9, d6, d16    // 58 * d0
+    vmull.s16       q10, d7, d16   // 58 * d1
+    vmov.i16        d16, #17
+    vmull.s16       q11, d4, d17   // 10 * c0
+    vmull.s16       q12, d5, d17   // 10 * c1
+    vmov.i16        d17, #5
+    vmull.s16       q13, d8, d16   // 17 * e0
+    vmull.s16       q14, d9, d16   // 17 * e1
+    vmull.s16       q15, d10, d17  //  5 * f0
+    vmull.s16       q8, d11, d17   //  5 * f1
+    vsub.s32        q9, q11        // 58 * d0 - 10 * c0
+    vsub.s32        q10, q12       // 58 * d1 - 10 * c1
+    vshll.s16       q11, d2, #2    // 4 * b0
+    vshll.s16       q12, d3, #2    // 4 * b1
+    vadd.s32        q9, q13        // 58 * d0 - 10 * c0 + 17 * e0
+    vadd.s32        q10, q14       // 58 * d1 - 10 * c1 + 17 * e1
+    vsubl.s16       q13, d12, d0   // g0 - a0
+    vsubl.s16       q14, d13, d1   // g1 - a1
+    vadd.s32        q9, q11        // 58 * d0 - 10 * c0 + 17 * e0 + 4 * b0
+    vadd.s32        q10, q12       // 58 * d1 - 10 * c1 + 17 * e1 + 4 * b1
+    vsub.s32        q13, q15       // g0 - a0 - 5 * f0
+    vsub.s32        q14, q8        // g1 - a1 - 5 * f1
+    vadd.s32        q9, q13        // 58 * d0 - 10 * c0 + 17 * e0 + 4 * b0 + g0 - a0 - 5 * f0
+    vadd.s32        q10, q14       // 58 * d1 - 10 * c1 + 17 * e1 + 4 * b1 + g1 - a1 - 5 * f1
+.endm
+
+.macro qpel_filter_2_32b_1
+    vmov.i32        q8, #11
+    vaddl.s16       q9, d6, d8    // d0 + e0
+    vaddl.s16       q10, d7, d9   // d1 + e1
+    vaddl.s16       q11, d4, d10  // c0 + f0
+    vaddl.s16       q12, d5, d11  // c1 + f1
+    vmul.s32        q11, q8       // 11 * (c0 + f0)
+    vmul.s32        q12, q8       // 11 * (c1 + f1)
+    vmov.i32        q8, #40
+    vaddl.s16       q15, d2, d12  // b0 + g0
+    vmul.s32        q9, q8        // 40 * (d0 + e0)
+    vmul.s32        q10, q8       // 40 * (d1 + e1)
+    vaddl.s16       q8, d3, d13   // b1 + g1
+    vaddl.s16       q13, d0, d14  // a0 + h0
+    vaddl.s16       q14, d1, d15  // a1 + h1
+    vshl.s32        q15, #2       // 4*(b0+g0)
+    vshl.s32        q8, #2        // 4*(b1+g1)
+    vadd.s32        q11, q13      // 11 * (c0 + f0) + a0 + h0
+    vadd.s32        q12, q14      // 11 * (c1 + f1) + a1 + h1
+    vadd.s32        q9, q15       // 40 * (d0 + e0) + 4*(b0+g0)
+    vadd.s32        q10, q8       // 40 * (d1 + e1) + 4*(b1+g1)
+    vsub.s32        q9, q11       // 40 * (d0 + e0) + 4*(b0+g0) - (11 * (c0 + f0) + a0 + h0)
+    vsub.s32        q10, q12      // 40 * (d1 + e1) + 4*(b1+g1) - (11 * (c1 + f1) + a1 + h1)
+.endm
+
+.macro qpel_filter_3_32b_1
+    vmov.i16        d16, #17
+    vmov.i16        d17, #5
+    vmull.s16       q9, d6, d16   // 17 * d0
+    vmull.s16       q10, d7, d16  // 17 * d1
+    vmull.s16       q11, d4, d17  // 5 * c0
+    vmull.s16       q12, d5, d17  // 5* c1
+    vmov.i16        d16, #58
+    vmull.s16       q13, d8, d16  // 58 * e0
+    vmull.s16       q14, d9, d16  // 58 * e1
+    vmov.i16        d17, #10
+    vmull.s16       q15, d10, d17 // 10 * f0
+    vmull.s16       q8, d11, d17  // 10 * f1
+    vsub.s32        q9, q11       // 17 * d0 - 5 * c0
+    vsub.s32        q10, q12      // 17 * d1 - 5 * c1
+    vshll.s16       q11, d12, #2  // 4 * g0
+    vshll.s16       q12, d13, #2  // 4 * g1
+    vadd.s32        q9, q13       // 17 * d0 - 5 * c0+ 58 * e0
+    vadd.s32        q10, q14      // 17 * d1 - 5 * c1 + 58 * e1
+    vsubl.s16       q13, d2, d14  // b0 - h0
+    vsubl.s16       q14, d3, d15  // b1 - h1
+    vadd.s32        q9, q11       // 17 * d0 - 5 * c0+ 58 * e0 +4 * g0
+    vadd.s32        q10, q12      // 17 * d1 - 5 * c1 + 58 * e1+4 * g1
+    vsub.s32        q13, q15      // 17 * d0 - 5 * c0+ 58 * e0 +4 * g0 -10 * f0
+    vsub.s32        q14, q8       // 17 * d1 - 5 * c1 + 58 * e1+4 * g1 - 10*f1
+    vadd.s32        q9, q13       //  17 * d0 - 5 * c0+ 58 * e0 +4 * g0 -10 * f0 +b0 - h0
+    vadd.s32        q10, q14      // 17 * d1 - 5 * c1 + 58 * e1+4 * g1 - 10*f1 + b1 - h1
+.endm
+
+.macro FILTER_VSP a b filterv
+
+    vpush           { q4 - q7}
+.loop_\filterv\()_\a\()x\b:
+
+    mov             r7, r2
+    mov             r6, r0
+    eor             r8, r8
+
+.loop_w8_\filterv\()_\a\()x\b:
+
+    add             r6, r0, r8
+
+    pld [r6]
+    vld1.u16         {q0}, [r6], r1
+    pld [r6]
+    vld1.u16         {q1}, [r6], r1
+    pld [r6]
+    vld1.u16         {q2}, [r6], r1
+    pld [r6]
+    vld1.u16         {q3}, [r6], r1
+    pld [r6]
+    vld1.u16         {q4}, [r6], r1
+    pld [r6]
+    vld1.u16         {q5}, [r6], r1
+    pld [r6]
+    vld1.u16         {q6}, [r6], r1
+    pld [r6]
+    vld1.u16         {q7}, [r6], r1
+
+    veor.u8         q9, q9
+    veor.u8         q10, q10
+
+   \filterv
+
+    mov             r12,#1
+    lsl             r12, #19
+    add             r12, #2048
+    vdup.32         q8, r12
+    vadd.s32        q9, q8
+    vqshrun.s32     d0, q9, #12
+    vadd.s32        q10, q8
+    vqshrun.s32     d1, q10, #12
+    vqmovn.u16      d0, q0
+    vst1.u8         d0, [r7]!
+
+
+    add             r8, #16
+    mov             r12, #\a
+    lsl             r12, #1
+    cmp             r8, r12
+    blt             .loop_w8_\filterv\()_\a\()x\b
+
+    add             r0, r1
+    add             r2, r3
+    subs            r4, #1
+    bne             .loop_\filterv\()_\a\()x\b
+
+    vpop            { q4 - q7}
+
+.endm
+
+.macro LUMA_VSP  w h
+function x265_interp_8tap_vert_sp_\w\()x\h\()_neon
+
+    push            {r4, r5, r6, r7, r8}
+    ldr             r5, [sp, #4 * 5]
+    lsl             r1, #1
+    mov             r4, r1, lsl #2
+    sub             r4, r1
+    sub             r0, r4
+    mov             r4, #\h
+
+    cmp             r5, #0
+    beq              0f
+    cmp             r5, #1
+    beq              1f
+    cmp             r5, #2
+    beq              2f
+    cmp             r5, #3
+    beq              3f
+0:
+    FILTER_VSP  \w \h qpel_filter_0_32b_1
+    b            5f
+1:
+    FILTER_VSP  \w \h qpel_filter_1_32b_1
+    b            5f
+2:
+    FILTER_VSP  \w \h qpel_filter_2_32b_1
+    b            5f
+3:
+    FILTER_VSP  \w \h qpel_filter_3_32b_1
+    b            5f
+5:
+    pop             {r4, r5, r6, r7, r8}
+    bx              lr
+endfunc
+.endm
+
+
+LUMA_VSP 8 4
+LUMA_VSP 8 8
+LUMA_VSP 8 16
+LUMA_VSP 8 32
+LUMA_VSP 16 4
+LUMA_VSP 16 8
+LUMA_VSP 16 16
+LUMA_VSP 16 32
+LUMA_VSP 16 64
+LUMA_VSP 16 12
+LUMA_VSP 32 8
+LUMA_VSP 32 16
+LUMA_VSP 32 32
+LUMA_VSP 32 64
+LUMA_VSP 32 24
+LUMA_VSP 64 16
+LUMA_VSP 64 32
+LUMA_VSP 64 64
+LUMA_VSP 64 48
+LUMA_VSP 24 32
+LUMA_VSP 48 64
+
+function x265_interp_8tap_vert_sp_12x16_neon
+    push            {r4, r5, r6, r7}
+    ldr             r5, [sp, #4 * 4]
+    lsl             r1, #1
+    mov             r4, r1, lsl #2
+    sub             r4, r1
+    sub             r0, r4
+
+    mov             r4, #16
+    vpush           { q4 - q7}
+.loop1_12x16:
+
+    mov             r6, r0
+    mov             r7, r2
+
+    pld [r6]
+    vld1.u16         {q0}, [r6], r1
+    pld [r6]
+    vld1.u16         {q1}, [r6], r1
+    pld [r6]
+    vld1.u8          {q2}, [r6], r1
+    pld [r6]
+    vld1.u16         {q3}, [r6], r1
+    pld [r6]
+    vld1.u16         {q4}, [r6], r1
+    pld [r6]
+    vld1.u16         {q5}, [r6], r1
+    pld [r6]
+    vld1.u16         {q6}, [r6], r1
+    pld [r6]
+    vld1.u16         {q7}, [r6], r1
+
+    veor.u8         q9, q9
+    veor.u8         q10, q10
+
+    cmp             r5,#0
+    beq              0f
+    cmp             r5,#1
+    beq              1f
+    cmp             r5,#2
+    beq              2f
+    cmp             r5,#3
+    beq              3f
+0:
+    qpel_filter_0_32b_1
+    b            5f
+1:
+    qpel_filter_1_32b_1
+    b            5f
+2:
+    qpel_filter_2_32b_1
+    b            5f
+3:
+    qpel_filter_3_32b_1
+    b            5f
+5:
+    mov             r12,#1
+    lsl             r12, #19
+    add             r12, #2048
+    vdup.32         q8, r12
+    vadd.s32        q9, q8
+    vqshrun.s32     d0, q9, #12
+    vadd.s32        q10, q8
+    vqshrun.s32     d1, q10, #12
+    vqmovn.u16      d0, q0
+    vst1.u8         d0, [r7]!
+
+    add             r6, r0, #16
+
+    pld [r6]
+    vld1.u16         {q0}, [r6], r1
+    pld [r6]
+    vld1.u16         {q1}, [r6], r1
+    pld [r6]
+    vld1.u8          {q2}, [r6], r1
+    pld [r6]
+    vld1.u16         {q3}, [r6], r1
+    pld [r6]
+    vld1.u16         {q4}, [r6], r1
+    pld [r6]
+    vld1.u16         {q5}, [r6], r1
+    pld [r6]
+    vld1.u16         {q6}, [r6], r1
+    pld [r6]
+    vld1.u16         {q7}, [r6], r1
+
+    veor.u8         q9, q9
+    veor.u8         q10, q10
+
+    cmp             r5,#0
+    beq              0f
+    cmp             r5,#1
+    beq              1f
+    cmp             r5,#2
+    beq              2f
+    cmp             r5,#3
+    beq              3f
+0:
+    qpel_filter_0_32b_1
+    b            5f
+1:
+    qpel_filter_1_32b_1
+    b            5f
+2:
+    qpel_filter_2_32b_1
+    b            5f
+3:
+    qpel_filter_3_32b_1
+    b            5f
+5:
+    mov             r12,#1
+    lsl             r12, #19
+    add             r12, #2048
+    vdup.32         q8, r12
+    vadd.s32        q9, q8
+    vqshrun.s32     d0, q9, #12
+    vadd.s32        q10, q8
+    vqshrun.s32     d1, q10, #12
+    vqmovn.u16      d0, q0
+    vst1.u32        d0[0], [r7]!
+
+    add             r0, r1
+    add             r2, r3
+    subs            r4, #1
+    bne             .loop1_12x16
+    vpop            { q4 - q7}
+    pop             {r4, r5, r6, r7}
+    bx              lr
+endfunc
+
diff -r e5c08206b8bf -r a9014e51d47e source/common/arm/ipfilter8.h
--- a/source/common/arm/ipfilter8.h     Wed Mar 16 16:52:23 2016 +0530
+++ b/source/common/arm/ipfilter8.h     Tue Mar 22 11:10:43 2016 +0530
@@ -76,4 +76,30 @@
 void x265_interp_8tap_vert_pp_24x32_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
 void x265_interp_8tap_vert_pp_48x64_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
 void x265_interp_8tap_vert_pp_12x16_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+
+void x265_interp_8tap_vert_sp_4x4_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_4x8_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_4x16_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_8x4_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_8x8_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_8x16_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_8x32_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_16x4_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_16x8_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_16x16_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_16x32_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_16x64_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_16x12_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_32x8_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_32x16_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_32x32_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_32x64_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_32x24_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_64x16_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_64x32_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_64x64_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_64x48_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_24x32_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_48x64_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_8tap_vert_sp_12x16_neon(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
 #endif // ifndef X265_IPFILTER8_ARM_H
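
A note on the rounding constant that the new functions materialize with mov r12, #1 / lsl r12, #19 / add r12, #2048: assuming x265's usual interpolation precisions, it decomposes as sketched below. The names are a hypothetical restatement, not code from the tree; the values match the immediates in the patch.

    /* Hypothetical restatement of the immediates used before each vqshrun.s32 #12. */
    enum {
        FILTER_PREC   = 6,                               /* qpel coefficients sum to 64 = 1 << 6  */
        INTERNAL_PREC = 14,                              /* precision of the int16 intermediates  */
        INTERNAL_OFFS = 1 << (INTERNAL_PREC - 1),        /* 8192: bias carried by the sp input    */
        SHIFT         = FILTER_PREC + INTERNAL_PREC - 8, /* 12 for 8-bit pixels                   */
        OFFSET        = (INTERNAL_OFFS << FILTER_PREC)   /* 8192 << 6 == 1 << 19                  */
                      + (1 << (SHIFT - 1))               /* plus 2048 for round-to-nearest        */
    };                                                   /* OFFSET == 526336 == (1 << 19) + 2048  */

vqshrun.s32 #12 then performs the biased downshift with unsigned saturation in a single instruction, and vqmovn.u16 narrows the result to the final 8-bit pixels.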
_______________________________________________
x265-devel mailing list
x265-devel@videolan.org
https://mailman.videolan.org/listinfo/x265-devel

--
Deepthi Nandakumar
Engineering Manager, x265
Multicoreware, Inc