<div dir="ltr">Please ignore the previous patch. Below is the final version. <br># HG changeset patch<br># User Ramya Sriraman<<a href="mailto:ramya@multicorewareinc.com">ramya@multicorewareinc.com</a>><br># Date 1458127343 -19800<br>#      Wed Mar 16 16:52:23 2016 +0530<br># Node ID e5c08206b8bfcce3a808b8f14848953c9cf51ce7<br># Parent  b09998b1256ed8e652c4bf2d688cbfab3a84d5cd<br>arm: Implement interp_8tap_vert_pp_NxN NEON<br><br>diff -r b09998b1256e -r e5c08206b8bf source/common/arm/asm-primitives.cpp<br>--- a/source/common/arm/asm-primitives.cpp    Wed Mar 16 14:24:48 2016 +0530<br>+++ b/source/common/arm/asm-primitives.cpp    Wed Mar 16 16:52:23 2016 +0530<br>@@ -302,6 +302,32 @@<br>         // planecopy<br>         p.planecopy_cp = PFX(pixel_planecopy_cp_neon);<br> <br>+        // vertical interpolation filters<br>+        p.pu[LUMA_4x4].luma_vpp     = PFX(interp_8tap_vert_pp_4x4_neon);<br>+        p.pu[LUMA_4x8].luma_vpp     = PFX(interp_8tap_vert_pp_4x8_neon);<br>+        p.pu[LUMA_4x16].luma_vpp    = PFX(interp_8tap_vert_pp_4x16_neon);<br>+        p.pu[LUMA_8x4].luma_vpp     = PFX(interp_8tap_vert_pp_8x4_neon);<br>+        p.pu[LUMA_8x8].luma_vpp     = PFX(interp_8tap_vert_pp_8x8_neon);<br>+        p.pu[LUMA_8x16].luma_vpp    = PFX(interp_8tap_vert_pp_8x16_neon);<br>+        p.pu[LUMA_8x32].luma_vpp    = PFX(interp_8tap_vert_pp_8x32_neon);<br>+        p.pu[LUMA_16x4].luma_vpp    = PFX(interp_8tap_vert_pp_16x4_neon);<br>+        p.pu[LUMA_16x8].luma_vpp    = PFX(interp_8tap_vert_pp_16x8_neon);<br>+        p.pu[LUMA_16x16].luma_vpp   = PFX(interp_8tap_vert_pp_16x16_neon);<br>+        p.pu[LUMA_16x32].luma_vpp   = PFX(interp_8tap_vert_pp_16x32_neon);<br>+        p.pu[LUMA_16x64].luma_vpp   = PFX(interp_8tap_vert_pp_16x64_neon);<br>+        p.pu[LUMA_16x12].luma_vpp   = PFX(interp_8tap_vert_pp_16x12_neon);<br>+        p.pu[LUMA_32x8].luma_vpp    = PFX(interp_8tap_vert_pp_32x8_neon);<br>+        p.pu[LUMA_32x16].luma_vpp   = PFX(interp_8tap_vert_pp_32x16_neon);<br>+        p.pu[LUMA_32x32].luma_vpp   = PFX(interp_8tap_vert_pp_32x32_neon);<br>+        p.pu[LUMA_32x64].luma_vpp   = PFX(interp_8tap_vert_pp_32x64_neon);<br>+        p.pu[LUMA_32x24].luma_vpp   = PFX(interp_8tap_vert_pp_32x24_neon);<br>+        p.pu[LUMA_64x16].luma_vpp   = PFX(interp_8tap_vert_pp_64x16_neon);<br>+        p.pu[LUMA_64x32].luma_vpp   = PFX(interp_8tap_vert_pp_64x32_neon);<br>+        p.pu[LUMA_64x64].luma_vpp   = PFX(interp_8tap_vert_pp_64x64_neon);<br>+        p.pu[LUMA_64x48].luma_vpp   = PFX(interp_8tap_vert_pp_64x48_neon);<br>+        p.pu[LUMA_24x32].luma_vpp   = PFX(interp_8tap_vert_pp_24x32_neon);<br>+        p.pu[LUMA_48x64].luma_vpp   = PFX(interp_8tap_vert_pp_48x64_neon);<br>+        p.pu[LUMA_12x16].luma_vpp   = PFX(interp_8tap_vert_pp_12x16_neon);<br>     }<br>     if (cpuMask & X265_CPU_ARMV6)<br>     {<br>diff -r b09998b1256e -r e5c08206b8bf source/common/arm/ipfilter8.S<br>--- a/source/common/arm/ipfilter8.S    Wed Mar 16 14:24:48 2016 +0530<br>+++ b/source/common/arm/ipfilter8.S    Wed Mar 16 16:52:23 2016 +0530<br>@@ -24,8 +24,13 @@<br> #include "asm.S"<br> <br> .section .rodata<br>+.align 4<br> <br>-.align 4<br>+g_lumaFilter:<br>+.word 0,0,0,0,0,0,64,64,0,0,0,0,0,0,0,0<br>+.word -1,-1,4,4,-10,-10,58,58,17,17,-5,-5,1,1,0,0<br>+.word -1,-1,4,4,-11,-11,40,40,40,40,-11,-11,4,4,-1,-1<br>+.word 0,0,1,1,-5,-5,17,17,58,58,-10,-10,4,4,-1,-1 <br> <br> .text<br> <br>@@ -692,3 +697,475 @@<br>     bgt         .loop_filterP2S_48x64<br>     bx          lr<br> endfunc<br>+<br>+.macro qpel_filter_0_32b<br>+    
vmov.i16        d17, #64<br>+    vmovl.u8        q11, d3<br>+    vmull.s16       q9, d22, d17    // 64*d0<br>+    vmull.s16       q10, d23, d17   // 64*d1<br>+.endm<br>+<br>+.macro qpel_filter_1_32b<br>+    vmov.i16        d16, #58<br>+    vmovl.u8        q11, d3<br>+    vmull.s16       q9, d22, d16        // 58 * d0<br>+    vmull.s16       q10, d23, d16       // 58 * d1<br>+<br>+    vmov.i16        d17, #10<br>+    vmovl.u8        q13, d2<br>+    vmull.s16       q11, d26, d17       // 10 * c0<br>+    vmull.s16       q12, d27, d17       // 10 * c1<br>+<br>+    vmov.i16        d16, #17<br>+    vmovl.u8        q15, d4<br>+    vmull.s16       q13, d30, d16       // 17 * e0<br>+    vmull.s16       q14, d31, d16       // 17 * e1<br>+<br>+    vmov.i16        d17, #5<br>+    vmovl.u8        q1, d5<br>+    vmull.s16       q15, d2, d17        //  5 * f0<br>+    vmull.s16       q8, d3, d17         //  5 * f1<br>+<br>+    vsub.s32        q9, q11             // 58 * d0 - 10 * c0<br>+    vsub.s32        q10, q12            // 58 * d1 - 10 * c1<br>+<br>+    vmovl.u8       q1, d1<br>+    vshll.s16      q11, d2, #2         // 4 * b0<br>+    vshll.s16      q12, d3, #2         // 4 * b1<br>+<br>+    vadd.s32       q9, q13             // 58 * d0 - 10 * c0 + 17 * e0<br>+    vadd.s32       q10, q14            // 58 * d1 - 10 * c1 + 17 * e1<br>+<br>+    vmovl.u8       q1, d0<br>+    vmovl.u8       q2, d6<br>+    vsubl.s16      q13, d4, d2         // g0 - a0<br>+    vsubl.s16      q14, d5, d3         // g1 - a1<br>+<br>+    vadd.s32       q9, q11             // 58 * d0 - 10 * c0 + 17 * e0 + 4 * b0<br>+    vadd.s32       q10, q12            // 58 * d1 - 10 * c1 + 17 * e1 + 4 * b1<br>+    vsub.s32       q13, q15            // g0 - a0 - 5 * f0<br>+    vsub.s32       q14, q8             // g1 - a1 - 5 * f1<br>+    vadd.s32       q9, q13             // 58 * d0 - 10 * c0 + 17 * e0 + 4 * b0 + g0 - a0 - 5 * f0<br>+    vadd.s32       q10, q14            // 58 * d1 - 10 * c1 + 17 * e1 + 4 * b1 + g1 - a1 - 5 * f1<br>+.endm<br>+<br>+.macro qpel_filter_2_32b<br>+    vmov.i32        q8, #11<br>+    vmovl.u8        q11, d3<br>+    vmovl.u8        q12, d4<br>+    vaddl.s16       q9, d22,d24        // d0 + e0<br>+    vaddl.s16       q10, d23, d25      // d1 + e1<br>+<br>+    vmovl.u8        q13, d2            //c<br>+    vmovl.u8        q14, d5            //f<br>+    vaddl.s16       q11, d26, d28      // c0 + f0<br>+    vaddl.s16       q12, d27, d29      // c1 + f1<br>+<br>+    vmul.s32        q11, q8            // 11 * (c0 + f0)<br>+    vmul.s32        q12, q8            // 11 * (c1 + f1)<br>+<br>+    vmov.i32        q8, #40<br>+    vmul.s32        q9, q8             // 40 * (d0 + e0)<br>+    vmul.s32        q10, q8            // 40 * (d1 + e1)<br>+<br>+    vmovl.u8        q13, d1            //b<br>+    vmovl.u8        q14, d6            //g<br>+    vaddl.s16       q15, d26, d28      // b0 + g0<br>+    vaddl.s16       q8, d27, d29       // b1 + g1<br>+<br>+    vmovl.u8        q1, d0             //a<br>+    vmovl.u8        q2, d7             //h<br>+    vaddl.s16       q13, d2, d4        // a0 + h0<br>+    vaddl.s16       q14, d3, d5        // a1 + h1<br>+<br>+    vshl.s32        q15, #2            // 4*(b0+g0)<br>+    vshl.s32        q8, #2             // 4*(b1+g1)<br>+<br>+    vadd.s32        q11, q13           // 11 * (c0 + f0) + a0 + h0<br>+    vadd.s32        q12, q14           // 11 * (c1 + f1) + a1 + h1<br>+    vadd.s32        q9, q15            // 40 * (d0 + e0) + 4*(b0+g0)<br>+    vadd.s32        q10, q8            // 
40 * (d1 + e1) + 4*(b1+g1)<br>+    vsub.s32        q9, q11            // 40 * (d0 + e0) + 4*(b0+g0) - (11 * (c0 + f0) + a0 + h0)<br>+    vsub.s32        q10, q12           // 40 * (d1 + e1) + 4*(b1+g1) - (11 * (c1 + f1) + a1 + h1)<br>+.endm<br>+<br>+.macro qpel_filter_3_32b<br>+<br>+    vmov.i16        d16, #17<br>+    vmov.i16        d17, #5<br>+<br>+    vmovl.u8        q11, d3<br>+    vmull.s16       q9, d22, d16       // 17 * d0<br>+    vmull.s16       q10, d23, d16      // 17 * d1<br>+<br>+    vmovl.u8        q13, d2<br>+    vmull.s16       q11, d26, d17      // 5 * c0<br>+    vmull.s16       q12, d27, d17      // 5* c1<br>+<br>+    vmov.i16        d16, #58<br>+    vmovl.u8        q15, d4<br>+    vmull.s16       q13, d30, d16      // 58 * e0<br>+    vmull.s16       q14, d31, d16      // 58 * e1<br>+<br>+    vmov.i16        d17, #10<br>+    vmovl.u8        q1, d5<br>+    vmull.s16       q15, d2, d17       // 10 * f0<br>+    vmull.s16       q8, d3, d17        // 10 * f1<br>+<br>+    vsub.s32        q9, q11            // 17 * d0 - 5 * c0<br>+    vsub.s32        q10, q12           // 17 * d1 - 5 * c1<br>+<br>+    vmovl.u8        q1, d6<br>+    vshll.s16       q11, d2, #2        // 4 * g0<br>+    vshll.s16       q12, d3, #2        // 4 * g1<br>+<br>+    vadd.s32        q9, q13            // 17 * d0 - 5 * c0+ 58 * e0<br>+    vadd.s32        q10, q14           // 17 * d1 - 5 * c1 + 58 * e1<br>+<br>+    vmovl.u8        q1, d1<br>+    vmovl.u8        q2, d7<br>+    vsubl.s16      q13, d2, d4         // b0 - h0<br>+    vsubl.s16      q14, d3, d5         // b1 - h1<br>+<br>+    vadd.s32        q9, q11            // 17 * d0 - 5 * c0+ 58 * e0 +4 * g0<br>+    vadd.s32        q10, q12           // 17 * d1 - 5 * c1 + 58 * e1+4 * g1<br>+    vsub.s32        q13, q15           // 17 * d0 - 5 * c0+ 58 * e0 +4 * g0 -10 * f0<br>+    vsub.s32        q14, q8            // 17 * d1 - 5 * c1 + 58 * e1+4 * g1 - 10*f1<br>+    vadd.s32        q9, q13            //  17 * d0 - 5 * c0+ 58 * e0 +4 * g0 -10 * f0 +b0 - h0<br>+    vadd.s32        q10, q14           // 17 * d1 - 5 * c1 + 58 * e1+4 * g1 - 10*f1 + b1 - h1<br>+.endm<br>+<br>+.macro FILTER_VPP a b filterv<br>+<br>+.loop_\filterv\()_\a\()x\b:<br>+<br>+    mov             r7, r2<br>+    mov             r6, r0<br>+    eor             r8, r8<br>+<br>+.loop_w8_\filterv\()_\a\()x\b:<br>+<br>+    add             r6, r0, r8<br>+<br>+    pld [r6]<br>+    vld1.u8         d0, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d1, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d2, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d3, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d4, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d5, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d6, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d7, [r6], r1<br>+<br>+    veor.u8         q9, q9<br>+    veor.u8         q10, q10<br>+<br>+   \filterv<br>+<br>+    mov             r12,#32<br>+    vdup.32         q8, r12<br>+    vadd.s32        q9, q8<br>+    vqshrun.s32     d0, q9, #6<br>+    vadd.s32        q10, q8<br>+    vqshrun.s32     d1, q10, #6<br>+    vqmovn.u16      d0, q0<br>+    vst1.u8         d0, [r7]!<br>+<br>+    add             r8, #8<br>+    cmp             r8, #\a<br>+    blt             .loop_w8_\filterv\()_\a\()x\b<br>+<br>+    add             r0, r1<br>+    add             r2, r3<br>+    subs            r4, #1<br>+    bne             .loop_\filterv\()_\a\()x\b <br>+<br>+.endm <br>+<br>+.macro LUMA_VPP  w h<br>+function 
x265_interp_8tap_vert_pp_\w\()x\h\()_neon<br>+<br>+    push            {r4, r5, r6, r7, r8}<br>+    ldr             r5, [sp, #4 * 5]<br>+    mov             r4, r1, lsl #2<br>+    sub             r4, r1<br>+    sub             r0, r4<br>+    mov             r4, #\h<br>+<br>+    cmp             r5, #0<br>+    beq              0f<br>+    cmp             r5, #1<br>+    beq              1f<br>+    cmp             r5, #2<br>+    beq              2f<br>+    cmp             r5, #3<br>+    beq              3f<br>+0:<br>+    FILTER_VPP  \w \h qpel_filter_0_32b<br>+    b            5f<br>+1:<br>+    FILTER_VPP  \w \h qpel_filter_1_32b<br>+    b            5f<br>+2:<br>+    FILTER_VPP  \w \h qpel_filter_2_32b<br>+    b            5f<br>+3:<br>+    FILTER_VPP  \w \h qpel_filter_3_32b<br>+    b            5f<br>+5:<br>+    pop             {r4, r5, r6, r7, r8}<br>+    bx              lr<br>+endfunc<br>+.endm<br>+<br>+LUMA_VPP 8 4<br>+LUMA_VPP 8 8<br>+LUMA_VPP 8 16<br>+LUMA_VPP 8 32<br>+LUMA_VPP 16 4<br>+LUMA_VPP 16 8<br>+LUMA_VPP 16 16<br>+LUMA_VPP 16 32<br>+LUMA_VPP 16 64<br>+LUMA_VPP 16 12<br>+LUMA_VPP 32 8<br>+LUMA_VPP 32 16<br>+LUMA_VPP 32 32<br>+LUMA_VPP 32 64<br>+LUMA_VPP 32 24<br>+LUMA_VPP 64 16<br>+LUMA_VPP 64 32<br>+LUMA_VPP 64 64<br>+LUMA_VPP 64 48<br>+LUMA_VPP 24 32<br>+LUMA_VPP 48 64<br>+<br>+.macro LUMA_VPP_4xN h<br>+function x265_interp_8tap_vert_pp_4x\h\()_neon<br>+    push           {r4, r5, r6}<br>+    ldr             r4, [sp, #4 * 3]<br>+    mov             r5, r4, lsl #6<br>+    mov             r4, r1, lsl #2<br>+    sub             r4, r1<br>+    sub             r0, r4<br>+<br>+    mov             r4, #32<br>+    vdup.32         q8, r4<br>+    mov             r4, #\h<br>+<br>+.loop_4x\h:<br>+    movrel          r12, g_lumaFilter<br>+    add             r12, r5<br>+    mov             r6, r0<br>+<br>+    pld [r6]<br>+    vld1.u32        d0[0], [r6], r1<br>+    pld [r6]<br>+    vld1.u32        d0[1], [r6], r1<br>+    pld [r6]<br>+    vld1.u32        d1[0], [r6], r1<br>+    pld [r6]<br>+    vld1.u32        d1[1], [r6], r1<br>+    pld [r6]<br>+    vld1.u32        d2[0], [r6], r1<br>+    pld [r6]<br>+    vld1.u32        d2[1], [r6], r1<br>+    pld [r6]<br>+    vld1.u32        d3[0], [r6], r1<br>+    pld [r6]<br>+    vld1.u32        d3[1], [r6], r1<br>+<br>+    veor.u8         q9, q9<br>+<br>+    vmovl.u8        q11, d0<br>+    vmovl.u16       q12, d22<br>+    vmovl.u16       q13, d23<br>+    vld1.s32        d20, [r12]!<br>+    vmov.s32        d21, d20<br>+    vmla.s32        q9, q12, q10<br>+    vld1.s32        d20, [r12]!<br>+    vmov.s32        d21, d20<br>+    vmla.s32        q9, q13, q10<br>+<br>+    vmovl.u8        q11, d1<br>+    vmovl.u16       q12, d22<br>+    vmovl.u16       q13, d23<br>+    vld1.s32        d20, [r12]!<br>+    vmov.s32        d21, d20<br>+    vmla.s32        q9, q12, q10<br>+    vld1.s32        d20, [r12]!<br>+    vmov.s32        d21, d20<br>+    vmla.s32        q9, q13, q10<br>+<br>+    vmovl.u8        q11, d2<br>+    vmovl.u16       q12, d22<br>+    vmovl.u16       q13, d23<br>+    vld1.s32        d20, [r12]!<br>+    vmov.s32        d21, d20<br>+    vmla.s32        q9, q12, q10<br>+    vld1.s32        d20, [r12]!<br>+    vmov.s32        d21, d20<br>+    vmla.s32        q9, q13, q10<br>+<br>+    vmovl.u8        q11, d3<br>+    vmovl.u16       q12, d22<br>+    vmovl.u16       q13, d23<br>+    vld1.s32        d20, [r12]!<br>+    vmov.s32        d21, d20<br>+    vmla.s32        q9, q12, q10<br>+    vld1.s32        d20, [r12]!<br>+    vmov.s32        d21, d20<br>+   
 vmla.s32        q9, q13, q10<br>+<br>+    vadd.s32        q9, q8<br>+    vqshrun.s32     d0, q9, #6<br>+    vqmovn.u16      d0, q0<br>+    vst1.u32        d0[0], [r2], r3<br>+<br>+    add             r0, r1<br>+    subs            r4, #1<br>+    bne             .loop_4x\h<br>+<br>+    pop             {r4, r5, r6}<br>+    bx              lr<br>+endfunc<br>+.endm<br>+<br>+LUMA_VPP_4xN 4<br>+LUMA_VPP_4xN 8<br>+LUMA_VPP_4xN 16<br>+<br>+function x265_interp_8tap_vert_pp_12x16_neon<br>+    push            {r4, r5, r6, r7}<br>+    ldr             r5, [sp, #4 * 4]<br>+    mov             r4, r1, lsl #2<br>+    sub             r4, r1<br>+    sub             r0, r4<br>+<br>+    mov             r4, #16<br>+.loop_12x16:<br>+<br>+    mov             r6, r0<br>+    mov             r7, r2<br>+<br>+    pld [r6]<br>+    vld1.u8         d0, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d1, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d2, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d3, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d4, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d5, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d6, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d7, [r6], r1<br>+<br>+    veor.u8         q9, q9<br>+    veor.u8         q10, q10<br>+<br>+    cmp             r5,#0<br>+    beq              0f<br>+    cmp             r5,#1<br>+    beq              1f<br>+    cmp             r5,#2<br>+    beq              2f<br>+    cmp             r5,#3<br>+    beq              3f<br>+1:<br>+    qpel_filter_1_32b<br>+    b            5f<br>+2:<br>+    qpel_filter_2_32b<br>+    b            5f<br>+3:<br>+    qpel_filter_3_32b<br>+    b            5f<br>+0:<br>+    vmov.i16        d17, #64<br>+    vmovl.u8        q11, d3<br>+    vmull.s16       q9, d22, d17    // 64*d0<br>+    vmull.s16       q10, d23, d17   // 64*d1<br>+5:<br>+    mov             r12,#32<br>+    vdup.32         q8, r12<br>+    vadd.s32        q9, q8<br>+    vqshrun.s32     d0, q9, #6<br>+    vadd.s32        q10, q8<br>+    vqshrun.s32     d1, q10, #6<br>+    vqmovn.u16      d0, q0<br>+    vst1.u8         d0, [r7]!<br>+<br>+    add             r6, r0, #8<br>+<br>+    pld [r6]<br>+    vld1.u8         d0, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d1, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d2, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d3, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d4, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d5, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d6, [r6], r1<br>+    pld [r6]<br>+    vld1.u8         d7, [r6], r1<br>+<br>+    veor.u8         q9, q9<br>+    veor.u8         q10, q10<br>+<br>+    cmp             r5,#0<br>+    beq              0f<br>+    cmp             r5,#1<br>+    beq              1f<br>+    cmp             r5,#2<br>+    beq              2f<br>+    cmp             r5,#3<br>+    beq              3f<br>+1:<br>+    qpel_filter_1_32b<br>+    b            5f<br>+2:<br>+    qpel_filter_2_32b<br>+    b            5f<br>+3:<br>+    qpel_filter_3_32b<br>+    b            5f<br>+0:<br>+    vmov.i16        d17, #64<br>+    vmovl.u8        q11, d3<br>+    vmull.s16       q9, d22, d17    // 64*d0<br>+    vmull.s16       q10, d23, d17   // 64*d1<br>+5:<br>+    mov             r12,#32<br>+    vdup.32         q8, r12<br>+    vadd.s32        q9, q8<br>+    vqshrun.s32     d0, q9, #6<br>+    vadd.s32        q10, q8<br>+    vqshrun.s32     d1, q10, #6<br>+    vqmovn.u16      d0, q0<br>+    vst1.u32        d0[0], [r7]!<br>+<br>+    add  
           r0, r1<br>+    add             r2, r3<br>+    subs            r4, #1<br>+    bne             .loop_12x16<br>+<br>+    pop             {r4, r5, r6, r7}<br>+    bx              lr<br>+endfunc<br>diff -r b09998b1256e -r e5c08206b8bf source/common/arm/ipfilter8.h<br>--- a/source/common/arm/ipfilter8.h    Wed Mar 16 14:24:48 2016 +0530<br>+++ b/source/common/arm/ipfilter8.h    Wed Mar 16 16:52:23 2016 +0530<br>@@ -51,4 +51,29 @@<br> void x265_filterPixelToShort_64x48_neon(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride);<br> void x265_filterPixelToShort_64x64_neon(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride);<br> <br>+void x265_interp_8tap_vert_pp_4x4_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_4x8_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_4x16_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_8x4_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_8x8_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_8x16_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_8x32_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_16x4_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_16x8_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_16x16_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_16x32_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_16x64_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_16x12_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_32x8_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_32x16_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_32x32_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_32x64_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_32x24_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_64x16_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_64x32_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_64x64_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_64x48_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void 
x265_interp_8tap_vert_pp_24x32_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_48x64_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>+void x265_interp_8tap_vert_pp_12x16_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br> #endif // ifndef X265_IPFILTER8_ARM_H<br><br></div><div class="gmail_extra"><br clear="all"><div><div class="gmail_signature"><div dir="ltr"><div><div dir="ltr"><div><div><span style="color:rgb(56,118,29)"><br></span></div><div><span style="color:rgb(56,118,29)">Thank you<br></span></div><span style="color:rgb(56,118,29)">Regards<br></span></div><span style="color:rgb(56,118,29)">Ramya</span><br></div></div></div></div></div>
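
For reviewers, here is a scalar sketch of what these kernels compute, which may help when checking the tap arithmetic in the qpel_filter_*_32b macros. It is illustrative only and not part of the patch (interp_8tap_vert_pp_c and lumaTaps are made-up names); the taps are the HEVC 8-tap luma filter coefficients, which g_lumaFilter stores as interleaved pairs for the 4xN path:

    /* Scalar reference, for review only -- not part of the patch. */
    #include <stdint.h>

    static const int8_t lumaTaps[4][8] =
    {
        {  0, 0,   0, 64,  0,   0, 0,  0 },   /* coeffIdx 0 */
        { -1, 4, -10, 58, 17,  -5, 1,  0 },   /* coeffIdx 1 */
        { -1, 4, -11, 40, 40, -11, 4, -1 },   /* coeffIdx 2 */
        {  0, 1,  -5, 17, 58, -10, 4, -1 },   /* coeffIdx 3 */
    };

    static void interp_8tap_vert_pp_c(const uint8_t* src, intptr_t srcStride,
                                      uint8_t* dst, intptr_t dstStride,
                                      int width, int height, int coeffIdx)
    {
        /* Taps a..h cover rows -3..+4 around the current row (d = row 0),
         * matching the rewind by 3 * srcStride in each function prologue. */
        src -= 3 * srcStride;
        for (int y = 0; y < height; y++)
        {
            for (int x = 0; x < width; x++)
            {
                int sum = 0;
                for (int i = 0; i < 8; i++)
                    sum += lumaTaps[coeffIdx][i] * src[x + i * srcStride];
                /* Round, shift, clamp to [0, 255]: the vadd of #32 plus the
                 * saturating vqshrun/vqmovn pair in the NEON epilogues. */
                sum = (sum + 32) >> 6;
                dst[x] = (uint8_t)(sum < 0 ? 0 : (sum > 255 ? 255 : sum));
            }
            src += srcStride;
            dst += dstStride;
        }
    }

Each qpel_filter_N_32b macro expands this inner product for one coefficient row over two sets of four 32-bit lanes; the block dimensions are baked into each NEON function rather than passed as width/height.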
<br><div class="gmail_quote">On Thu, Mar 17, 2016 at 11:17 AM,  <span dir="ltr"><<a href="mailto:ramya@multicorewareinc.com" target="_blank">ramya@multicorewareinc.com</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex"># HG changeset patch<br>
# User Ramya Sriraman<<a href="mailto:ramya@multicorewareinc.com">ramya@multicorewareinc.com</a>><br>
# Date 1458127343 -19800<br>
#      Wed Mar 16 16:52:23 2016 +0530<br>
# Node ID 2f43a5b323725cc37ec7aec7e53049b84de3f905<br>
# Parent  4a2f94a592511afabd434fc6cf02a469b6d65091<br>
arm: Implement interp_8tap_vert_pp_4xn,8xn and 16xn NEON<br>
<br>
diff -r 4a2f94a59251 -r 2f43a5b32372 source/common/arm/asm-primitives.cpp<br>
--- a/source/common/arm/asm-primitives.cpp      Wed Mar 09 14:34:06 2016 +0530<br>
+++ b/source/common/arm/asm-primitives.cpp      Wed Mar 16 16:52:23 2016 +0530<br>
@@ -296,6 +296,20 @@<br>
         // planecopy<br>
         p.planecopy_cp = PFX(pixel_planecopy_cp_neon);<br>
<br>
+        // vertical interpolation filters<br>
+        p.pu[LUMA_4x4].luma_vpp     = PFX(interp_8tap_vert_pp_4x4_neon);<br>
+        p.pu[LUMA_4x8].luma_vpp     = PFX(interp_8tap_vert_pp_4x8_neon);<br>
+        p.pu[LUMA_4x16].luma_vpp    = PFX(interp_8tap_vert_pp_4x16_neon);<br>
+        p.pu[LUMA_8x4].luma_vpp     = PFX(interp_8tap_vert_pp_8x4_neon);<br>
+        p.pu[LUMA_8x8].luma_vpp     = PFX(interp_8tap_vert_pp_8x8_neon);<br>
+        p.pu[LUMA_8x16].luma_vpp    = PFX(interp_8tap_vert_pp_8x16_neon);<br>
+        p.pu[LUMA_8x32].luma_vpp    = PFX(interp_8tap_vert_pp_8x32_neon);<br>
+        p.pu[LUMA_16x4].luma_vpp    = PFX(interp_8tap_vert_pp_16x4_neon);<br>
+        p.pu[LUMA_16x8].luma_vpp    = PFX(interp_8tap_vert_pp_16x8_neon);<br>
+        p.pu[LUMA_16x16].luma_vpp   = PFX(interp_8tap_vert_pp_16x16_neon);<br>
+        p.pu[LUMA_16x32].luma_vpp   = PFX(interp_8tap_vert_pp_16x32_neon);<br>
+        p.pu[LUMA_16x64].luma_vpp   = PFX(interp_8tap_vert_pp_16x64_neon);<br>
+        p.pu[LUMA_16x12].luma_vpp   = PFX(interp_8tap_vert_pp_16x12_neon);<br>
     }<br>
     if (cpuMask & X265_CPU_ARMV6)<br>
     {<br>
diff -r 4a2f94a59251 -r 2f43a5b32372 source/common/arm/ipfilter8.S<br>
--- a/source/common/arm/ipfilter8.S     Wed Mar 09 14:34:06 2016 +0530<br>
+++ b/source/common/arm/ipfilter8.S     Wed Mar 16 16:52:23 2016 +0530<br>
@@ -24,8 +24,10 @@<br>
 #include "asm.S"<br>
<br>
 .section .rodata<br>
+.align 4<br>
<br>
-.align 4<br>
+g_lumaFilter:<br>
+.word 0,0,0,0,0,0,64,64,0,0,0,0,0,0,0,0,-1,-1,4,4,-10,-10,58,58,17,17,-5,-5,1,1,0,0,-1,-1,4,4,-11,-11,40,40,40,40,-11,-11,4,4,-1,-1,0,0,1,1,-5,-5,17,17,58,58,-10,-10,4,4,-1,-1<br>
<br>
 .text<br>
<br>
@@ -692,3 +694,436 @@<br>
     bgt         .loop_filterP2S_48x64<br>
     bx          lr<br>
 endfunc<br>
+<br>
+<br>
+<br>
+.macro qpel_filter_1_32b<br>
+    vmov.i16        d16, #58<br>
+    vmovl.u8        q11, d3<br>
+    vmull.s16       q9, d22, d16        // 58 * d0<br>
+    vmull.s16       q10, d23, d16       // 58 * d1<br>
+<br>
+    vmov.i16        d17, #10<br>
+    vmovl.u8        q13, d2<br>
+    vmull.s16       q11, d26, d17       // 10 * c0<br>
+    vmull.s16       q12, d27, d17       // 10 * c1<br>
+<br>
+    vmov.i16        d16, #17<br>
+    vmovl.u8        q15, d4<br>
+    vmull.s16       q13, d30, d16       // 17 * e0<br>
+    vmull.s16       q14, d31, d16       // 17 * e1<br>
+<br>
+    vmov.i16        d17, #5<br>
+    vmovl.u8        q1, d5<br>
+    vmull.s16       q15, d2, d17        //  5 * f0<br>
+    vmull.s16       q8, d3, d17         //  5 * f1<br>
+<br>
+    vsub.s32        q9, q11             // 58 * d0 - 10 * c0<br>
+    vsub.s32        q10, q12            // 58 * d1 - 10 * c1<br>
+<br>
+     vmovl.u8       q1, d1<br>
+     vshll.s16      q11, d2, #2        // 4 * b0<br>
+     vshll.s16      q12, d3, #2        // 4 * b1<br>
+<br>
+     vadd.s32       q9, q13            // 58 * d0 - 10 * c0 + 17 * e0<br>
+     vadd.s32       q10, q14           // 58 * d1 - 10 * c1 + 17 * e1<br>
+<br>
+     vmovl.u8       q1, d0<br>
+     vmovl.u8       q2, d6<br>
+     vsubl.s16      q13, d4, d2        // g0 - a0<br>
+     vsubl.s16      q14, d5, d3        // g1 - a1<br>
+<br>
+     vadd.s32       q9, q11            // 58 * d0 - 10 * c0 + 17 * e0 + 4 * b0<br>
+     vadd.s32       q10, q12           // 58 * d1 - 10 * c1 + 17 * e1 + 4 * b1<br>
+     vsub.s32       q13, q15           // g0 - a0 - 5 * f0<br>
+     vsub.s32       q14, q8            // g1 - a1 - 5 * f1<br>
+     vadd.s32       q9, q13            // 58 * d0 - 10 * c0 + 17 * e0 + 4 * b0 + g0 - a0 - 5 * f0<br>
+     vadd.s32       q10, q14           // 58 * d1 - 10 * c1 + 17 * e1 + 4 * b1 + g1 - a1 - 5 * f1<br>
+.endm<br>
+<br>
+.macro qpel_filter_2_32b<br>
+     vmov.i32        q8, #11<br>
+     vmovl.u8        q11, d3<br>
+     vmovl.u8        q12, d4<br>
+     vaddl.s16       q9, d22,d24        // d0 + e0<br>
+     vaddl.s16       q10, d23, d25      // d1 + e1<br>
+<br>
+     vmovl.u8        q13, d2            //c<br>
+     vmovl.u8        q14, d5            //f<br>
+     vaddl.s16       q11, d26, d28      // c0 + f0<br>
+     vaddl.s16       q12, d27, d29      // c1 + f1<br>
+<br>
+     vmul.s32        q11, q8            // 11 * (c0 + f0)<br>
+     vmul.s32        q12, q8            // 11 * (c1 + f1)<br>
+<br>
+     vmov.i32        q8, #40<br>
+     vmul.s32        q9, q8             // 40 * (d0 + e0)<br>
+     vmul.s32        q10, q8            // 40 * (d1 + e1)<br>
+<br>
+     vmovl.u8        q13, d1            //b<br>
+     vmovl.u8        q14, d6            //g<br>
+     vaddl.s16       q15, d26, d28      // b0 + g0<br>
+     vaddl.s16       q8, d27, d29       // b1 + g1<br>
+<br>
+     vmovl.u8        q1, d0             //a<br>
+     vmovl.u8        q2, d7             //h<br>
+     vaddl.s16       q13, d2, d4        // a0 + h0<br>
+     vaddl.s16       q14, d3, d5        // a1 + h1<br>
+<br>
+     vshl.s32        q15, #2            // 4*(b0+g0)<br>
+     vshl.s32        q8, #2             // 4*(b1+g1)<br>
+<br>
+     vadd.s32        q11, q13           // 11 * (c0 + f0) + a0 + h0<br>
+     vadd.s32        q12, q14           // 11 * (c1 + f1) + a1 + h1<br>
+     vadd.s32        q9, q15            // 40 * (d0 + e0) + 4*(b0+g0)<br>
+     vadd.s32        q10, q8            // 40 * (d1 + e1) + 4*(b1+g1)<br>
+     vsub.s32        q9, q11            // 40 * (d0 + e0) + 4*(b0+g0) - (11 * (c0 + f0) + a0 + h0)<br>
+     vsub.s32        q10, q12           // 40 * (d1 + e1) + 4*(b1+g1) - (11 * (c1 + f1) + a1 + h1)<br>
+.endm<br>
+<br>
+.macro qpel_filter_3_32b<br>
+<br>
+     vmov.i16        d16, #17<br>
+     vmov.i16        d17, #5<br>
+<br>
+     vmovl.u8        q11, d3<br>
+     vmull.s16       q9, d22, d16       // 17 * d0<br>
+     vmull.s16       q10, d23, d16      // 17 * d1<br>
+<br>
+     vmovl.u8        q13, d2<br>
+     vmull.s16       q11, d26, d17      // 5 * c0<br>
+     vmull.s16       q12, d27, d17      // 5* c1<br>
+<br>
+     vmov.i16        d16, #58<br>
+     vmovl.u8        q15, d4<br>
+     vmull.s16       q13, d30, d16      // 58 * e0<br>
+     vmull.s16       q14, d31, d16      // 58 * e1<br>
+<br>
+     vmov.i16        d17, #10<br>
+     vmovl.u8        q1, d5<br>
+     vmull.s16       q15, d2, d17       // 10 * f0<br>
+     vmull.s16       q8, d3, d17        // 10 * f1<br>
+<br>
+     vsub.s32        q9, q11            // 17 * d0 - 5 * c0<br>
+     vsub.s32        q10, q12           // 17 * d1 - 5 * c1<br>
+<br>
+     vmovl.u8        q1, d6<br>
+     vshll.s16       q11, d2, #2        // 4 * g0<br>
+     vshll.s16       q12, d3, #2        // 4 * g1<br>
+<br>
+     vadd.s32        q9, q13            // 17 * d0 - 5 * c0+ 58 * e0<br>
+     vadd.s32        q10, q14           // 17 * d1 - 5 * c1 + 58 * e1<br>
+<br>
+     vmovl.u8        q1, d1<br>
+     vmovl.u8        q2, d7<br>
+     vsubl.s16      q13, d2, d4         // b0 - h0<br>
+     vsubl.s16      q14, d3, d5         // b1 - h1<br>
+<br>
+     vadd.s32        q9, q11            // 17 * d0 - 5 * c0+ 58 * e0 +4 * g0<br>
+     vadd.s32        q10, q12           // 17 * d1 - 5 * c1 + 58 * e1+4 * g1<br>
+     vsub.s32        q13, q15           // 17 * d0 - 5 * c0+ 58 * e0 +4 * g0 -10 * f0<br>
+     vsub.s32        q14, q8            // 17 * d1 - 5 * c1 + 58 * e1+4 * g1 - 10*f1<br>
+     vadd.s32        q9, q13            //  17 * d0 - 5 * c0+ 58 * e0 +4 * g0 -10 * f0 +b0 - h0<br>
+     vadd.s32        q10, q14           // 17 * d1 - 5 * c1 + 58 * e1+4 * g1 - 10*f1 + b1 - h1<br>
+.endm<br>
+<br>
+.macro LUMA_VPP_8xN h<br>
+function x265_interp_8tap_vert_pp_8x\h\()_neon<br>
+<br>
+    push            {r4, r5, r6}<br>
+    ldr             r5, [sp, #4 * 3]<br>
+    mov             r4, #3<br>
+    mul             r4, r1, r4<br>
+    sub             r0, r4<br>
+<br>
+    mov             r4, #\h<br>
+<br>
+.loop_8x\h:<br>
+    mov             r6,r0<br>
+    pld [r6]<br>
+    vld1.u8         d0, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d1, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d2, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d3, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d4, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d5, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d6, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d7, [r6], r1<br>
+<br>
+    veor.u8         q9, q9<br>
+    veor.u8         q10, q10<br>
+<br>
+    cmp             r5,#0<br>
+    beq              0f<br>
+    cmp             r5,#1<br>
+    beq              1f<br>
+    cmp             r5,#2<br>
+    beq              2f<br>
+    cmp             r5,#3<br>
+    beq              3f<br>
+1:<br>
+    qpel_filter_1_32b<br>
+    b            5f<br>
+2:<br>
+    qpel_filter_2_32b<br>
+    b            5f<br>
+3:<br>
+    qpel_filter_3_32b<br>
+    b            5f<br>
+0:<br>
+    vmov.i16        d17, #64<br>
+    vmovl.u8        q11, d3<br>
+    vmull.s16       q9, d22, d17   // 64*d0<br>
+    vmull.s16       q10, d23, d17   // 64*d1<br>
+5:<br>
+    mov             r12,#32<br>
+    vdup.32         q8, r12<br>
+    vadd.s32        q9, q8<br>
+    vqshrun.s32     d0, q9, #6<br>
+    vadd.s32        q10, q8<br>
+    vqshrun.s32     d1, q10, #6<br>
+    vqmovn.u16      d0, q0<br>
+    vst1.u8         d0, [r2], r3<br>
+<br>
+    add             r0, r1<br>
+    subs            r4, #1<br>
+    bne             .loop_8x\h<br>
+<br>
+    pop             {r4, r5, r6}<br>
+    bx              lr<br>
+endfunc<br>
+.endm<br>
+LUMA_VPP_8xN 4<br>
+LUMA_VPP_8xN 8<br>
+LUMA_VPP_8xN 16<br>
+LUMA_VPP_8xN 32<br>
+<br>
+.macro LUMA_VPP_16xN h<br>
+function x265_interp_8tap_vert_pp_16x\h\()_neon<br>
+<br>
+    push            {r4, r5, r6, r7, r8}<br>
+    ldr             r5, [sp, #4 * 5]<br>
+    mov             r4, #3<br>
+    mul             r4, r1, r4<br>
+    sub             r0, r4<br>
+<br>
+    mov             r4, #\h<br>
+<br>
+.loop_16x\h:<br>
+    mov             r8, r2<br>
+    mov             r6, r0<br>
+<br>
+    pld [r6]<br>
+    vld1.u8         d0, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d1, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d2, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d3, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d4, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d5, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d6, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d7, [r6], r1<br>
+<br>
+    veor.u8         q9, q9<br>
+    veor.u8         q10, q10<br>
+<br>
+    cmp             r5,#0<br>
+    beq              0f<br>
+    cmp             r5,#1<br>
+    beq              1f<br>
+    cmp             r5,#2<br>
+    beq              2f<br>
+    cmp             r5,#3<br>
+    beq              3f<br>
+1:<br>
+    qpel_filter_1_32b<br>
+    b            5f<br>
+2:<br>
+    qpel_filter_2_32b<br>
+    b            5f<br>
+3:<br>
+    qpel_filter_3_32b<br>
+    b            5f<br>
+0:<br>
+    vmov.i16        d17, #64<br>
+    vmovl.u8        q11, d3<br>
+    vmull.s16       q9, d22, d17   // 64*d0<br>
+    vmull.s16       q10, d23, d17   // 64*d1<br>
+5:<br>
+    mov             r12,#32<br>
+    vdup.32         q8, r12<br>
+    vadd.s32        q9, q8<br>
+    vqshrun.s32     d0, q9, #6<br>
+    vadd.s32        q10, q8<br>
+    vqshrun.s32     d1, q10, #6<br>
+    vqmovn.u16      d0, q0<br>
+    vst1.u8         d0, [r8]!<br>
+<br>
+    add             r6,r0, #8<br>
+    pld [r6]<br>
+    vld1.u8         d0, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d1, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d2, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d3, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d4, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d5, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d6, [r6], r1<br>
+    pld [r6]<br>
+    vld1.u8         d7, [r6], r1<br>
+<br>
+    veor.u8         q9, q9<br>
+    veor.u8         q10, q10<br>
+<br>
+    cmp             r5,#0<br>
+    beq              0f<br>
+    cmp             r5,#1<br>
+    beq              1f<br>
+    cmp             r5,#2<br>
+    beq              2f<br>
+    cmp             r5,#3<br>
+    beq              3f<br>
+1:<br>
+    qpel_filter_1_32b<br>
+    b            5f<br>
+2:<br>
+    qpel_filter_2_32b<br>
+    b            5f<br>
+3:<br>
+    qpel_filter_3_32b<br>
+    b            5f<br>
+0:<br>
+    vmov.i16        d17, #64<br>
+    vmovl.u8        q11, d3<br>
+    vmull.s16       q9, d22, d17   // 64*d0<br>
+    vmull.s16       q10, d23, d17   // 64*d1<br>
+5:<br>
+    mov             r12,#32<br>
+    vdup.32         q8, r12<br>
+    vadd.s32        q9, q8<br>
+    vqshrun.s32     d0, q9, #6<br>
+    vadd.s32        q10, q8<br>
+    vqshrun.s32     d1, q10, #6<br>
+    vqmovn.u16      d0, q0<br>
+    vst1.u8         d0, [r8]!<br>
+<br>
+    add             r0, r1<br>
+    add             r2, r3<br>
+    subs            r4, #1<br>
+    bne             .loop_16x\h<br>
+<br>
+    pop             {r4, r5, r6, r7, r8}<br>
+    bx              lr<br>
+endfunc<br>
+.endm<br>
+LUMA_VPP_16xN 4<br>
+LUMA_VPP_16xN 8<br>
+LUMA_VPP_16xN 16<br>
+LUMA_VPP_16xN 32<br>
+LUMA_VPP_16xN 64<br>
+LUMA_VPP_16xN 12<br>
+<br>
+.macro LUMA_VPP_4xN h<br>
+function x265_interp_8tap_vert_pp_4x\h\()_neon<br>
+     push            {r4, r5, r6}<br>
+    ldr             r4, [sp, #4 * 3]<br>
+    mov             r5, r4, lsl #6<br>
+    mov             r4, #3<br>
+    mul             r4, r1, r4<br>
+    sub             r0, r4<br>
+<br>
+    mov             r4, #32<br>
+    vdup.32         q8, r4<br>
+    mov             r4, #\h<br>
+<br>
+.loop_4x\h:<br>
+    movrel          r12, g_lumaFilter<br>
+    add             r12, r5<br>
+    mov             r6, r0<br>
+<br>
+    vld1.u32        d0[0], [r6], r1<br>
+    vld1.u32        d0[1], [r6], r1<br>
+    vld1.u32        d1[0], [r6], r1<br>
+    vld1.u32        d1[1], [r6], r1<br>
+    vld1.u32        d2[0], [r6], r1<br>
+    vld1.u32        d2[1], [r6], r1<br>
+    vld1.u32        d3[0], [r6], r1<br>
+    vld1.u32        d3[1], [r6], r1<br>
+<br>
+    veor.u8         q9, q9<br>
+<br>
+    vmovl.u8        q11, d0<br>
+    vmovl.u16       q12, d22<br>
+    vmovl.u16       q13, d23<br>
+    vld1.s32        d20, [r12]!<br>
+    vmov.s32        d21, d20<br>
+    vmla.s32        q9, q12, q10<br>
+    vld1.s32        d20, [r12]!<br>
+    vmov.s32        d21, d20<br>
+    vmla.s32        q9, q13, q10<br>
+<br>
+    vmovl.u8        q11, d1<br>
+    vmovl.u16       q12, d22<br>
+    vmovl.u16       q13, d23<br>
+    vld1.s32        d20, [r12]!<br>
+    vmov.s32        d21, d20<br>
+    vmla.s32        q9, q12, q10<br>
+    vld1.s32        d20, [r12]!<br>
+    vmov.s32        d21, d20<br>
+    vmla.s32        q9, q13, q10<br>
+<br>
+    vmovl.u8        q11, d2<br>
+    vmovl.u16       q12, d22<br>
+    vmovl.u16       q13, d23<br>
+    vld1.s32        d20, [r12]!<br>
+    vmov.s32        d21, d20<br>
+    vmla.s32        q9, q12, q10<br>
+    vld1.s32        d20, [r12]!<br>
+    vmov.s32        d21, d20<br>
+    vmla.s32        q9, q13, q10<br>
+<br>
+    vmovl.u8        q11, d3<br>
+    vmovl.u16       q12, d22<br>
+    vmovl.u16       q13, d23<br>
+    vld1.s32        d20, [r12]!<br>
+    vmov.s32        d21, d20<br>
+    vmla.s32        q9, q12, q10<br>
+    vld1.s32        d20, [r12]!<br>
+    vmov.s32        d21, d20<br>
+    vmla.s32        q9, q13, q10<br>
+<br>
+    vadd.s32        q9, q8<br>
+    vqshrun.s32     d0, q9, #6<br>
+    vqmovn.u16      d0, q0<br>
+    vst1.u32        d0[0], [r2], r3<br>
+<br>
+    add             r0, r1<br>
+    subs            r4, #1<br>
+    bne             .loop_4x\h<br>
+<br>
+    pop             {r4, r5, r6}<br>
+    bx              lr<br>
+endfunc<br>
+.endm<br>
+<br>
+LUMA_VPP_4xN 4<br>
+LUMA_VPP_4xN 8<br>
+LUMA_VPP_4xN 16<br>
diff -r 4a2f94a59251 -r 2f43a5b32372 source/common/arm/ipfilter8.h<br>
--- a/source/common/arm/ipfilter8.h     Wed Mar 09 14:34:06 2016 +0530<br>
+++ b/source/common/arm/ipfilter8.h     Wed Mar 16 16:52:23 2016 +0530<br>
@@ -51,4 +51,17 @@<br>
 void x265_filterPixelToShort_64x48_neon(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride);<br>
 void x265_filterPixelToShort_64x64_neon(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride);<br>
<br>
+void x265_interp_8tap_vert_pp_4x4_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>
+void x265_interp_8tap_vert_pp_4x8_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>
+void x265_interp_8tap_vert_pp_4x16_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>
+void x265_interp_8tap_vert_pp_8x4_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>
+void x265_interp_8tap_vert_pp_8x8_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>
+void x265_interp_8tap_vert_pp_8x16_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>
+void x265_interp_8tap_vert_pp_8x32_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>
+void x265_interp_8tap_vert_pp_16x4_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>
+void x265_interp_8tap_vert_pp_16x8_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>
+void x265_interp_8tap_vert_pp_16x16_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>
+void x265_interp_8tap_vert_pp_16x32_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>
+void x265_interp_8tap_vert_pp_16x64_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>
+void x265_interp_8tap_vert_pp_16x12_neon(const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);<br>
 #endif // ifndef X265_IPFILTER8_ARM_H<br>
</blockquote></div><br></div>