<div style="line-height:1.7;color:#000000;font-size:14px;font-family:arial"><pre><br>At 2016-04-20 21:24:03,ramya@multicorewareinc.com wrote:
># HG changeset patch
># User Ramya Sriraman<ramya@multicorewareinc.com>
># Date 1461158053 -19800
># Wed Apr 20 18:44:13 2016 +0530
># Node ID 72ae446412d6e25aa3d2aa8ecb657f9815fdb635
># Parent 4f83d465d11b3baa46e6089f73b0929266d4b722
>arm: Implement quant
>
>diff -r 4f83d465d11b -r 72ae446412d6 source/common/arm/asm-primitives.cpp
>--- a/source/common/arm/asm-primitives.cpp Wed Mar 30 17:29:13 2016 +0530
>+++ b/source/common/arm/asm-primitives.cpp Wed Apr 20 18:44:13 2016 +0530
>@@ -820,6 +820,8 @@
> p.chroma[X265_CSP_I444].pu[LUMA_24x32].filter_vsp = PFX(interp_4tap_vert_sp_24x32_neon);
> p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_vsp = PFX(interp_4tap_vert_sp_48x64_neon);
>
>+ // quant
>+ p.quant = PFX(quant_neon);
> }
> if (cpuMask & X265_CPU_ARMV6)
> {
>diff -r 4f83d465d11b -r 72ae446412d6 source/common/arm/pixel-util.S
>--- a/source/common/arm/pixel-util.S Wed Mar 30 17:29:13 2016 +0530
>+++ b/source/common/arm/pixel-util.S Wed Apr 20 18:44:13 2016 +0530
>@@ -1962,3 +1962,63 @@
> bx lr
> endfunc
>
>+function x265_quant_neon
>+ push {r4-r9}
>+ ldr r4, [sp, #4* 6] //qbits
>+ ldr r5, [sp, #4* 6 + 4] // add
>+ ldr r6, [sp, #4* 6 + 8] // numcoeff
>+ mov r7, #8
>+ sub r7, r7 , r4 //-(qbits- 8)
>+
>+ lsr r6, r6 ,#2
>+ mov r8, #0
>+
>+.loop_quant:
>+
>+ vld1.s16 d0, [r0]!
>+ vmovl.s16 q1, d0 // coef[blockpos]
>+
>+ vclt.s32 q4, q1, #0
>+ mov r9, #1
>+ vdup.s32 q2, r9
>+ vorr.s32 q4, q4, q2 // q4= sign
>+
>+ vabs.s32 q1, q1 // q1=level=abs(coef[blockpos])
>+ vld1.s32 {q0}, [r1]! // quantCoeff[blockpos]
>+ vmul.i32 q0, q0, q1 // q0=tmplevel = abs(level) * quantCoeff[blockpos];
>+
>+ vdup.s32 q2, r5
Note: r5 here is `add` (r4 is qbits, per the loads at the top). Both are loop-invariant, and NEON has 16 quad registers available, so there is no need to rebuild these with vdup on every iteration — hoist the vdup instructions above .loop_quant.
>+ vadd.s32 q1, q0, q2 // q1= tmplevel+add
>+ vdup.s32 q2, r4
>+ vneg.s32 q2, q2
>+ vshl.s32 q1, q1, q2 // q1= level =tmplevel+add >> qbits
How about a saturating shift-right-narrow such as vqshrn/vqshrun to fold the shift and the later narrowing into one instruction? (Caveat: those encode the shift amount as an immediate, while qbits here is a runtime value in r4, so this only works if the shift count can be made constant.)
>+
>+ vdup.s32 q2, r4
>+ vshl.s32 q3, q1, q2 // q3 = level << qBits
>+ vsub.s32 q8, q0, q3 // q8= tmplevel - (level << qBits)
>+ vdup.s32 q2, r7
>+ vshl.s32 q8, q8, q2 // q3= ((tmplevel - (level << qBits)) >> qBits8)
>+ vst1.s32 {q8}, [r2]! // store deltaU
>+
>+ // numsig
>+ vclz.s32 q2, q1
>+ vshr.u32 q2, #5
>+ vadd.u32 d4, d5
>+ vpadd.u32 d4, d4
>+ vmov.32 r12, d4[0]
>+ mov r9, #4
>+ sub r9, r9, r12
>+ add r8, r9
>+
>+ vmul.s32 q2, q1, q4
>+ vqmovn.s32 d0, q2
>+ vst1.s16 d0, [r3]!
>+
>+ subs r6, #1
>+ bne .loop_quant
>+
>+ mov r0, r8
>+ pop {r4-r9}
>+ bx lr
>+endfunc
</pre></div>