<div dir="ltr"><br clear="all"><div><div class="gmail_signature"><div dir="ltr"><br></div></div></div>
<br><div class="gmail_quote">---------- Forwarded message ----------<br>From: <b class="gmail_sendername"></b> <span dir="ltr"><<a href="mailto:aasaipriya@multicorewareinc.com">aasaipriya@multicorewareinc.com</a>></span><br>Date: Mon, Jun 29, 2015 at 4:51 PM<br>Subject: [x265] [PATCH] asm: avx2 code for weight_sp() 16bpp<br>To: <a href="mailto:x265-devel@videolan.org">x265-devel@videolan.org</a><br><br><br># HG changeset patch<br>
# User Aasaipriya Chandran <<a href="mailto:aasaipriya@multicorewareinc.com">aasaipriya@multicorewareinc.com</a>><br>
# Date 1435562395 -19800<br>
#      Mon Jun 29 12:49:55 2015 +0530<br>
# Node ID bebe4e496a432608cf0a9c495debd1970caa387e<br>
# Parent 9feee64efa440c25f016d15ae982789e5393a77e<br>
asm: avx2 code for weight_sp() 16bpp<br>
<br>
 avx2: weight_sp 11.37x  4496.63     51139.20<br>
 sse4: weight_sp 6.48x  8163.87     52870.36<br>
<br>
diff -r 9feee64efa44 -r bebe4e496a43 source/common/x86/asm-primitives.cpp<br>
--- a/source/common/x86/asm-primitives.cpp   Fri Jun 26 15:29:51 2015 +0530<br>
+++ b/source/common/x86/asm-primitives.cpp   Mon Jun 29 12:49:55 2015 +0530<br>
@@ -1517,6 +1517,7 @@<br>
     p.scale1D_128to64 = PFX(scale1D_128to64_avx2);<br>
     p.scale2D_64to32 = PFX(scale2D_64to32_avx2);<br>
     p.weight_pp = PFX(weight_pp_avx2);<br>
+    p.weight_sp = PFX(weight_sp_avx2);<br>
     p.sign = PFX(calSign_avx2);<br>
<br>
     <a href="http://p.cu" rel="noreferrer" target="_blank">p.cu</a>[BLOCK_16x16].calcresidual = PFX(getResidual16_avx2);<br>
diff -r 9feee64efa44 -r bebe4e496a43 source/common/x86/pixel-util8.asm<br>
--- a/source/common/x86/pixel-util8.asm Fri Jun 26 15:29:51 2015 +0530<br>
+++ b/source/common/x86/pixel-util8.asm Mon Jun 29 12:49:55 2015 +0530<br>
@@ -1674,8 +1674,128 @@<br>
   dec     r5d<br>
   jnz     .loopH<br>
   RET<br>
-<br>
-%if ARCH_X86_64<br>
+%endif<br>
+<br>
+<br>
+%if HIGH_BIT_DEPTH<br>
+INIT_YMM avx2<br>
+cglobal weight_sp, 6,7,9<br>
+  mova           m1, [pw_1023]<br>
+  mova           m2, [pw_1]<br>
+  mov            r6d, r7m</div><div class="gmail_quote"><br></div><div class="gmail_quote"><br></div><div class="gmail_quote"><span style="font-size:12.8000001907349px">r7 is 8th register (0-7). so it should be  </span><span style="font-size:12.8000001907349px">cglobal weight_sp, </span><span style="font-size:12.8000001907349px">6, 8, 9 and </span><span style="font-size:12.8000001907349px">ARCH_X86_64 only code.</span><br></div><div class="gmail_quote"><br></div><div class="gmail_quote"><br></div><div class="gmail_quote"><br>
+  shl            r6d, 16<br>
+  or            r6d, r6m<br>
+  vpbroadcastd       m3, r6d   ; m3 = [round w0]<br>
+  movd           xm4, r8m   ; m4 = [shift]<br>
+  vpbroadcastd       m5, r9m   ; m5 = [offset]<br>
+<br>
+  ; correct row stride<br>
+  add            r3d, r3d<br>
+  add            r2d, r2d<br>
+  mov            r6d, r4d<br>
+  and            r6d, ~(mmsize / SIZEOF_PIXEL - 1)<br>
+  sub            r3d, r6d<br>
+  sub            r3d, r6d<br>
+  sub            r2d, r6d<br>
+  sub            r2d, r6d<br>
+<br>
+  ; generate partial width mask (MUST BE IN YMM0)<br>
+  mov            r6d, r4d<br>
+  and            r6d, (mmsize / SIZEOF_PIXEL - 1)<br>
+  movd           xm0, r6d<br>
+  pshuflw          m0, m0, 0<br>
+  punpcklqdq        m0, m0<br>
+  vinserti128       m0, m0, xm0, 1<br>
+  pcmpgtw          m0, [pw_0_15]<br>
+<br>
+.loopH:<br>
+  mov            r6d, r4d<br>
+<br>
+.loopW:<br>
+  movu           m6, [r0]<br>
+  paddw           m6, [pw_2000]<br>
+<br>
+  punpcklwd         m7, m6, m2<br>
+  pmaddwd          m7, m3    ;(round w0)<br>
+  psrad           m7, xm4   ;(shift)<br>
+  paddd           m7, m5    ;(offset)<br>
+<br>
+  punpckhwd         m6, m2<br>
+  pmaddwd          m6, m3<br>
+  psrad           m6, xm4<br>
+  paddd           m6, m5<br>
+<br>
+  packusdw         m7, m6<br>
+  pminuw          m7, m1<br>
+<br>
+  sub            r6d, (mmsize / SIZEOF_PIXEL)<br>
+  jl            .width14<br>
+  movu           [r1], m7<br>
+  lea            r0, [r0 + mmsize]<br>
+  lea            r1, [r1 + mmsize]<br>
+  je            .nextH<br>
+  jmp            .loopW<br>
+<br>
+.width14:<br>
+  add            r6d, 16<br>
+  cmp            r6d, 14<br>
+  jl            .width12<br>
+  movu           [r1], xm7<br>
+  vextracti128       xm8, m7, 1<br>
+  movq           [r1 + 16], xm8<br>
+  pextrd          [r1 + 24], xm8, 2<br>
+  je            .nextH<br>
+<br>
+.width12:<br>
+  cmp            r6d, 12<br>
+  jl            .width10<br>
+  movu           [r1], xm7<br>
+  vextracti128       xm8, m7, 1<br>
+  movq           [r1 + 16], xm8<br>
+  je            .nextH<br>
+<br>
+.width10:<br>
+  cmp            r6d, 10<br>
+  jl            .width8<br>
+  movu           [r1], xm7<br>
+  vextracti128       xm8, m7, 1<br>
+  movd           [r1 + 16], xm8<br>
+  je            .nextH<br>
+<br>
+.width8:<br>
+  cmp            r6d, 8<br>
+  jl            .width6<br>
+  movu           [r1], xm7<br>
+  je            .nextH<br>
+<br>
+.width6:<br>
+  cmp            r6d, 6<br>
+  jl            .width4<br>
+  movq           [r1], xm7<br>
+  pextrd          [r1 + 8], xm7, 2<br>
+  je            .nextH<br>
+<br>
+.width4:<br>
+  cmp            r6d, 4<br>
+  jl            .width2<br>
+  movq           [r1], xm7<br>
+  je            .nextH<br>
+  add            r1, 4<br>
+  pshufd          m6, m6, 1<br>
+  je            .nextH<br>
+<br>
+.width2:<br>
+  movd           [r1], xm7<br>
+<br>
+.nextH:<br>
+  add            r0, r2<br>
+  add            r1, r3<br>
+<br>
+  dec            r5d<br>
+  jnz            .loopH<br>
+  RET<br>
+<br>
+%else<br>
 INIT_YMM avx2<br>
 cglobal weight_sp, 6, 9, 7<br>
   mov       r7d, r7m<br>
@@ -1752,8 +1872,6 @@<br>
   jnz       .loopH<br>
   RET<br>
 %endif<br>
-%endif ; end of (HIGH_BIT_DEPTH == 0)<br>
-<br>
<br>
 ;-----------------------------------------------------------------<br>
 ; void transpose_4x4(pixel *dst, pixel *src, intptr_t stride)<br>
_______________________________________________<br>
x265-devel mailing list<br>
<a href="mailto:x265-devel@videolan.org">x265-devel@videolan.org</a><br>
<a href="https://mailman.videolan.org/listinfo/x265-devel" rel="noreferrer" target="_blank">https://mailman.videolan.org/listinfo/x265-devel</a><br>
</div><br></div>