<div dir="ltr">pushed</div><div class="gmail_extra"><br><div class="gmail_quote">On Wed, Jun 14, 2017 at 4:07 PM, <span dir="ltr"><<a href="mailto:jayashri@multicorewareinc.com" target="_blank">jayashri@multicorewareinc.com</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex"># HG changeset patch<br>
# User Jayashri Murugan <<a href="mailto:jayashri@multicorewareinc.com">jayashri@multicorewareinc.com</a><wbr>><br>
# Date 1496834769 -19800<br>
# Wed Jun 07 16:56:09 2017 +0530<br>
# Node ID 6d1fea9c34c5a1b7cb062ce3126df7de50170419<br>
# Parent 324ee113f48943791ec295013012d30fcbe16338<br>
avx2: 'integral12h' asm code -> 2.42x faster than 'C' version<br>
<br>
BIT_DEPTH = 8 : integral_init12h 2.42x 807.20 1950.56<br>
BIT_DEPTH = 10|12 : integral_init12h 1.55x 1260.02 1958.27<br>
<br>
diff -r 324ee113f489 -r 6d1fea9c34c5 source/common/x86/asm-primitives.cpp<br>
--- a/source/common/x86/asm-primitives.cpp Wed Jun 07 15:37:49 2017 +0530<br>
+++ b/source/common/x86/asm-primitives.cpp Wed Jun 07 16:56:09 2017 +0530<br>
@@ -2166,6 +2166,7 @@<br>
p.integral_initv[INTEGRAL_32] = PFX(integral32v_avx2);<br>
p.integral_inith[INTEGRAL_4] = PFX(integral4h_avx2);<br>
p.integral_inith[INTEGRAL_8] = PFX(integral8h_avx2);<br>
+ p.integral_inith[INTEGRAL_12] = PFX(integral12h_avx2);<br>
<br>
/* TODO: This kernel needs to be modified to work with HIGH_BIT_DEPTH only<br>
p.planeClipAndMax = PFX(planeClipAndMax_avx2); */<br>
@@ -3713,6 +3714,7 @@<br>
p.integral_initv[INTEGRAL_32] = PFX(integral32v_avx2);<br>
p.integral_inith[INTEGRAL_4] = PFX(integral4h_avx2);<br>
p.integral_inith[INTEGRAL_8] = PFX(integral8h_avx2);<br>
+ p.integral_inith[INTEGRAL_12] = PFX(integral12h_avx2);<br>
<br>
}<br>
#endif<br>
diff -r 324ee113f489 -r 6d1fea9c34c5 source/common/x86/seaintegral.asm<br>
--- a/source/common/x86/seaintegral.asm Wed Jun 07 15:37:49 2017 +0530<br>
+++ b/source/common/x86/seaintegral.asm Wed Jun 07 16:56:09 2017 +0530<br>
@@ -389,14 +389,204 @@<br>
RET<br>
%endif<br>
<br>
;; For 16 pixels, compute the horizontal sum of 12 consecutive bytes
;; starting at each pixel: m0[i] = sum of pix[i..i+11], as 16-bit words.
;; Reads 27 bytes from [r1]; clobbers m0, m1.
%macro INTEGRAL_TWELVE_HORIZONTAL_16 0
    pmovzxbw    m0, [r1]
%assign %%off 1
%rep 11
    pmovzxbw    m1, [r1 + %%off]
    paddw       m0, m1
%assign %%off %%off + 1
%endrep
%endmacro
+<br>
;; 4-pixel tail variant: xm0[i] = sum of pix[i..i+11] for i = 0..3,
;; as 16-bit words.  Clobbers xm0, xm1.
%macro INTEGRAL_TWELVE_HORIZONTAL_4 0
    movd        xm0, [r1]
    pmovzxbw    xm0, xm0
%assign %%off 1
%rep 11
    movd        xm1, [r1 + %%off]
    pmovzxbw    xm1, xm1
    paddw       xm0, xm1
%assign %%off %%off + 1
%endrep
%endmacro
+<br>
;; High-bit-depth (16-bit pixel) variant: for 8 pixels, compute
;; m0[i] = sum of pix[i..i+11] as 32-bit dwords.  Offsets step by 2
;; because each input pixel is 2 bytes.  Clobbers m0, m1.
%macro INTEGRAL_TWELVE_HORIZONTAL_8_HBD 0
    pmovzxwd    m0, [r1]
%assign %%off 2
%rep 11
    pmovzxwd    m1, [r1 + %%off]
    paddd       m0, m1
%assign %%off %%off + 2
%endrep
%endmacro
+<br>
;; High-bit-depth 4-pixel tail variant: xm0[i] = sum of pix[i..i+11]
;; for i = 0..3, as 32-bit dwords.  Clobbers xm0, xm1.
%macro INTEGRAL_TWELVE_HORIZONTAL_4_HBD 0
    pmovzxwd    xm0, [r1]
%assign %%off 2
%rep 11
    pmovzxwd    xm1, [r1 + %%off]
    paddd       xm0, xm1
%assign %%off %%off + 2
%endrep
%endmacro
+<br>
;------------------------------------------------------------------------------
; static void integral_init12h_c(uint32_t *sum, pixel *pix, intptr_t stride)
;
; r0 = sum (current output row), r1 = pix, r2 = stride.
; Each output is sum_prev[x] + (pix[x] + ... + pix[x+11]); the previous row
; lives 4*stride bytes before r0.
;------------------------------------------------------------------------------
INIT_YMM avx2
%if HIGH_BIT_DEPTH
cglobal integral12h, 3, 5, 3
    lea     r3, [4 * r2]        ; byte distance between output rows
    sub     r0, r3              ; r0 -> previous output row
    sub     r2, 12              ; sums to produce = stride - 12
    mov     r4, r2
    shr     r4, 3               ; main-loop iterations, 8 sums each
    jz      .tail               ; guard: r4 == 0 would underflow the jnz loop
                                ; NOTE(review): tail assumes (stride-12) % 8 == 4;
                                ; confirm against callers' stride alignment

.loop:
    INTEGRAL_TWELVE_HORIZONTAL_8_HBD
    movu    m1, [r0]
    paddd   m0, m1
    movu    [r0 + r3], m0
    add     r1, 16              ; advance 8 input pixels (2 bytes each)
    add     r0, 32              ; advance 8 output sums (4 bytes each)
    sub     r2, 8
    sub     r4, 1
    jnz     .loop

.tail:
    INTEGRAL_TWELVE_HORIZONTAL_4_HBD
    movu    xm1, [r0]
    paddd   xm0, xm1
    movu    [r0 + r3], xm0
    RET

%else
cglobal integral12h, 3, 5, 3
    lea     r3, [4 * r2]        ; byte distance between output rows
    sub     r0, r3              ; r0 -> previous output row
    sub     r2, 12              ; sums to produce = stride - 12
    mov     r4, r2
    shr     r4, 4               ; main-loop iterations, 16 sums each
    jz      .tails              ; guard: r4 == 0 would underflow the jnz loop

.loop_16:
    INTEGRAL_TWELVE_HORIZONTAL_16
    vperm2i128  m2, m0, m0, 1   ; move high 8 words into low lane of m2
    pmovzxwd    m2, xm2         ; widen sums 8..15 to dwords
    pmovzxwd    m0, xm0         ; widen sums 0..7 to dwords
    movu        m1, [r0]
    paddd       m0, m1
    movu        [r0 + r3], m0
    movu        m1, [r0 + 32]
    paddd       m2, m1
    movu        [r0 + r3 + 32], m2
    add         r1, 16          ; advance 16 input pixels (1 byte each)
    add         r0, 64          ; advance 16 output sums (4 bytes each)
    sub         r2, 16
    sub         r4, 1
    jnz         .loop_16

.tails:
    ; Dispatch on the remainder.  The previous version fell through into the
    ; 12-wide tail when r2 matched neither 12 nor 4, writing past the row;
    ; a remainder of 0 must go straight to .end.
    cmp     r2, 12
    je      .tail_12
    cmp     r2, 4
    je      .tail_4
    jmp     .end

.tail_12:
    INTEGRAL_TWELVE_HORIZONTAL_16   ; computes 16 sums; only 12 are stored
    vperm2i128  m2, m0, m0, 1
    pmovzxwd    xm2, xm2            ; only sums 8..11 are needed
    pmovzxwd    m0, xm0
    movu        m1, [r0]
    paddd       m0, m1
    movu        [r0 + r3], m0
    movu        xm1, [r0 + 32]
    paddd       xm2, xm1
    movu        [r0 + r3 + 32], xm2
    jmp     .end

.tail_4:
    INTEGRAL_TWELVE_HORIZONTAL_4
    pmovzxwd    xm0, xm0
    movu        xm1, [r0]
    paddd       xm0, xm1
    movu        [r0 + r3], xm0

.end:                           ; was ".end" without a colon (orphan label)
    RET
%endif
+<br>
;-----------------------------<wbr>------------------------------<wbr>------------------<br>
;static void integral_init16h_c(uint32_t *sum, pixel *pix, intptr_t stride)<br>
;-----------------------------<wbr>------------------------------<wbr>------------------<br>
______________________________<wbr>_________________<br>
x265-devel mailing list<br>
<a href="mailto:x265-devel@videolan.org">x265-devel@videolan.org</a><br>
<a href="https://mailman.videolan.org/listinfo/x265-devel" rel="noreferrer" target="_blank">https://mailman.videolan.org/<wbr>listinfo/x265-devel</a><br>
</blockquote></div><br></div>