<div dir="ltr"><br><div class="gmail_extra"><br><br><div class="gmail_quote">On Fri, Oct 18, 2013 at 5:48 AM, <span dir="ltr"><<a href="mailto:praveen@multicorewareinc.com" target="_blank">praveen@multicorewareinc.com</a>></span> wrote:<br>
<blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex"># HG changeset patch<br>
# User Praveen Tiwari<br>
# Date 1382093292 -19800<br>
# Node ID 0efb3f85325f03edb436b260ba28189b8eae6b3f<br>
# Parent 975b3d17baa8339f77a3b99245136e8c06cf3fdb<br>
asm code for luma filter functions<br></blockquote><div><br></div><div>I've folded a few patches together, but these changes are all queued.</div><div><br></div><div>Thanks for persevering; the new 4tap and 8tap routines have good performance and I think we have a solid foundation on which to add the rest of the interpolation primitives, and perhaps do some XOP/AVX/AVX2 optimizations of the existing macros in the future.</div>
<div><br></div><div>The next step is to wire up these new primitives into the motion compensation routines. Then on to the vertical interpolation and the ps/sp varieties.</div><div> </div><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">
<br>
diff -r 975b3d17baa8 -r 0efb3f85325f source/common/x86/asm-primitives.cpp<br>
--- a/source/common/x86/asm-primitives.cpp Fri Oct 18 15:58:29 2013 +0530<br>
+++ b/source/common/x86/asm-primitives.cpp Fri Oct 18 16:18:12 2013 +0530<br>
@@ -279,6 +279,7 @@<br>
SA8D_INTER_FROM_BLOCK(sse4);<br>
<br>
CHROMA_FILTERS(_sse4);<br>
+ LUMA_FILTERS(_sse4);<br>
}<br>
if (cpuMask & X265_CPU_AVX)<br>
{<br>
diff -r 975b3d17baa8 -r 0efb3f85325f source/common/x86/ipfilter8.asm<br>
--- a/source/common/x86/ipfilter8.asm Fri Oct 18 15:58:29 2013 +0530<br>
+++ b/source/common/x86/ipfilter8.asm Fri Oct 18 16:18:12 2013 +0530<br>
@@ -30,6 +30,11 @@<br>
tab_Tm: db 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6<br>
db 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10<br>
<br>
+tab_Lm: db 0, 1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7, 8 ; pshufb masks: each 16-byte row gathers two overlapping 8-pixel windows (8-tap luma)<br>
+ db 2, 3, 4, 5, 6, 7, 8, 9, 3, 4, 5, 6, 7, 8, 9, 10 ; windows for output columns 2 and 3<br>
+ db 4, 5, 6, 7, 8, 9, 10, 11, 5, 6, 7, 8, 9, 10, 11, 12 ; windows for output columns 4 and 5<br>
+ db 6, 7, 8, 9, 10, 11, 12, 13, 7, 8, 9, 10, 11, 12, 13, 14 ; windows for output columns 6 and 7<br>
+<br>
tab_c_512: times 8 dw 512<br>
<br>
tab_coeff: db 0, 64, 0, 0<br>
@@ -41,6 +46,12 @@<br>
db -2, 16, 54, -4<br>
db -2, 10, 58, -2<br>
<br>
+tab_LumaCoeff: db 0, 0, 0, 64, 0, 0, 0, 0 ; HEVC 8-tap luma interpolation taps, row selected by coeffIdx (row 0 = full-pel pass-through)<br>
+ db -1, 4, -10, 58, 17, -5, 1, 0 ; coeffIdx 1 (1/4-pel)<br>
+ db -1, 4, -11, 40, 40, -11, 4, -1 ; coeffIdx 2 (1/2-pel)<br>
+ db 0, 1, -5, 17, 58, -10, 4, -1 ; coeffIdx 3 (3/4-pel)<br>
+<br>
+<br>
SECTION .text<br>
<br>
%macro FILTER_H4_w2_2 3<br>
@@ -469,3 +480,116 @@<br>
IPFILTER_CHROMA_W 32, 16<br>
IPFILTER_CHROMA_W 32, 24<br>
IPFILTER_CHROMA_W 32, 32<br>
+<br>
+<br>
+%macro FILTER_H8_W8 3 ; emit 8 horizontal 8-tap outputs; %1 = src scratch reg, %2 = result reg, %3 = rounding const (tab_c_512); clobbers m7, reads m3 (taps), r0/r2/r5<br>
+ movu %1, [r0 - 3 + r5] ; load 16 src pixels; -3 covers the three left taps of the 8-tap filter<br>
+ pshufb %2, %1, [tab_Lm] ; arrange overlapping 8-pixel windows for columns 0-1<br>
+ pmaddubsw %2, m3 ; unsigned pixels * signed taps, paired adds<br>
+ pshufb m7, %1, [tab_Lm + 16] ; windows for columns 2-3<br>
+ pmaddubsw m7, m3<br>
+ phaddw %2, m7<br>
+ pshufb m7, %1, [tab_Lm + 32] ; windows for columns 4-5<br>
+ pmaddubsw m7, m3<br>
+ pshufb %1, %1, [tab_Lm + 48] ; windows for columns 6-7<br>
+ pmaddubsw %1, m3<br>
+ phaddw m7, %1<br>
+ phaddw %2, m7 ; collapse to 8 filtered words, one per output column<br>
+ pmulhrsw %2, %3 ; *512 with pmulhrsw == rounded arithmetic >> 6<br>
+ packuswb %2, %2 ; saturate/clamp to 8-bit pixels<br>
+ movh [r2 + r5], %2 ; store 8 output pixels<br>
+%endmacro<br>
+<br>
+%macro FILTER_H8_W4 3 ; emit 4 horizontal 8-tap outputs; %1 = src scratch reg, %2 = result reg, %3 = rounding const; clobbers m7, reads m3 (taps), r0/r2/r5<br>
+ movu %1, [r0 - 3 + r5] ; load src pixels starting 3 left of the current column<br>
+ pshufb %2, %1, [tab_Lm] ; windows for columns 0-1<br>
+ pmaddubsw %2, m3<br>
+ pshufb m7, %1, [tab_Lm + 16] ; windows for columns 2-3<br>
+ pmaddubsw m7, m3<br>
+ phaddw %2, m7<br>
+ phaddw %2, %2 ; reduce to 4 filtered words in the low half<br>
+ pmulhrsw %2, %3 ; rounded >> 6 via multiply by 512<br>
+ packuswb %2, %2<br>
+ movd [r2 + r5], %2 ; store 4 output pixels<br>
+%endmacro<br>
+<br>
+%macro FILTER_H8_W1 3 ; emit 1 horizontal 8-tap output; %1 = src scratch reg, %2 = result reg, %3 = rounding const; reads m3 (taps), r0/r2/r5<br>
+ movu %1, [r0 - 3 + r5] ; load src pixels starting 3 left of the current column<br>
+ pshufb %2, %1, [tab_Lm] ; 8-pixel window for column 0 in the low lane<br>
+ pmaddubsw %2, m3<br>
+ phaddw %2, %2<br>
+ phaddw %2, %2 ; sum all tap products into the low word<br>
+ pmulhrsw %2, %3 ; rounded >> 6 via multiply by 512<br>
+ packuswb %2, %2<br>
+ pextrb [r2 + r5], %2, 0 ; store a single output pixel<br>
+%endmacro<br>
+<br>
+;-----------------------------------------------------------------------------<br>
+; void interp_8tap_horiz_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)<br>
+;-----------------------------------------------------------------------------<br>
+%macro IPFILTER_LUMA 2 ; %1 = width, %2 = height: interp_8tap_horiz_pp_%1x%2(src, srcStride, dst, dstStride, coeffIdx)<br>
+cglobal interp_8tap_horiz_pp_%1x%2, 4, 6, 8 ; was 5 XMM: FILTER_H8_* clobber m7, so Win64 must save xmm6/xmm7 -> declare 8<br>
+<br>
+mov r4d, r4m ; coeffIdx selects the tab_LumaCoeff row<br>
+<br>
+%ifdef PIC<br>
+lea r5, [tab_LumaCoeff]<br>
+movh m3, [r5 + r4 * 8]<br>
+%else<br>
+movh m3, [tab_LumaCoeff + r4 * 8]<br>
+%endif<br>
+<br>
+punpcklqdq m3, m3 ; broadcast the 8 taps to both qwords for pmaddubsw<br>
+mova m2, [tab_c_512] ; pmulhrsw rounding constant (rounded >> 6)<br>
+mov r4d, %2 ; row counter (was mov r4, %2; 32-bit to match dec r4d below)<br>
+<br>
+.loop: ; colon added: avoids NASM orphan-label warning<br>
+ xor r5, r5 ; r5 = column offset within the current row<br>
+%rep %1 / 8<br>
+ FILTER_H8_W8 m0, m1, m2<br>
+ add r5, 8<br>
+%endrep<br>
+<br>
+%rep (%1 % 8) / 4<br>
+ FILTER_H8_W4 m0, m1, m2<br>
+ add r5, 4<br>
+%endrep<br>
+<br>
+%rep (%1 % 4) ; space and indent normalized to match the %rep blocks above<br>
+ FILTER_H8_W1 m0, m1, m2<br>
+ add r5, 1<br>
+%endrep<br>
+<br>
+ add r0, r1 ; next src row<br>
+ add r2, r3 ; next dst row<br>
+<br>
+ dec r4d<br>
+ jnz .loop<br>
+ RET<br>
+%endmacro<br>
+<br>
+ IPFILTER_LUMA 4, 4 ; instantiate interp_8tap_horiz_pp_WxH for all HEVC luma block sizes<br>
+ IPFILTER_LUMA 8, 8<br>
+ IPFILTER_LUMA 8, 4<br>
+ IPFILTER_LUMA 4, 8<br>
+ IPFILTER_LUMA 16, 16<br>
+ IPFILTER_LUMA 16, 8<br>
+ IPFILTER_LUMA 8, 16<br>
+ IPFILTER_LUMA 16, 12<br>
+ IPFILTER_LUMA 12, 16<br>
+ IPFILTER_LUMA 16, 4<br>
+ IPFILTER_LUMA 4, 16<br>
+ IPFILTER_LUMA 32, 32<br>
+ IPFILTER_LUMA 32, 16<br>
+ IPFILTER_LUMA 16, 32<br>
+ IPFILTER_LUMA 32, 24<br>
+ IPFILTER_LUMA 24, 32<br>
+ IPFILTER_LUMA 32, 8<br>
+ IPFILTER_LUMA 8, 32<br>
+ IPFILTER_LUMA 64, 64<br>
+ IPFILTER_LUMA 64, 32<br>
+ IPFILTER_LUMA 32, 64<br>
+ IPFILTER_LUMA 64, 48<br>
+ IPFILTER_LUMA 48, 64<br>
+ IPFILTER_LUMA 64, 16<br>
+ IPFILTER_LUMA 16, 64<br>
_______________________________________________<br>
x265-devel mailing list<br>
<a href="mailto:x265-devel@videolan.org">x265-devel@videolan.org</a><br>
<a href="https://mailman.videolan.org/listinfo/x265-devel" target="_blank">https://mailman.videolan.org/listinfo/x265-devel</a><br>
</blockquote></div><br><br clear="all"><div><br></div>-- <br>Steve Borho
</div></div>