[x265] [PATCH] asm: avx2 8bpp code for convert_p2s[64xN]
rajesh at multicorewareinc.com
Tue Apr 7 15:37:27 CEST 2015
# HG changeset patch
# User Rajesh Paulraj<rajesh at multicorewareinc.com>
# Date 1428411977 -19800
# Tue Apr 07 18:36:17 2015 +0530
# Node ID 5aa94ce59d5b6c283db7ad85caf5c8cc0ddafcfd
# Parent 5e1261b63eae3db9b3527c74c45adefeb2bb20fb
asm: avx2 8bpp code for convert_p2s[64xN]
convert_p2s[64x16](10.54x), convert_p2s[64x32](10.78x),
convert_p2s[64x48](10.32x), convert_p2s[64x64](11.14x)
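For context, convert_p2s (filterPixelToShort) widens 8bpp pixels into the offset 16-bit intermediate form used by the inter-prediction code. As a rough guide to what the new 64xN AVX2 kernels compute, here is a minimal reference-only sketch (the function name is illustrative and not part of this patch), assuming plain uint8_t pixels and the usual 8bpp constants shift = 6, offset = 8192:

    #include <cstdint>

    // Reference-only sketch: scalar equivalent of the 64xN AVX2 kernels below,
    // assuming 8bpp input with shift = 6 and offset = 8192.
    static void filterPixelToShort_ref(const uint8_t* src, intptr_t srcStride,
                                       int16_t* dst, intptr_t dstStride,
                                       int width, int height)
    {
        for (int y = 0; y < height; y++)
        {
            for (int x = 0; x < width; x++)
                dst[x] = (int16_t)((src[x] << 6) - 8192);

            src += srcStride;
            dst += dstStride;
        }
    }

The AVX2 kernels evaluate the same expression with pmaddubsw: each pixel byte is paired with 128 (pb_128) and multiplied against the {64, -64} pattern (tab_c_64_n64), leaving pixel*64 - 8192 in every 16-bit lane.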
diff -r 5e1261b63eae -r 5aa94ce59d5b source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Tue Apr 07 18:05:41 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp Tue Apr 07 18:36:17 2015 +0530
@@ -2042,6 +2042,10 @@
p.pu[LUMA_32x24].convert_p2s = x265_filterPixelToShort_32x24_avx2;
p.pu[LUMA_32x32].convert_p2s = x265_filterPixelToShort_32x32_avx2;
p.pu[LUMA_32x64].convert_p2s = x265_filterPixelToShort_32x64_avx2;
+ p.pu[LUMA_64x16].convert_p2s = x265_filterPixelToShort_64x16_avx2;
+ p.pu[LUMA_64x32].convert_p2s = x265_filterPixelToShort_64x32_avx2;
+ p.pu[LUMA_64x48].convert_p2s = x265_filterPixelToShort_64x48_avx2;
+ p.pu[LUMA_64x64].convert_p2s = x265_filterPixelToShort_64x64_avx2;
if ((cpuMask & X265_CPU_BMI1) && (cpuMask & X265_CPU_BMI2))
p.findPosLast = x265_findPosLast_x64;
diff -r 5e1261b63eae -r 5aa94ce59d5b source/common/x86/ipfilter8.asm
--- a/source/common/x86/ipfilter8.asm Tue Apr 07 18:05:41 2015 +0530
+++ b/source/common/x86/ipfilter8.asm Tue Apr 07 18:36:17 2015 +0530
@@ -8433,6 +8433,127 @@
P2S_H_64xN 32
P2S_H_64xN 48
+;-----------------------------------------------------------------------------
+; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+%macro P2S_H_64xN_avx2 1
+INIT_YMM avx2
+cglobal filterPixelToShort_64x%1, 3, 7, 6
+ mov r3d, r3m
+ add r3d, r3d
+ lea r4, [r3 * 3]
+ lea r5, [r1 * 3]
+
+ ; loop counter = height / 4 (four rows are processed per iteration)
+ mov r6d, %1/4
+
+ ; load constants
+ vbroadcasti128 m4, [pb_128]
+ vbroadcasti128 m5, [tab_c_64_n64]
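+ ; pb_128 broadcasts 128 into every byte and tab_c_64_n64 is the byte
+ ; pattern {64, -64}: interleaving a pixel with 128 and applying pmaddubsw
+ ; yields pixel*64 - 128*64 = (pixel << 6) - 8192 in each 16-bit lane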
+
+.loop:
+ movu m0, [r0]
+ punpcklbw m1, m0, m4
+ punpckhbw m2, m0, m4
+ pmaddubsw m1, m5
+ pmaddubsw m2, m5
+ vperm2i128 m3, m1, m2, q0301
+ vperm2i128 m2, m1, m2, q0200
+
+ movu [r2 + r3 * 0], m2
+ movu [r2 + r3 * 0 + 32], m3
+
+ movu m0, [r0 + r1]
+ punpcklbw m1, m0, m4
+ punpckhbw m2, m0, m4
+ pmaddubsw m1, m5
+ pmaddubsw m2, m5
+ vperm2i128 m3, m1, m2, q0301
+ vperm2i128 m2, m1, m2, q0200
+
+ movu [r2 + r3 * 1], m2
+ movu [r2 + r3 * 1 + 32], m3
+
+ movu m0, [r0 + r1 * 2]
+ punpcklbw m1, m0, m4
+ punpckhbw m2, m0, m4
+ pmaddubsw m1, m5
+ pmaddubsw m2, m5
+ vperm2i128 m3, m1, m2, q0301
+ vperm2i128 m2, m1, m2, q0200
+
+ movu [r2 + r3 * 2], m2
+ movu [r2 + r3 * 2 + 32], m3
+
+ movu m0, [r0 + r5]
+ punpcklbw m1, m0, m4
+ punpckhbw m2, m0, m4
+ pmaddubsw m1, m5
+ pmaddubsw m2, m5
+ vperm2i128 m3, m1, m2, q0301
+ vperm2i128 m2, m1, m2, q0200
+
+ movu [r2 + r4], m2
+ movu [r2 + r4 + 32], m3
+
+ add r0, 32
+
+ movu m0, [r0]
+ punpcklbw m1, m0, m4
+ punpckhbw m2, m0, m4
+ pmaddubsw m1, m5
+ pmaddubsw m2, m5
+ vperm2i128 m3, m1, m2, q0301
+ vperm2i128 m2, m1, m2, q0200
+
+ movu [r2 + r3 * 0 + 64], m2
+ movu [r2 + r3 * 0 + 96], m3
+
+ movu m0, [r0 + r1]
+ punpcklbw m1, m0, m4
+ punpckhbw m2, m0, m4
+ pmaddubsw m1, m5
+ pmaddubsw m2, m5
+ vperm2i128 m3, m1, m2, q0301
+ vperm2i128 m2, m1, m2, q0200
+
+ movu [r2 + r3 * 1 + 64], m2
+ movu [r2 + r3 * 1 + 96], m3
+
+ movu m0, [r0 + r1 * 2]
+ punpcklbw m1, m0, m4
+ punpckhbw m2, m0, m4
+ pmaddubsw m1, m5
+ pmaddubsw m2, m5
+ vperm2i128 m3, m1, m2, q0301
+ vperm2i128 m2, m1, m2, q0200
+
+ movu [r2 + r3 * 2 + 64], m2
+ movu [r2 + r3 * 2 + 96], m3
+
+ movu m0, [r0 + r5]
+ punpcklbw m1, m0, m4
+ punpckhbw m2, m0, m4
+ pmaddubsw m1, m5
+ pmaddubsw m2, m5
+ vperm2i128 m3, m1, m2, q0301
+ vperm2i128 m2, m1, m2, q0200
+
+ movu [r2 + r4 + 64], m2
+ movu [r2 + r4 + 96], m3
+
+ lea r0, [r0 + r1 * 4 - 32]
+ lea r2, [r2 + r3 * 4]
+
+ dec r6d
+ jnz .loop
+ RET
+%endmacro
+P2S_H_64xN_avx2 64
+P2S_H_64xN_avx2 16
+P2S_H_64xN_avx2 32
+P2S_H_64xN_avx2 48
+
;-----------------------------------------------------------------------------
; void filterPixelToShort(pixel *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride)
;-----------------------------------------------------------------------------
diff -r 5e1261b63eae -r 5aa94ce59d5b source/common/x86/ipfilter8.h
--- a/source/common/x86/ipfilter8.h Tue Apr 07 18:05:41 2015 +0530
+++ b/source/common/x86/ipfilter8.h Tue Apr 07 18:36:17 2015 +0530
@@ -680,6 +680,10 @@
void x265_filterPixelToShort_32x24_avx2(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride);
void x265_filterPixelToShort_32x32_avx2(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride);
void x265_filterPixelToShort_32x64_avx2(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride);
+void x265_filterPixelToShort_64x16_avx2(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride);
+void x265_filterPixelToShort_64x32_avx2(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride);
+void x265_filterPixelToShort_64x48_avx2(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride);
+void x265_filterPixelToShort_64x64_avx2(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride);
#undef LUMA_FILTERS
#undef LUMA_SP_FILTERS
#undef LUMA_SS_FILTERS