[x265] [PATCH] asm: avx2 8bpp code for sub_ps[8x8](3.40x), sub_ps[16x16](7.65x), sub_ps[32x32](8.37x), sub_ps[64x64](9.27x)
rajesh at multicorewareinc.com
Wed Mar 11 12:01:02 CET 2015
# HG changeset patch
# User Rajesh Paulraj <rajesh at multicorewareinc.com>
# Date 1426071586 -19800
# Wed Mar 11 16:29:46 2015 +0530
# Node ID 4ed5dee69387acacbd25f35347611990f9bf285c
# Parent 8f148ac8dbe4b68e88ceff84f40e33b29e888dc9
asm: avx2 8bpp code for sub_ps[8x8](3.40x), sub_ps[16x16](7.65x),
sub_ps[32x32](8.37x), sub_ps[64x64](9.27x)
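
For reference, sub_ps computes the pixel-domain residual: subtract the predicted 8bpp block from the source block, widening to int16_t. A minimal scalar sketch of the operation these kernels vectorize (my own illustration, following the parameter order of the prototypes added to pixel.h below; block dimensions are passed explicitly here):

    #include <cstdint>

    // dest[y][x] = src0[y][x] - src1[y][x], widened to int16_t.
    // All strides are in elements; the asm below converts the int16_t
    // dest stride to bytes up front with "add r1, r1".
    static void pixel_sub_ps_ref(int16_t* dest, intptr_t deststride,
                                 const uint8_t* src0, const uint8_t* src1,
                                 intptr_t srcstride0, intptr_t srcstride1,
                                 int bx, int by)
    {
        for (int y = 0; y < by; y++)
        {
            for (int x = 0; x < bx; x++)
                dest[x] = (int16_t)(src0[x] - src1[x]);

            src0 += srcstride0;
            src1 += srcstride1;
            dest += deststride;
        }
    }
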
diff -r 8f148ac8dbe4 -r 4ed5dee69387 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Tue Mar 10 15:46:36 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp Wed Mar 11 16:29:46 2015 +0530
@@ -1419,6 +1419,11 @@
p.cu[BLOCK_32x32].add_ps = x265_pixel_add_ps_32x32_avx2;
p.cu[BLOCK_64x64].add_ps = x265_pixel_add_ps_64x64_avx2;
+ p.cu[BLOCK_8x8].sub_ps = x265_pixel_sub_ps_8x8_avx2;
+ p.cu[BLOCK_16x16].sub_ps = x265_pixel_sub_ps_16x16_avx2;
+ p.cu[BLOCK_32x32].sub_ps = x265_pixel_sub_ps_32x32_avx2;
+ p.cu[BLOCK_64x64].sub_ps = x265_pixel_sub_ps_64x64_avx2;
+
p.pu[LUMA_16x4].pixelavg_pp = x265_pixel_avg_16x4_avx2;
p.pu[LUMA_16x8].pixelavg_pp = x265_pixel_avg_16x8_avx2;
p.pu[LUMA_16x12].pixelavg_pp = x265_pixel_avg_16x12_avx2;
diff -r 8f148ac8dbe4 -r 4ed5dee69387 source/common/x86/pixel-util8.asm
--- a/source/common/x86/pixel-util8.asm Tue Mar 10 15:46:36 2015 +0530
+++ b/source/common/x86/pixel-util8.asm Wed Mar 11 16:29:46 2015 +0530
@@ -4198,6 +4198,43 @@
PIXELSUB_PS_W8_H4 8, 16
%endif
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_8x8(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal pixel_sub_ps_8x8, 6, 6, 4, dest, deststride, src0, src1, srcstride0, srcstride1
+ add r1, r1
+%rep 2
+ pmovzxbw xm0, [r2]
+ pmovzxbw xm1, [r2 + r4]
+ pmovzxbw xm2, [r3]
+ pmovzxbw xm3, [r3 + r5]
+
+ psubw xm0, xm2
+ psubw xm1, xm3
+
+ movu [r0], xm0
+ movu [r0 + r1], xm1
+
+ lea r2, [r2 + r4 * 2]
+ lea r3, [r3 + r5 * 2]
+
+ pmovzxbw xm0, [r2]
+ pmovzxbw xm1, [r2 + r4]
+ pmovzxbw xm2, [r3]
+ pmovzxbw xm3, [r3 + r5]
+
+ psubw xm0, xm2
+ psubw xm1, xm3
+
+ movu [r0 + r1 * 2], xm0
+ lea r0, [r0 + r1 * 2]
+ movu [r0 + r1], xm1
+ lea r0, [r0 + r1 * 2]
+ lea r2, [r2 + r4 * 2]
+ lea r3, [r3 + r5 * 2]
+%endrep
+ RET
;-----------------------------------------------------------------------------
; void pixel_sub_ps_16x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
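
The 8x8 kernel stays in xmm registers: each pmovzxbw loads 8 source bytes and zero-extends them to 8 words, so a single psubw yields a full row of coefficients. A hedged intrinsics sketch of one row (illustrative only, not the shipped code; the helper name is hypothetical, and it assumes the 8bpp build where pixel is uint8_t, compiled with AVX2 enabled):

    #include <immintrin.h>
    #include <cstdint>

    // One 8-pixel row, mirroring the pmovzxbw / psubw / movu triple above.
    static inline void sub_ps_row8(int16_t* dest, const uint8_t* src0, const uint8_t* src1)
    {
        __m128i a = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*)src0)); // pmovzxbw xm0, [r2]
        __m128i b = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*)src1)); // pmovzxbw xm2, [r3]
        _mm_storeu_si128((__m128i*)dest, _mm_sub_epi16(a, b));                // psubw + movu [r0]
    }
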
@@ -4335,6 +4372,43 @@
PIXELSUB_PS_W16_H4 16, 32
%endif
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_16x16(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal pixel_sub_ps_16x16, 6, 6, 4, dest, deststride, src0, src1, srcstride0, srcstride1
+ add r1, r1
+
+%rep 4
+ pmovzxbw m0, [r2]
+ pmovzxbw m1, [r3]
+ pmovzxbw m2, [r2 + r4]
+ pmovzxbw m3, [r3 + r5]
+ lea r2, [r2 + r4 * 2]
+ lea r3, [r3 + r5 * 2]
+
+ psubw m0, m1
+ psubw m2, m3
+
+ movu [r0], m0
+ movu [r0 + r1], m2
+
+ pmovzxbw m0, [r2]
+ pmovzxbw m1, [r3]
+ pmovzxbw m2, [r2 + r4]
+ pmovzxbw m3, [r3 + r5]
+
+ psubw m0, m1
+ psubw m2, m3
+
+ movu [r0 + r1 * 2], m0
+ lea r0, [r0 + r1 * 2]
+ movu [r0 + r1], m2
+ lea r0, [r0 + r1 * 2]
+ lea r2, [r2 + r4 * 2]
+ lea r3, [r3 + r5 * 2]
+%endrep
+ RET
;-----------------------------------------------------------------------------
; void pixel_sub_ps_32x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
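
At width 16 the same pattern fills a full ymm register: vpmovzxbw widens a 16-byte load to sixteen words. A hedged sketch of one 16-pixel row (again my own illustration; the helper name is hypothetical):

    #include <immintrin.h>
    #include <cstdint>

    // One 16-pixel row: 16 bytes widen to 16 words in a ymm register,
    // matching "pmovzxbw m0, [r2]" / "psubw m0, m1" / "movu [r0], m0".
    static inline void sub_ps_row16(int16_t* dest, const uint8_t* src0, const uint8_t* src1)
    {
        __m256i a = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*)src0));
        __m256i b = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*)src1));
        _mm256_storeu_si256((__m256i*)dest, _mm256_sub_epi16(a, b));
    }
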
@@ -4469,6 +4543,133 @@
PIXELSUB_PS_W32_H2 32, 64
%endif
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_32x32(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal pixel_sub_ps_32x32, 6, 6, 4, dest, deststride, src0, src1, srcstride0, srcstride1
+ add r1, r1
+
+%rep 4
+ pmovzxbw m0, [r2]
+ pmovzxbw m1, [r2 + 16]
+ pmovzxbw m2, [r3]
+ pmovzxbw m3, [r3 + 16]
+
+ psubw m0, m2
+ psubw m1, m3
+
+ movu [r0], m0
+ movu [r0 + 32], m1
+
+ pmovzxbw m0, [r2 + r4]
+ pmovzxbw m1, [r2 + r4 + 16]
+ pmovzxbw m2, [r3 + r5]
+ pmovzxbw m3, [r3 + r5 + 16]
+
+ psubw m0, m2
+ psubw m1, m3
+
+ movu [r0 + r1], m0
+ movu [r0 + r1 + 32], m1
+
+ lea r2, [r2 + r4]
+ lea r3, [r3 + r5]
+
+ pmovzxbw m0, [r2 + r4]
+ pmovzxbw m1, [r2 + r4 + 16]
+ pmovzxbw m2, [r3 + r5]
+ pmovzxbw m3, [r3 + r5 + 16]
+
+ psubw m0, m2
+ psubw m1, m3
+ lea r0, [r0 + r1 * 2]
+
+ movu [r0], m0
+ movu [r0 + 32], m1
+
+ lea r2, [r2 + r4]
+ lea r3, [r3 + r5]
+
+ pmovzxbw m0, [r2 + r4]
+ pmovzxbw m1, [r2 + r4 + 16]
+ pmovzxbw m2, [r3 + r5]
+ pmovzxbw m3, [r3 + r5 + 16]
+
+ psubw m0, m2
+ psubw m1, m3
+ lea r0, [r0 + r1]
+
+ movu [r0], m0
+ movu [r0 + 32], m1
+
+ lea r2, [r2 + r4]
+ lea r3, [r3 + r5]
+
+ pmovzxbw m0, [r2 + r4]
+ pmovzxbw m1, [r2 + r4 + 16]
+ pmovzxbw m2, [r3 + r5]
+ pmovzxbw m3, [r3 + r5 + 16]
+
+ psubw m0, m2
+ psubw m1, m3
+ lea r0, [r0 + r1]
+
+ movu [r0], m0
+ movu [r0 + 32], m1
+
+ lea r2, [r2 + r4]
+ lea r3, [r3 + r5]
+
+ pmovzxbw m0, [r2 + r4]
+ pmovzxbw m1, [r2 + r4 + 16]
+ pmovzxbw m2, [r3 + r5]
+ pmovzxbw m3, [r3 + r5 + 16]
+
+ psubw m0, m2
+ psubw m1, m3
+ lea r0, [r0 + r1]
+
+ movu [r0], m0
+ movu [r0 + 32], m1
+
+ lea r2, [r2 + r4]
+ lea r3, [r3 + r5]
+
+ pmovzxbw m0, [r2 + r4]
+ pmovzxbw m1, [r2 + r4 + 16]
+ pmovzxbw m2, [r3 + r5]
+ pmovzxbw m3, [r3 + r5 + 16]
+
+ psubw m0, m2
+ psubw m1, m3
+ lea r0, [r0 + r1]
+
+ movu [r0], m0
+ movu [r0 + 32], m1
+
+ lea r2, [r2 + r4]
+ lea r3, [r3 + r5]
+
+ pmovzxbw m0, [r2 + r4]
+ pmovzxbw m1, [r2 + r4 + 16]
+ pmovzxbw m2, [r3 + r5]
+ pmovzxbw m3, [r3 + r5 + 16]
+
+ psubw m0, m2
+ psubw m1, m3
+ lea r0, [r0 + r1]
+
+ movu [r0], m0
+ movu [r0 + 32], m1
+
+ lea r0, [r0 + r1]
+ lea r2, [r2 + r4 * 2]
+ lea r3, [r3 + r5 * 2]
+
+%endrep
+ RET
;-----------------------------------------------------------------------------
; void pixel_sub_ps_64x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
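
Note the offset arithmetic in the wider kernels: a 16-pixel step is +16 bytes on the 8bpp sources but +32 bytes on the int16_t destination, which is why the loads use [r2] / [r2 + 16] while the stores land at [r0] / [r0 + 32] (and [r0 + 64] / [r0 + 96] at width 64). One 32-pixel row as a hedged sketch (hypothetical helper, assuming AVX2 and 8bpp):

    #include <immintrin.h>
    #include <cstdint>

    // One 32-pixel row in two 16-pixel halves; src advances 16 bytes per
    // half, dest advances 32 bytes (16 coefficients * 2 bytes each).
    static inline void sub_ps_row32(int16_t* dest, const uint8_t* src0, const uint8_t* src1)
    {
        for (int x = 0; x < 32; x += 16)
        {
            __m256i a = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*)(src0 + x)));
            __m256i b = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*)(src1 + x)));
            _mm256_storeu_si256((__m256i*)(dest + x), _mm256_sub_epi16(a, b));
        }
    }
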
@@ -4688,6 +4889,112 @@
PIXELSUB_PS_W64_H2 64, 64
%endif
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_64x64(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal pixel_sub_ps_64x64, 6, 6, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+ add r1, r1
+
+%rep 16
+ pmovzxbw m0, [r2]
+ pmovzxbw m1, [r2 + 16]
+ pmovzxbw m2, [r2 + 32]
+ pmovzxbw m3, [r2 + 48]
+
+ pmovzxbw m4, [r3]
+ pmovzxbw m5, [r3 + 16]
+ pmovzxbw m6, [r3 + 32]
+ pmovzxbw m7, [r3 + 48]
+
+ psubw m0, m4
+ psubw m1, m5
+ psubw m2, m6
+ psubw m3, m7
+
+ movu [r0], m0
+ movu [r0 + 32], m1
+ movu [r0 + 64], m2
+ movu [r0 + 96], m3
+
+ lea r0, [r0 + r1]
+ lea r2, [r2 + r4]
+ lea r3, [r3 + r5]
+
+ pmovzxbw m0, [r2]
+ pmovzxbw m1, [r2 + 16]
+ pmovzxbw m2, [r2 + 32]
+ pmovzxbw m3, [r2 + 48]
+
+ pmovzxbw m4, [r3]
+ pmovzxbw m5, [r3 + 16]
+ pmovzxbw m6, [r3 + 32]
+ pmovzxbw m7, [r3 + 48]
+
+ psubw m0, m4
+ psubw m1, m5
+ psubw m2, m6
+ psubw m3, m7
+
+ movu [r0], m0
+ movu [r0 + 32], m1
+ movu [r0 + 64], m2
+ movu [r0 + 96], m3
+
+ lea r0, [r0 + r1]
+ lea r2, [r2 + r4]
+ lea r3, [r3 + r5]
+
+ pmovzxbw m0, [r2]
+ pmovzxbw m1, [r2 + 16]
+ pmovzxbw m2, [r2 + 32]
+ pmovzxbw m3, [r2 + 48]
+
+ pmovzxbw m4, [r3]
+ pmovzxbw m5, [r3 + 16]
+ pmovzxbw m6, [r3 + 32]
+ pmovzxbw m7, [r3 + 48]
+
+ psubw m0, m4
+ psubw m1, m5
+ psubw m2, m6
+ psubw m3, m7
+
+ movu [r0], m0
+ movu [r0 + 32], m1
+ movu [r0 + 64], m2
+ movu [r0 + 96], m3
+
+ lea r0, [r0 + r1]
+ lea r2, [r2 + r4]
+ lea r3, [r3 + r5]
+
+ pmovzxbw m0, [r2]
+ pmovzxbw m1, [r2 + 16]
+ pmovzxbw m2, [r2 + 32]
+ pmovzxbw m3, [r2 + 48]
+
+ pmovzxbw m4, [r3]
+ pmovzxbw m5, [r3 + 16]
+ pmovzxbw m6, [r3 + 32]
+ pmovzxbw m7, [r3 + 48]
+
+ psubw m0, m4
+ psubw m1, m5
+ psubw m2, m6
+ psubw m3, m7
+
+ movu [r0], m0
+ movu [r0 + 32], m1
+ movu [r0 + 64], m2
+ movu [r0 + 96], m3
+
+ lea r0, [r0 + r1]
+ lea r2, [r2 + r4]
+ lea r3, [r3 + r5]
+%endrep
+
+ RET
;=============================================================================
; variance
diff -r 8f148ac8dbe4 -r 4ed5dee69387 source/common/x86/pixel.h
--- a/source/common/x86/pixel.h Tue Mar 10 15:46:36 2015 +0530
+++ b/source/common/x86/pixel.h Wed Mar 11 16:29:46 2015 +0530
@@ -255,6 +255,11 @@
void x265_pixel_add_ps_32x32_avx2(pixel* a, intptr_t dstride, const pixel* b0, const int16_t* b1, intptr_t sstride0, intptr_t sstride1);
void x265_pixel_add_ps_64x64_avx2(pixel* a, intptr_t dstride, const pixel* b0, const int16_t* b1, intptr_t sstride0, intptr_t sstride1);
+void x265_pixel_sub_ps_8x8_avx2(int16_t* a, intptr_t dstride, const pixel* b0, const pixel* b1, intptr_t sstride0, intptr_t sstride1);
+void x265_pixel_sub_ps_16x16_avx2(int16_t* a, intptr_t dstride, const pixel* b0, const pixel* b1, intptr_t sstride0, intptr_t sstride1);
+void x265_pixel_sub_ps_32x32_avx2(int16_t* a, intptr_t dstride, const pixel* b0, const pixel* b1, intptr_t sstride0, intptr_t sstride1);
+void x265_pixel_sub_ps_64x64_avx2(int16_t* a, intptr_t dstride, const pixel* b0, const pixel* b1, intptr_t sstride0, intptr_t sstride1);
+
#undef DECL_PIXELS
#undef DECL_HEVC_SSD
#undef DECL_X1
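
A quick standalone way to convince yourself the widening-subtract pattern matches the scalar definition, in the spirit of x265's testbench but entirely my own sketch (it exercises the intrinsics equivalent, not the assembly itself; assumes AVX2 and contiguous blocks):

    #include <immintrin.h>
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Scalar sub_ps for one contiguous WxH block.
    static void sub_ps_ref(int16_t* d, const uint8_t* s0, const uint8_t* s1, int w, int h)
    {
        for (int i = 0; i < w * h; i++)
            d[i] = (int16_t)(s0[i] - s1[i]);
    }

    // AVX2 equivalent for widths that are a multiple of 16.
    static void sub_ps_avx2(int16_t* d, const uint8_t* s0, const uint8_t* s1, int w, int h)
    {
        for (int i = 0; i < w * h; i += 16)
        {
            __m256i a = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*)(s0 + i)));
            __m256i b = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*)(s1 + i)));
            _mm256_storeu_si256((__m256i*)(d + i), _mm256_sub_epi16(a, b));
        }
    }

    int main()
    {
        enum { W = 64, H = 64 };
        static uint8_t s0[W * H], s1[W * H];
        static int16_t ref[W * H], opt[W * H];

        for (int i = 0; i < W * H; i++)
        {
            s0[i] = (uint8_t)(rand() & 255);
            s1[i] = (uint8_t)(rand() & 255);
        }

        sub_ps_ref(ref, s0, s1, W, H);
        sub_ps_avx2(opt, s0, s1, W, H);
        printf("sub_ps %dx%d: %s\n", W, H, memcmp(ref, opt, sizeof(ref)) ? "MISMATCH" : "ok");
        return 0;
    }
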