[x265] [PATCH] asm: assembly code for x265_pixel_satd_32x8
yuvaraj at multicorewareinc.com
Mon Nov 11 12:32:31 CET 2013
# HG changeset patch
# User Yuvaraj Venkatesh <yuvaraj at multicorewareinc.com>
# Date 1384169486 -19800
# Mon Nov 11 17:01:26 2013 +0530
# Node ID 1130addeb3b8f9daffa9c0f5d2852e1812169c02
# Parent 9642b5b6500b5553ab3ce70a360aaaadad5d7234
asm: assembly code for x265_pixel_satd_32x8
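
For context (this note is not part of the changeset): until now the LUMA_32x8 SATD was served by the generic cmp<> wrapper in asm-primitives.cpp, which tiles a larger block with a smaller assembly SATD primitive and sums the partial results; the patch drops that fallback and wires in dedicated x265_pixel_satd_32x8_{sse2,sse4,avx} entry points. A minimal sketch of the tiling idea behind cmp<32, 8, 16, 8, ...> follows; the real wrapper's name, signature and pixel typedef in x265 may differ, so treat all names here as illustrative only.

    #include <cstdint>
    #include <cstddef>

    typedef uint8_t pixel;   // 8-bit pixel assumed for this sketch

    // Illustrative stand-in for the cmp<lx, ly, dx, dy, primitive> fallback:
    // cover an lx-by-ly block with dx-by-dy tiles and sum each tile's SATD.
    template<int lx, int ly, int dx, int dy,
             int (*partial)(const pixel*, intptr_t, const pixel*, intptr_t)>
    int tiledSatd(const pixel* fenc, intptr_t fencStride,
                  const pixel* fref, intptr_t frefStride)
    {
        int sum = 0;
        for (int y = 0; y < ly; y += dy)        // dy-row bands down the block
            for (int x = 0; x < lx; x += dx)    // dx-pixel tiles across each band
                sum += partial(fenc + y * fencStride + x, fencStride,
                               fref + y * frefStride + x, frefStride);
        return sum;
    }
    // e.g. a 32x8 block built from 16x8 calls: tiledSatd<32, 8, 16, 8, satd16x8>(...),
    // where satd16x8 stands in for x265_pixel_satd_16x8_<cpu>.
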
diff -r 9642b5b6500b -r 1130addeb3b8 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Mon Nov 11 17:41:32 2013 +0800
+++ b/source/common/x86/asm-primitives.cpp Mon Nov 11 17:01:26 2013 +0530
@@ -72,7 +72,6 @@
     p.satd[LUMA_32x16] = cmp<32, 16, 16, 16, x265_pixel_satd_16x16_ ## cpu>; \
     p.satd[LUMA_32x24] = cmp<32, 24, 16, 8, x265_pixel_satd_16x8_ ## cpu>; \
     p.satd[LUMA_24x32] = cmp<24, 32, 8, 16, x265_pixel_satd_8x16_ ## cpu>; \
-    p.satd[LUMA_32x8] = cmp<32, 8, 16, 8, x265_pixel_satd_16x8_ ## cpu>; \
     p.satd[LUMA_64x64] = cmp<64, 64, 16, 16, x265_pixel_satd_16x16_ ## cpu>; \
     p.satd[LUMA_64x32] = cmp<64, 32, 16, 16, x265_pixel_satd_16x16_ ## cpu>; \
     p.satd[LUMA_32x64] = cmp<32, 64, 16, 16, x265_pixel_satd_16x16_ ## cpu>; \
@@ -294,7 +293,7 @@
         INIT8(satd, _mmx2);
         HEVC_SATD(mmx2);
         p.satd[LUMA_12x16] = cmp<12, 16, 4, 16, x265_pixel_satd_4x16_mmx2>;
-
+        p.satd[LUMA_32x8] = x265_pixel_satd_32x8_sse2;
         p.sa8d[BLOCK_4x4] = x265_pixel_satd_4x4_mmx2;
 
         p.frame_init_lowres_core = x265_frame_init_lowres_core_mmx2;
@@ -375,7 +374,7 @@
         p.satd[LUMA_16x4] = x265_pixel_satd_16x4_sse2;
         p.satd[LUMA_16x32] = x265_pixel_satd_16x32_sse2;
         p.satd[LUMA_16x64] = x265_pixel_satd_16x64_sse2;
-        p.satd[LUMA_16x12] = cmp<16, 12, 16, 4, x265_pixel_satd_16x4_sse2>;
+        p.satd[LUMA_16x12] = x265_pixel_satd_16x12_sse2;
 #endif
 
         p.frame_init_lowres_core = x265_frame_init_lowres_core_sse2;
@@ -444,6 +443,7 @@
     {
         p.satd[LUMA_4x16] = x265_pixel_satd_4x16_sse4;
         p.satd[LUMA_12x16] = cmp<12, 16, 4, 16, x265_pixel_satd_4x16_sse4>;
+        p.satd[LUMA_32x8] = x265_pixel_satd_32x8_sse4;
         p.sa8d[BLOCK_8x8] = x265_pixel_sa8d_8x8_sse4;
         p.sa8d[BLOCK_16x16] = x265_pixel_sa8d_16x16_sse4;
         SA8D_INTER_FROM_BLOCK(sse4);
@@ -468,6 +468,7 @@
         p.frame_init_lowres_core = x265_frame_init_lowres_core_avx;
         p.satd[LUMA_4x16] = x265_pixel_satd_4x16_avx;
         p.satd[LUMA_12x16] = cmp<12, 16, 4, 16, x265_pixel_satd_4x16_avx>;
+        p.satd[LUMA_32x8] = x265_pixel_satd_32x8_avx;
         p.sa8d[BLOCK_8x8] = x265_pixel_sa8d_8x8_avx;
         p.sa8d[BLOCK_16x16] = x265_pixel_sa8d_16x16_avx;
         SA8D_INTER_FROM_BLOCK(avx);
diff -r 9642b5b6500b -r 1130addeb3b8 source/common/x86/pixel-a.asm
--- a/source/common/x86/pixel-a.asm Mon Nov 11 17:41:32 2013 +0800
+++ b/source/common/x86/pixel-a.asm Mon Nov 11 17:01:26 2013 +0530
@@ -1745,7 +1745,46 @@
     call pixel_satd_16x4_internal
     call pixel_satd_16x4_internal
     SATD_END_SSE2 m10
+
+cglobal pixel_satd_32x8, 4,8,8    ;if WIN64 && notcpuflag(avx)
+    SATD_START_SSE2 m10, m7
+    mov r6, r0
+    mov r7, r2
+%if vertical
+    mova m7, [pw_00ff]
+%endif
+    call pixel_satd_16x4_internal
+    call pixel_satd_16x4_internal
+    lea r0, [r6 + 16]
+    lea r2, [r7 + 16]
+    call pixel_satd_16x4_internal
+    call pixel_satd_16x4_internal
+    SATD_END_SSE2 m10
+
 %else
+
+cglobal pixel_satd_32x8, 4,6,8    ;if !WIN64
+    SATD_START_SSE2 m6, m7
+    BACKUP_POINTERS
+    call pixel_satd_8x8_internal
+    RESTORE_AND_INC_POINTERS
+    BACKUP_POINTERS
+    call pixel_satd_8x8_internal
+    RESTORE_AND_INC_POINTERS
+%if WIN64 == 0
+    add r0, 8*SIZEOF_PIXEL
+    add r2, 8*SIZEOF_PIXEL
+%endif
+    BACKUP_POINTERS
+    call pixel_satd_8x8_internal
+    RESTORE_AND_INC_POINTERS
+%if WIN64 == 0
+    add r0, 16*SIZEOF_PIXEL
+    add r2, 16*SIZEOF_PIXEL
+%endif
+    call pixel_satd_8x8_internal
+    SATD_END_SSE2 m6
+
 cglobal pixel_satd_16x8, 4,6,8
     SATD_START_SSE2 m6, m7
     BACKUP_POINTERS
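
A note on the structure of the new routine (again, not part of the patch text): neither path computes the 32x8 SATD in a single pass. The WIN64 non-AVX version walks two 16-pixel-wide columns, covering each with two pixel_satd_16x4_internal calls, while the other version walks four 8-pixel-wide columns with pixel_satd_8x8_internal, re-basing r0/r2 between columns; both accumulate the partial sums in the SATD accumulator register (m10 or m6) before SATD_END_SSE2 produces the result. A scalar outline of that column split, using a hypothetical satd8x8() helper in place of pixel_satd_8x8_internal:

    #include <cstdint>
    #include <cstddef>

    typedef uint8_t pixel;   // 8-bit pixel assumed, as in the earlier sketch

    // Declaration only: placeholder for pixel_satd_8x8_internal (one 8x8 tile's SATD).
    int satd8x8(const pixel* enc, intptr_t encStride, const pixel* ref, intptr_t refStride);

    int satd32x8ByColumns(const pixel* enc, intptr_t encStride,
                          const pixel* ref, intptr_t refStride)
    {
        int sum = 0;
        for (int x = 0; x < 32; x += 8)     // columns at x = 0, 8, 16, 24
            sum += satd8x8(enc + x, encStride, ref + x, refStride);
        return sum;                         // same total the assembly accumulates
    }
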