[x265] [PATCH] avx2: 'integral16h' asm code -> 2.25x faster than 'C' version
jayashri at multicorewareinc.com (Jayashri Murugan)
Wed Jun 14 13:58:17 CEST 2017
# HG changeset patch
# User Jayashri Murugan <jayashri at multicorewareinc.com>
# Date 1496985803 -19800
# Fri Jun 09 10:53:23 2017 +0530
# Node ID 68a1e026fdd21c944c5e94a9da2de205ca63df6c
# Parent 6d1fea9c34c5a1b7cb062ce3126df7de50170419
avx2: 'integral16h' asm code -> 2.25x faster than 'C' version
BIT_DEPTH = 8     : integral_init16h  2.25x   816.07  1837.95
BIT_DEPTH = 10|12 : integral_init16h  1.25x  1489.40  1856.91
(columns: speedup, avx2 time, C time)
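For context: the kernel fills one row of the 16-column integral plane, where each
output is the value directly above it plus the sum of the 16 pixels starting at
that column. A minimal C sketch of the reference being replaced (assuming the
usual x264-style running-sum formulation; only the prototype, quoted in the diff
below, is taken from the source):

    static void integral_init16h_c(uint32_t *sum, pixel *pix, intptr_t stride)
    {
        /* seed the running 16-pixel sum */
        uint32_t v = 0;
        for (int i = 0; i < 16; i++)
            v += pix[i];
        for (int x = 0; x < stride - 16; x++)
        {
            /* current row = row above + 16-pixel horizontal sum */
            sum[x] = v + sum[x - stride];
            /* slide the 16-pixel window one column right */
            v += pix[x + 16] - pix[x];
        }
    }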
diff -r 6d1fea9c34c5 -r 68a1e026fdd2 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Wed Jun 07 16:56:09 2017 +0530
+++ b/source/common/x86/asm-primitives.cpp Fri Jun 09 10:53:23 2017 +0530
@@ -2167,6 +2167,7 @@
p.integral_inith[INTEGRAL_4] = PFX(integral4h_avx2);
p.integral_inith[INTEGRAL_8] = PFX(integral8h_avx2);
p.integral_inith[INTEGRAL_12] = PFX(integral12h_avx2);
+ p.integral_inith[INTEGRAL_16] = PFX(integral16h_avx2);
/* TODO: This kernel needs to be modified to work with HIGH_BIT_DEPTH only
p.planeClipAndMax = PFX(planeClipAndMax_avx2); */
@@ -3715,6 +3716,7 @@
p.integral_inith[INTEGRAL_4] = PFX(integral4h_avx2);
p.integral_inith[INTEGRAL_8] = PFX(integral8h_avx2);
p.integral_inith[INTEGRAL_12] = PFX(integral12h_avx2);
+ p.integral_inith[INTEGRAL_16] = PFX(integral16h_avx2);
}
#endif
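For reference, callers reach the kernel through the primitives table populated
above; a hedged sketch of a call site (only p.integral_inith[INTEGRAL_16] and the
prototype are from the source; 'sum', 'pix', 'y', and 'pixStride' are hypothetical
names for illustration):

    /* fill row y of the 16-column integral plane from row y - 1 */
    p.integral_inith[INTEGRAL_16](sum + y * stride, pix + y * pixStride, stride);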
diff -r 6d1fea9c34c5 -r 68a1e026fdd2 source/common/x86/seaintegral.asm
--- a/source/common/x86/seaintegral.asm Wed Jun 07 16:56:09 2017 +0530
+++ b/source/common/x86/seaintegral.asm Fri Jun 09 10:53:23 2017 +0530
@@ -587,14 +587,169 @@
RET
%endif
+%macro INTEGRAL_SIXTEEN_HORIZONTAL_16 0 ;m0 = 16 word lanes: m0[x] = pix[x] + pix[x + 1] + ... + pix[x + 15]
+ pmovzxbw m0, [r1]
+ pmovzxbw m1, [r1 + 1]
+ paddw m0, m1
+ pmovzxbw m1, [r1 + 2]
+ paddw m0, m1
+ pmovzxbw m1, [r1 + 3]
+ paddw m0, m1
+ pmovzxbw m1, [r1 + 4]
+ paddw m0, m1
+ pmovzxbw m1, [r1 + 5]
+ paddw m0, m1
+ pmovzxbw m1, [r1 + 6]
+ paddw m0, m1
+ pmovzxbw m1, [r1 + 7]
+ paddw m0, m1
+ pmovzxbw m1, [r1 + 8]
+ paddw m0, m1
+ pmovzxbw m1, [r1 + 9]
+ paddw m0, m1
+ pmovzxbw m1, [r1 + 10]
+ paddw m0, m1
+ pmovzxbw m1, [r1 + 11]
+ paddw m0, m1
+ pmovzxbw m1, [r1 + 12]
+ paddw m0, m1
+ pmovzxbw m1, [r1 + 13]
+ paddw m0, m1
+ pmovzxbw m1, [r1 + 14]
+ paddw m0, m1
+ pmovzxbw m1, [r1 + 15]
+ paddw m0, m1
+%endmacro
+
+%macro INTEGRAL_SIXTEEN_HORIZONTAL_8 0 ;xm0 = 8 word lanes: xm0[x] = pix[x] + pix[x + 1] + ... + pix[x + 15]
+ pmovzxbw xm0, [r1]
+ pmovzxbw xm1, [r1 + 1]
+ paddw xm0, xm1
+ pmovzxbw xm1, [r1 + 2]
+ paddw xm0, xm1
+ pmovzxbw xm1, [r1 + 3]
+ paddw xm0, xm1
+ pmovzxbw xm1, [r1 + 4]
+ paddw xm0, xm1
+ pmovzxbw xm1, [r1 + 5]
+ paddw xm0, xm1
+ pmovzxbw xm1, [r1 + 6]
+ paddw xm0, xm1
+ pmovzxbw xm1, [r1 + 7]
+ paddw xm0, xm1
+ pmovzxbw xm1, [r1 + 8]
+ paddw xm0, xm1
+ pmovzxbw xm1, [r1 + 9]
+ paddw xm0, xm1
+ pmovzxbw xm1, [r1 + 10]
+ paddw xm0, xm1
+ pmovzxbw xm1, [r1 + 11]
+ paddw xm0, xm1
+ pmovzxbw xm1, [r1 + 12]
+ paddw xm0, xm1
+ pmovzxbw xm1, [r1 + 13]
+ paddw xm0, xm1
+ pmovzxbw xm1, [r1 + 14]
+ paddw xm0, xm1
+ pmovzxbw xm1, [r1 + 15]
+ paddw xm0, xm1
+%endmacro
+
+%macro INTEGRAL_SIXTEEN_HORIZONTAL_8_HBD 0 ;m0 = 8 dword lanes over 16-bit pixels: m0[x] = pix[x] + ... + pix[x + 15]
+ pmovzxwd m0, [r1]
+ pmovzxwd m1, [r1 + 2]
+ paddd m0, m1
+ pmovzxwd m1, [r1 + 4]
+ paddd m0, m1
+ pmovzxwd m1, [r1 + 6]
+ paddd m0, m1
+ pmovzxwd m1, [r1 + 8]
+ paddd m0, m1
+ pmovzxwd m1, [r1 + 10]
+ paddd m0, m1
+ pmovzxwd m1, [r1 + 12]
+ paddd m0, m1
+ pmovzxwd m1, [r1 + 14]
+ paddd m0, m1
+ pmovzxwd m1, [r1 + 16]
+ paddd m0, m1
+ pmovzxwd m1, [r1 + 18]
+ paddd m0, m1
+ pmovzxwd m1, [r1 + 20]
+ paddd m0, m1
+ pmovzxwd m1, [r1 + 22]
+ paddd m0, m1
+ pmovzxwd m1, [r1 + 24]
+ paddd m0, m1
+ pmovzxwd m1, [r1 + 26]
+ paddd m0, m1
+ pmovzxwd m1, [r1 + 28]
+ paddd m0, m1
+ pmovzxwd m1, [r1 + 30]
+ paddd m0, m1
+%endmacro
+
;-----------------------------------------------------------------------------
;static void integral_init16h_c(uint32_t *sum, pixel *pix, intptr_t stride)
;-----------------------------------------------------------------------------
INIT_YMM avx2
-cglobal integral16h, 3, 3, 0
-
+%if HIGH_BIT_DEPTH
+cglobal integral16h, 3, 4, 3
+ lea r3, [4 * r2] ;r3 = stride * sizeof(uint32_t), pitch of one sum row
+ sub r0, r3 ;r0 -> row above the output row
+ sub r2, 16 ;stride - 16 = number of outputs per row
+
+.loop:
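+ ;8 outputs per pass: sum[x] = sum[x - stride] + pix[x] + ... + pix[x + 15]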
+ INTEGRAL_SIXTEEN_HORIZONTAL_8_HBD
+ movu m1, [r0] ;load sum[x - stride] for 8 lanes
+ paddd m0, m1 ;add the row above to the horizontal sums
+ movu [r0 + r3], m0 ;store 8 outputs to the current row
+ add r1, 16 ;advance pix by 8 pixels (16 bytes)
+ add r0, 32 ;advance sum by 8 dwords
+ sub r2, 8
+ jnz .loop
RET
+%else
+cglobal integral16h, 3, 5, 3
+ lea r3, [4 * r2] ;r3 = stride * sizeof(uint32_t), pitch of one sum row
+ sub r0, r3 ;r0 -> row above the output row
+ sub r2, 16 ;stride - 16 = number of outputs per row
+ mov r4, r2
+ shr r4, 4 ;r4 = count of 16-wide iterations
+
+.loop_16:
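+ ;16 outputs per pass: widen 16 word sums to 2 x 8 dwords, add the row above, store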
+ INTEGRAL_SIXTEEN_HORIZONTAL_16
+ vperm2i128 m2, m0, m0, 1 ;xm2 = upper eight word sums of m0
+ pmovzxwd m2, xm2 ;m2 = lanes 8..15 widened to dwords
+ pmovzxwd m0, xm0 ;m0 = lanes 0..7 widened to dwords
+ movu m1, [r0]
+ paddd m0, m1
+ movu [r0 + r3], m0
+ movu m1, [r0 + 32]
+ paddd m2, m1
+ movu [r0 + r3 + 32], m2
+ add r1, 16
+ add r0, 64
+ sub r2, 16
+ sub r4, 1
+ jnz .loop_16
+ cmp r2, 8 ;r2 = (stride - 16) % 16 after the counted loop
+ je .loop_8
+ jmp .end
+
+.loop_8:
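+ ;8-wide tail, executed once when (stride - 16) % 16 == 8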
+ INTEGRAL_SIXTEEN_HORIZONTAL_8
+ pmovzxwd m0, xm0
+ movu m1, [r0]
+ paddd m0, m1
+ movu [r0 + r3], m0
+ jmp .end
+
+.end:
+ RET
+%endif
+
;-----------------------------------------------------------------------------
;static void integral_init24h_c(uint32_t *sum, pixel *pix, intptr_t stride)
;-----------------------------------------------------------------------------