[x265] [PATCH] asm: luma_vpp[8x8] in avx2: improve 701c->423c
divya at multicorewareinc.com
Fri Nov 14 10:15:31 CET 2014
# HG changeset patch
# User Divya Manivannan
# Date 1415956508 -19800
# Fri Nov 14 14:45:08 2014 +0530
# Node ID 8f67293bcf0f6536b243fbc2868ace6925c78db5
# Parent 64314f8061f1742c3c1278b455b15554c174b7b8
asm: luma_vpp[8x8] in avx2: improve 701c->423c
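
For context: the new kernel is the HEVC 8-tap vertical luma interpolation
filter with pixel (pp) output, and the 701c->423c in the subject is the
measured cycle count before and after. A minimal C sketch of the scalar
equivalent follows; the function and table names are illustrative (not the
committed code), the taps are the standard HEVC luma filter, and the
(sum + 32) >> 6 rounding is what pmulhrsw with pw_512 performs in the asm.

    /* illustrative reference, not part of the patch */
    #include <stdint.h>

    static const int16_t luma_taps[4][8] = {
        {  0, 0,   0, 64,  0,   0, 0,  0 },  /* coeffIdx 0: full-pel */
        { -1, 4, -10, 58, 17,  -5, 1,  0 },  /* coeffIdx 1 */
        { -1, 4, -11, 40, 40, -11, 4, -1 },  /* coeffIdx 2: half-pel */
        {  0, 1,  -5, 17, 58, -10, 4, -1 }   /* coeffIdx 3 */
    };

    static void vert_pp_8x8_ref(const uint8_t *src, intptr_t srcStride,
                                uint8_t *dst, intptr_t dstStride, int coeffIdx)
    {
        const int16_t *c = luma_taps[coeffIdx];
        src -= 3 * srcStride;                 /* window starts 3 rows above */
        for (int y = 0; y < 8; y++, src += srcStride, dst += dstStride)
            for (int x = 0; x < 8; x++)
            {
                int sum = 0;
                for (int i = 0; i < 8; i++)   /* 8 vertical taps */
                    sum += c[i] * src[x + i * srcStride];
                sum = (sum + 32) >> 6;        /* round; taps sum to 64 */
                dst[x] = (uint8_t)(sum < 0 ? 0 : sum > 255 ? 255 : sum);
            }
    }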
diff -r 64314f8061f1 -r 8f67293bcf0f source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Thu Nov 13 18:40:35 2014 +0900
+++ b/source/common/x86/asm-primitives.cpp Fri Nov 14 14:45:08 2014 +0530
@@ -1800,6 +1800,7 @@
#endif
p.luma_hpp[LUMA_4x4] = x265_interp_8tap_horiz_pp_4x4_avx2;
p.luma_vpp[LUMA_4x4] = x265_interp_8tap_vert_pp_4x4_avx2;
+ p.luma_vpp[LUMA_8x8] = x265_interp_8tap_vert_pp_8x8_avx2;
}
#endif // if HIGH_BIT_DEPTH
}
diff -r 64314f8061f1 -r 8f67293bcf0f source/common/x86/ipfilter8.asm
--- a/source/common/x86/ipfilter8.asm Thu Nov 13 18:40:35 2014 +0900
+++ b/source/common/x86/ipfilter8.asm Fri Nov 14 14:45:08 2014 +0530
@@ -3601,6 +3601,195 @@
RET
%endmacro
+INIT_YMM avx2
+%if ARCH_X86_64 == 1
+cglobal interp_8tap_vert_pp_8x8, 4,6,14
+ mov r4d, r4m
+ shl r4d, 6 ; r4 = coeffIdx * 64 (bytes per coefficient set)
+
+%ifdef PIC
+ lea r5, [tab_LumaCoeffVer]
+ lea r5, [r5 + r4]
+ vbroadcasti128 m0, [r5]
+ vbroadcasti128 m7, [r5 + 16]
+ vbroadcasti128 m8, [r5 + 32]
+ vbroadcasti128 m9, [r5 + 48]
+%else
+ lea r5, [tab_LumaCoeffVer + r4]
+ vbroadcasti128 m0, [r5]
+ vbroadcasti128 m7, [r5 + 16]
+ vbroadcasti128 m8, [r5 + 32]
+ vbroadcasti128 m9, [r5 + 48]
+%endif
+
+ lea r5, [r1 * 3]
+ sub r0, r5 ; start 3 rows above the block (8-tap window)
+ mov r4d, 2 ; 2 passes of 4 output rows each
+
+ movq xm1, [r0] ; m1 = row 0
+ movq xm2, [r0 + r1] ; m2 = row 1
+ punpcklbw xm1, xm2 ; m1 = [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
+ movq xm3, [r0 + r1 * 2] ; m3 = row 2
+ punpcklbw xm2, xm3 ; m2 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10]
+ vinserti128 m5, m1, xm2, 1 ; m5 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10] - [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
+ movq xm4, [r0 + r5] ; m4 = row 3
+ punpcklbw xm3, xm4 ; m3 = [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
+ lea r0, [r0 + r1 * 4]
+ movq xm1, [r0] ; m1 = row 4
+ punpcklbw xm4, xm1 ; m4 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30]
+ vinserti128 m6, m3, xm4, 1 ; m6 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30] - [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
+ movq xm2, [r0 + r1] ; m2 = row 5
+ punpcklbw xm1, xm2 ; m1 = [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
+ movq xm4, [r0 + r1 * 2] ; m4 = row 6
+ punpcklbw xm2, xm4 ; m2 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50]
+ vinserti128 m1, m1, xm2, 1 ; m1 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50] - [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
+ movq xm3, [r0 + r5] ; m3 = row 7
+ punpcklbw xm4, xm3 ; m4 = [77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60]
+ lea r0, [r0 + r1 * 4]
+ movq xm2, [r0] ; m2 = row 8
+ punpcklbw xm3, xm2 ; m3 = [87 77 86 76 85 75 84 74 83 73 82 72 81 71 80 70]
+ vinserti128 m4, m4, xm3, 1 ; m4 = [87 77 86 76 85 75 84 74 83 73 82 72 81 71 80 70] - [77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60]
+ movq xm3, [r0 + r1] ; m3 = row 9
+ punpcklbw xm2, xm3 ; m2 = [97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80]
+ movq xm10, [r0 + r1 * 2] ; m10 = row 10
+ punpcklbw xm3, xm10 ; m3 = [A7 97 A6 96 A5 95 A4 94 A3 93 A2 92 A1 91 A0 90]
+ vinserti128 m2, m2, xm3, 1 ; m2 = [A7 97 A6 96 A5 95 A4 94 A3 93 A2 92 A1 91 A0 90] - [97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80]
+ movq xm3, [r0 + r5] ; m3 = row 11
+ punpcklbw xm10, xm3 ; m10 = [B7 A7 B6 A6 B5 A5 B4 A4 B3 A3 B2 A2 B1 A1 B0 A0]
+ lea r0, [r0 + r1 * 4]
+ movq xm11, [r0] ; m11 = row 12
+ punpcklbw xm3, xm11 ; m3 = [C7 B7 C6 B6 C5 B5 C4 B4 C3 B3 C2 B2 C1 B1 C0 B0]
+ vinserti128 m10, m10, xm3, 1 ; m10 = [C7 B7 C6 B6 C5 B5 C4 B4 C3 B3 C2 B2 C1 B1 C0 B0] - [B7 A7 B6 A6 B5 A5 B4 A4 B3 A3 B2 A2 B1 A1 B0 A0]
+ movq xm3, [r0 + r1] ; m3 = row 13
+ punpcklbw xm11, xm3 ; m11 = [D7 C7 D6 C6 D5 C5 D4 C4 D3 C3 D2 C2 D1 C1 D0 C0]
+ movq xm12, [r0 + r1 * 2] ; m12 = row 14
+ punpcklbw xm3, xm12 ; m3 = [E7 D7 E6 D6 E5 D5 E4 D4 E3 D3 E2 D2 E1 D1 E0 D0]
+ vinserti128 m11, m11, xm3, 1 ; m11 = [E7 D7 E6 D6 E5 D5 E4 D4 E3 D3 E2 D2 E1 D1 E0 D0] - [D7 C7 D6 C6 D5 C5 D4 C4 D3 C3 D2 C2 D1 C1 D0 C0]
+
+.loop:
+ pmaddubsw m5, m0 ; taps 0-1 of rows 0-1
+ pmaddubsw m3, m6, m7 ; taps 2-3 of rows 0-1
+ paddw m12, m5, m3 ; m12 = partial sum of rows 0-1
+ pmaddubsw m6, m0 ; taps 0-1 of rows 2-3
+ pmaddubsw m3, m1, m8 ; taps 4-5 of rows 0-1
+ movu m5, m1 ; keep row pairs 4-6 for the next pass
+ pmaddubsw m1, m7 ; taps 2-3 of rows 2-3
+ paddw m13, m6, m1 ; m13 = partial sum of rows 2-3
+ pmaddubsw m1, m4, m9 ; taps 6-7 of rows 0-1
+ paddw m3, m1
+ paddw m12, m3 ; m12 = full 8-tap sum of rows 0-1
+ movu m6, m4 ; keep row pairs 6-8
+ pmaddubsw m4, m8 ; taps 4-5 of rows 2-3
+ movu m1, m2 ; keep row pairs 8-10
+ pmaddubsw m2, m9 ; taps 6-7 of rows 2-3
+ paddw m4, m2
+ paddw m13, m4 ; m13 = full 8-tap sum of rows 2-3
+ movu m4, m10 ; advance row pairs 10-12 and 12-14 for rows 4-7
+ movu m2, m11
+ pmulhrsw m12, [pw_512] ; m12 = word: row 0, row 1
+ pmulhrsw m13, [pw_512] ; m13 = word: row 2, row 3
+ packuswb m12, m13
+ vextracti128 xm13, m12, 1
+ movq [r2], xm12
+ movq [r2 + r3], xm13
+ lea r2, [r2 + r3 * 2]
+ movhps [r2], xm12
+ movhps [r2 + r3], xm13
+
+ lea r2, [r2 + r3 * 2]
+ dec r4d
+ jnz .loop
+ RET
+
+%else
+cglobal interp_8tap_vert_pp_8x8, 4,7,8
+ mov r4d, r4m
+ shl r4d, 6
+ lea r5, [r1 * 3]
+ sub r0, r5 ; start 3 rows above the block (8-tap window)
+
+%ifdef PIC
+ lea r5, [tab_LumaCoeffVer]
+ lea r5, [r5 + r4]
+%else
+ lea r5, [tab_LumaCoeffVer + r4]
+%endif
+
+ mov r4d, 2 ; 2 passes of 4 output rows each
+
+.loop:
+ movq xm1, [r0] ; m1 = row 0
+ movq xm2, [r0 + r1] ; m2 = row 1
+ punpcklbw xm1, xm2 ; m1 = [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
+ lea r6, [r0 + r1 * 2]
+ movq xm3, [r6] ; m3 = row 2
+ punpcklbw xm2, xm3 ; m2 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10]
+ movq xm4, [r6 + r1] ; m4 = row 3
+ punpcklbw xm3, xm4 ; m3 = [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
+ vinserti128 m5, m1, xm3, 1 ; m5 = [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20] - [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
+ pmaddubsw m5, [r5] ; row 0: taps 0-1 (low lane) + taps 2-3 (high lane)
+ lea r6, [r6 + r1 * 2]
+ movq xm1, [r6] ; m1 = row 4
+ punpcklbw xm4, xm1 ; m4 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30]
+ vinserti128 m2, m2, xm4, 1 ; m2 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30] - [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10]
+ pmaddubsw m2, [r5] ; row 1: taps 0-3 across lanes
+ movq xm6, [r6 + r1] ; m6 = row 5
+ punpcklbw xm1, xm6 ; m1 = [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
+ vinserti128 m3, m3, xm1, 1 ; m3 = [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40] - [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
+ pmaddubsw m3, [r5] ; row 2: taps 0-3 across lanes
+ lea r6, [r6 + r1 * 2]
+ movq xm0, [r6] ; m0 = row 6
+ punpcklbw xm6, xm0 ; m6 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50]
+ vinserti128 m4, m4, xm6, 1 ; m4 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50] - [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30]
+ pmaddubsw m4, [r5] ; row 3: taps 0-3 across lanes
+ movq xm7, [r6 + r1] ; m7 = row 7
+ punpcklbw xm0, xm7 ; m0 = [77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60]
+ vinserti128 m1, m1, xm0, 1 ; m1 = [77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60] - [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
+ pmaddubsw m1, [r5 + 32] ; row 0: taps 4-5 (low lane) + taps 6-7 (high lane)
+ paddw m5, m1
+ lea r6, [r6 + r1 * 2]
+ movq xm1, [r6] ; m1 = row 8
+ punpcklbw xm7, xm1 ; m7 = [87 77 86 76 85 75 84 74 83 73 82 72 81 71 80 70]
+ vinserti128 m6, m6, xm7, 1 ; m6 = [87 77 86 76 85 75 84 74 83 73 82 72 81 71 80 70] - [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50]
+ pmaddubsw m6, [r5 + 32] ; row 1: taps 4-7 across lanes
+ paddw m2, m6
+ movq xm6, [r6 + r1] ; m6 = row 9
+ punpcklbw xm1, xm6 ; m1 = [97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80]
+ vinserti128 m0, m0, xm1, 1 ; m0 = [97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80] - [77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60]
+ pmaddubsw m0, [r5 + 32] ; row 2: taps 4-7 across lanes
+ paddw m3, m0
+ movq xm0, [r6 + r1 * 2] ; m0 = row 10
+ punpcklbw xm6, xm0 ; m6 = [A7 97 A6 96 A5 95 A4 94 A3 93 A2 92 A1 91 A0 90]
+ vinserti128 m7, m7, xm6, 1 ; m7 = [A7 97 A6 96 A5 95 A4 94 A3 93 A2 92 A1 91 A0 90] - [87 77 86 76 85 75 84 74 83 73 82 72 81 71 80 70]
+ pmaddubsw m7, [r5 + 32] ; row 3: taps 4-7 across lanes
+ paddw m4, m7
+
+ vextracti128 xm1, m5, 1 ; fold lanes: each xmm becomes one full row sum
+ paddw xm5, xm1
+ vextracti128 xm1, m2, 1
+ paddw xm2, xm1
+ vextracti128 xm1, m3, 1
+ paddw xm3, xm1
+ vextracti128 xm1, m4, 1
+ paddw xm4, xm1
+ vinserti128 m5, m5, xm3, 1
+ vinserti128 m2, m2, xm4, 1
+ pmulhrsw m5, [pw_512] ; m5 = word: row 0, row 2
+ pmulhrsw m2, [pw_512] ; m2 = word: row 1, row 3
+ packuswb m5, m2
+ vextracti128 xm2, m5, 1
+ movq [r2], xm5
+ movhps [r2 + r3], xm5
+ lea r2, [r2 + r3 * 2]
+ movq [r2], xm2
+ movhps [r2 + r3], xm2
+ lea r2, [r2 + r3 * 2]
+ lea r0, [r0 + r1 * 4]
+ dec r4d
+ jnz .loop
+ RET
+%endif
+
;-------------------------------------------------------------------------------------------------------------
; void interp_8tap_vert_pp_8x4(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
;-------------------------------------------------------------------------------------------------------------