[x265] [PATCH 3 of 3] asm: Fix sse_ss [32x32] & [64x64] for main12 SSE2
Ramya Sriraman
ramya at multicorewareinc.com
Fri Oct 23 06:24:33 CEST 2015
# HG changeset patch
# User Ramya Sriraman <ramya at multicorewareinc.com>
# Date 1445244995 -19800
# Mon Oct 19 14:26:35 2015 +0530
# Node ID 18a794060c07d47b930271be5ac867fb84f8d002
# Parent 76ebb28a96cd1620dc0229487d697d7bf634c6d6
asm: Fix sse_ss [32x32] & [64x64] for main12 SSE2
diff -r 76ebb28a96cd -r 18a794060c07 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Wed Sep 30 11:22:16 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp Mon Oct 19 14:26:35 2015 +0530
@@ -1006,10 +1006,12 @@
p.chroma[X265_CSP_I422].cu[BLOCK_422_4x8].sse_pp = (pixel_sse_t)PFX(pixel_ssd_ss_4x8_mmx2);
p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].sse_pp = (pixel_sse_t)PFX(pixel_ssd_ss_8x16_sse2);
p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].sse_pp = (pixel_sse_t)PFX(pixel_ssd_ss_16x32_sse2);
-#if X265_DEPTH <= 10
- p.cu[BLOCK_4x4].sse_ss = PFX(pixel_ssd_ss_4x4_mmx2);
- ALL_LUMA_CU(sse_ss, pixel_ssd_ss, sse2);
-#endif
+ p.cu[BLOCK_4x4].sse_ss = (pixel_sse_ss_t)PFX(pixel_ssd_ss_4x4_mmx2);
+ p.cu[BLOCK_8x8].sse_ss = (pixel_sse_ss_t)PFX(pixel_ssd_ss_8x8_sse2);
+ p.cu[BLOCK_16x16].sse_ss = (pixel_sse_ss_t)PFX(pixel_ssd_ss_16x16_sse2);
+ p.cu[BLOCK_32x32].sse_ss = (pixel_sse_ss_t)PFX(pixel_ssd_ss_32x32_sse2);
+ p.cu[BLOCK_64x64].sse_ss = (pixel_sse_ss_t)PFX(pixel_ssd_ss_64x64_sse2);
+
p.cu[BLOCK_4x4].dct = PFX(dct4_sse2);
p.cu[BLOCK_8x8].dct = PFX(dct8_sse2);
p.cu[BLOCK_4x4].idct = PFX(idct4_sse2);
diff -r 76ebb28a96cd -r 18a794060c07 source/common/x86/ssd-a.asm
--- a/source/common/x86/ssd-a.asm Wed Sep 30 11:22:16 2015 +0530
+++ b/source/common/x86/ssd-a.asm Mon Oct 19 14:26:35 2015 +0530
@@ -183,6 +183,155 @@
RET
%endmacro
+%macro SSD_ONE_SS_32 0
+cglobal pixel_ssd_ss_32x32, 4,6,8
+ add r1d, r1d
+ add r3d, r3d
+ pxor m5, m5
+ pxor m6, m6
+ mov r4d, 2
+
+.iterate:
+ mov r5d, 16
+ pxor m4, m4
+ pxor m7, m7
+.loop:
+ movu m0, [r0]
+ movu m1, [r0 + mmsize]
+ movu m2, [r2]
+ movu m3, [r2 + mmsize]
+ psubw m0, m2
+ psubw m1, m3
+ pmaddwd m0, m0
+ pmaddwd m1, m1
+ paddd m4, m0
+ paddd m7, m1
+ movu m0, [r0 + 2 * mmsize]
+ movu m1, [r0 + 3 * mmsize]
+ movu m2, [r2 + 2 * mmsize]
+ movu m3, [r2 + 3 * mmsize]
+ psubw m0, m2
+ psubw m1, m3
+ pmaddwd m0, m0
+ pmaddwd m1, m1
+ paddd m4, m0
+ paddd m7, m1
+
+ add r0, r1
+ add r2, r3
+
+ dec r5d
+ jnz .loop
+
+ mova m0, m4
+ pxor m1, m1
+ punpckldq m0, m1
+ punpckhdq m4, m1
+ paddq m5, m0
+ paddq m6, m4
+
+ mova m0, m7
+ punpckldq m0, m1
+ punpckhdq m7, m1
+ paddq m5, m0
+ paddq m6, m7
+
+ dec r4d
+ jnz .iterate
+
+ paddq m5, m6
+ movhlps m2, m5
+ paddq m5, m2
+ movq rax, m5
+ RET
+%endmacro
+
+%macro SSD_ONE_SS_64 0
+cglobal pixel_ssd_ss_64x64, 4,6,8
+ add r1d, r1d
+ add r3d, r3d
+ pxor m5, m5
+ pxor m6, m6
+ mov r5d, 8
+
+.iterate:
+ pxor m4, m4
+ pxor m7, m7
+ mov r4d, 8
+
+.loop:
+ ;---- process 1st half of the row ----
+ movu m0, [r0]
+ movu m1, [r0 + mmsize]
+ movu m2, [r2]
+ movu m3, [r2 + mmsize]
+ psubw m0, m2
+ psubw m1, m3
+ pmaddwd m0, m0
+ pmaddwd m1, m1
+ paddd m4, m0
+ paddd m7, m1
+ movu m0, [r0 + 2 * mmsize]
+ movu m1, [r0 + 3 * mmsize]
+ movu m2, [r2 + 2 * mmsize]
+ movu m3, [r2 + 3 * mmsize]
+ psubw m0, m2
+ psubw m1, m3
+ pmaddwd m0, m0
+ pmaddwd m1, m1
+ paddd m4, m0
+ paddd m7, m1
+ ;---- process 2nd half of the row ----
+ movu m0, [r0 + 4 * mmsize]
+ movu m1, [r0 + 5 * mmsize]
+ movu m2, [r2 + 4 * mmsize]
+ movu m3, [r2 + 5 * mmsize]
+ psubw m0, m2
+ psubw m1, m3
+ pmaddwd m0, m0
+ pmaddwd m1, m1
+ paddd m4, m0
+ paddd m7, m1
+ movu m0, [r0 + 6 * mmsize]
+ movu m1, [r0 + 7 * mmsize]
+ movu m2, [r2 + 6 * mmsize]
+ movu m3, [r2 + 7 * mmsize]
+ psubw m0, m2
+ psubw m1, m3
+ pmaddwd m0, m0
+ pmaddwd m1, m1
+ paddd m4, m0
+ paddd m7, m1
+
+ add r0, r1
+ add r2, r3
+
+ dec r4d
+ jnz .loop
+
+ mova m0, m4
+ pxor m1, m1
+ punpckldq m0, m1
+ punpckhdq m4, m1
+ paddq m5, m0
+ paddq m6, m4
+
+ mova m0, m7
+ punpckldq m0, m1
+ punpckhdq m7, m1
+ paddq m5, m0
+ paddq m6, m7
+
+ dec r5d
+ jnz .iterate
+
+ paddq m5, m6
+ movhlps m2, m5
+ paddq m5, m2
+ movq rax, m5
+ RET
+%endmacro
+
%macro SSD_TWO 2
cglobal pixel_ssd_ss_%1x%2, 4,7,8
FIX_STRIDES r1, r3
@@ -250,7 +399,7 @@
pmaddwd m3, m3
pmaddwd m4, m4
paddd m1, m2
- paddd m3, m4
+ paddd m3, m4
paddd m0, m1
paddd m0, m3
movu m1, [r0 + r1 + 64]
@@ -551,19 +700,23 @@
SSD_ONE 32, 8
SSD_ONE 32, 16
SSD_ONE 32, 24
-SSD_ONE 32, 32
+
%if BIT_DEPTH <= 10
SSD_ONE 32, 64
+ SSD_ONE 32, 32
+ SSD_TWO 64, 64
%else
SSD_ONE_32
+ SSD_ONE_SS_32
+ SSD_ONE_SS_64
%endif
SSD_TWO 48, 64
SSD_TWO 64, 16
SSD_TWO 64, 32
SSD_TWO 64, 48
-SSD_TWO 64, 64
+
INIT_YMM avx2
SSD_ONE 16, 8
SSD_ONE 16, 32
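
To spell out the overflow-handling idiom the two new kernels use: at main12 a
squared word difference needs up to 24 bits, so dword lanes can only safely
accumulate a bounded number of rows; the partial sums are therefore spread
into qword lanes (punpckldq/punpckhdq against zero, then paddq) once per row
chunk, before paddd could overflow. A rough C++/SSE2 intrinsics sketch of the
32x32 kernel's structure follows, for illustration only; the function name
and signature are mine, not x265's:

#include <emmintrin.h>
#include <cstdint>

// Sketch of SSD_ONE_SS_32's accumulation scheme (illustrative, not x265 API).
// Strides are in int16_t elements; the asm doubles them to bytes up front.
static uint64_t ssd_ss_32x32_sketch(const int16_t* a, intptr_t strideA,
                                    const int16_t* b, intptr_t strideB)
{
    __m128i sumLo = _mm_setzero_si128();     // qword accumulator, like m5
    __m128i sumHi = _mm_setzero_si128();     // qword accumulator, like m6
    const __m128i zero = _mm_setzero_si128();

    for (int chunk = 0; chunk < 2; chunk++)  // widen once per 16 rows
    {
        __m128i acc0 = _mm_setzero_si128();  // dword accumulator, like m4
        __m128i acc1 = _mm_setzero_si128();  // dword accumulator, like m7
        for (int row = 0; row < 16; row++)
        {
            for (int x = 0; x < 32; x += 16)
            {
                __m128i d0 = _mm_sub_epi16(
                    _mm_loadu_si128((const __m128i*)(a + x)),
                    _mm_loadu_si128((const __m128i*)(b + x)));
                __m128i d1 = _mm_sub_epi16(
                    _mm_loadu_si128((const __m128i*)(a + x + 8)),
                    _mm_loadu_si128((const __m128i*)(b + x + 8)));
                // pmaddwd squares the words and adds adjacent pairs into
                // dword lanes; paddd accumulates across the chunk
                acc0 = _mm_add_epi32(acc0, _mm_madd_epi16(d0, d0));
                acc1 = _mm_add_epi32(acc1, _mm_madd_epi16(d1, d1));
            }
            a += strideA;
            b += strideB;
        }
        // punpck{l,h}dq against zero turns dwords into qwords; paddq then
        // accumulates with no risk of 32-bit overflow
        sumLo = _mm_add_epi64(sumLo, _mm_unpacklo_epi32(acc0, zero));
        sumHi = _mm_add_epi64(sumHi, _mm_unpackhi_epi32(acc0, zero));
        sumLo = _mm_add_epi64(sumLo, _mm_unpacklo_epi32(acc1, zero));
        sumHi = _mm_add_epi64(sumHi, _mm_unpackhi_epi32(acc1, zero));
    }
    // horizontal fold, equivalent to the asm's paddq + movhlps + movq rax
    __m128i s = _mm_add_epi64(sumLo, sumHi);
    s = _mm_add_epi64(s, _mm_srli_si128(s, 8));
    return (uint64_t)_mm_cvtsi128_si64(s);
}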
Thank you
Regards
Ramya
On Thu, Oct 15, 2015 at 11:42 PM, chen <chenm003 at 163.com> wrote:
> In your code, you use m4 as a temporary/intermediate sum, but consider
> its dynamic range:
> every element: 12 + 12 = 24 bits
> up to 64 iterations: +5 bits
> 64 elements in a 4-dword register: +4 bits
>
> total = 24 + 5 + 4 = 33 bits
>
> The above means that if you use two of the intermediate sum registers,
> you only need a qword sum in the last stage.
>
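
To make the bound concrete for the final 64x64 kernel at the top of this
mail, here is a quick back-of-the-envelope check (my own arithmetic, assuming
12-bit inputs; not part of the patch):

#include <cstdint>
#include <cstdio>

int main()
{
    // worst case for one pmaddwd result on 12-bit data: two squared diffs
    const unsigned long long maxDiffSq = 4095ULL * 4095ULL; // (2^12-1)^2, ~2^24
    const unsigned long long perMadd   = 2 * maxDiffSq;     // ~2^25 per dword lane

    // In pixel_ssd_ss_64x64, each dword lane of m4/m7 collects 4 pmaddwd
    // results per row; over all 64 rows that would be 256 results.
    const unsigned long long wholeBlock = 256 * perMadd;    // ~2^33, past 32 bits
    // Over one 8-row inner-loop chunk it is only 32 results, which fits a
    // dword, so paddd is safe until the chunk is widened with paddq.
    const unsigned long long perChunk   = 32 * perMadd;

    printf("whole block per lane: %llu (needs qword: %d)\n",
           wholeBlock, wholeBlock > 0xFFFFFFFFULL);
    printf("8-row chunk per lane: %llu (dword-safe: %d)\n",
           perChunk, perChunk <= 0xFFFFFFFFULL);
    return 0;
}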
> At 2015-10-15 20:01:40,"Ramya Sriraman" <ramya at multicorewareinc.com>
> wrote:
>
> # HG changeset patch
> # User Ramya Sriraman <ramya at multicorewareinc.com>
> # Date 1444216029 -19800
> # Wed Oct 07 16:37:09 2015 +0530
> # Node ID 6b2c146d0bcf28a19e7defe977a8f063240a3905
> # Parent 0ea631d6f87d4fc056da26ff94c6ffa1120e69bd
> asm: Fix sse_ss [32x32] & [64x64] for main12 SSE2
>
> diff -r 0ea631d6f87d -r 6b2c146d0bcf source/common/x86/asm-primitives.cpp
> --- a/source/common/x86/asm-primitives.cpp Wed Oct 07 13:42:06 2015 +0530
> +++ b/source/common/x86/asm-primitives.cpp Wed Oct 07 16:37:09 2015 +0530
> @@ -1006,10 +1006,12 @@
> p.chroma[X265_CSP_I422].cu[BLOCK_422_4x8].sse_pp = (pixel_sse_t)PFX(pixel_ssd_ss_4x8_mmx2);
> p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].sse_pp = (pixel_sse_t)PFX(pixel_ssd_ss_8x16_sse2);
> p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].sse_pp = (pixel_sse_t)PFX(pixel_ssd_ss_16x32_sse2);
> -#if X265_DEPTH <= 10
> - p.cu[BLOCK_4x4].sse_ss = PFX(pixel_ssd_ss_4x4_mmx2);
> - ALL_LUMA_CU(sse_ss, pixel_ssd_ss, sse2);
> -#endif
> + p.cu[BLOCK_4x4].sse_ss = (pixel_sse_ss_t)PFX(pixel_ssd_ss_4x4_mmx2);
> + p.cu[BLOCK_8x8].sse_ss = (pixel_sse_ss_t)PFX(pixel_ssd_ss_8x8_sse2);
> + p.cu[BLOCK_16x16].sse_ss = (pixel_sse_ss_t)PFX(pixel_ssd_ss_16x16_sse2);
> + p.cu[BLOCK_32x32].sse_ss = (pixel_sse_ss_t)PFX(pixel_ssd_ss_32x32_sse2);
> + p.cu[BLOCK_64x64].sse_ss = (pixel_sse_ss_t)PFX(pixel_ssd_ss_64x64_sse2);
> +
> p.cu[BLOCK_4x4].dct = PFX(dct4_sse2);
> p.cu[BLOCK_8x8].dct = PFX(dct8_sse2);
> p.cu[BLOCK_4x4].idct = PFX(idct4_sse2);
> diff -r 0ea631d6f87d -r 6b2c146d0bcf source/common/x86/ssd-a.asm
> --- a/source/common/x86/ssd-a.asm Wed Oct 07 13:42:06 2015 +0530
> +++ b/source/common/x86/ssd-a.asm Wed Oct 07 16:37:09 2015 +0530
> @@ -183,6 +183,208 @@
> RET
> %endmacro
>
> +%macro SSD_ONE_SS_32 0
> +cglobal pixel_ssd_ss_32x32, 4,5,7
> + add r1d, r1d
> + add r3d, r3d
> + pxor m5, m5
> + pxor m6, m6
> + mov r4d, 8
> +.iterate:
> + pxor m4, m4
> +
> + movu m0, [r0]
> + movu m1, [r0 + mmsize]
> + movu m2, [r2]
> + movu m3, [r2 + mmsize]
> + psubw m0, m2
> + psubw m1, m3
> + pmaddwd m0, m0
> + pmaddwd m1, m1
> + paddd m4, m0
> + paddd m4, m1
> + movu m0, [r0 + 2 * mmsize]
> + movu m1, [r0 + 3 * mmsize]
> + movu m2, [r2 + 2 * mmsize]
> + movu m3, [r2 + 3 * mmsize]
> + psubw m0, m2
> + psubw m1, m3
> + pmaddwd m0, m0
> + pmaddwd m1, m1
> + paddd m4, m0
> + paddd m4, m1
> +
> + add r0, r1
> + add r2, r3
> +
> + movu m0, [r0]
> + movu m1, [r0 + mmsize]
> + movu m2, [r2]
> + movu m3, [r2 + mmsize]
> + psubw m0, m2
> + psubw m1, m3
> + pmaddwd m0, m0
> + pmaddwd m1, m1
> + paddd m4, m0
> + paddd m4, m1
> + movu m0, [r0 + 2 * mmsize]
> + movu m1, [r0 + 3 * mmsize]
> + movu m2, [r2 + 2 * mmsize]
> + movu m3, [r2 + 3 * mmsize]
> + psubw m0, m2
> + psubw m1, m3
> + pmaddwd m0, m0
> + pmaddwd m1, m1
> + paddd m4, m0
> + paddd m4, m1
> +
> + add r0, r1
> + add r2, r3
> +
> + movu m0, [r0]
> + movu m1, [r0 + mmsize]
> + movu m2, [r2]
> + movu m3, [r2 + mmsize]
> + psubw m0, m2
> + psubw m1, m3
> + pmaddwd m0, m0
> + pmaddwd m1, m1
> + paddd m4, m0
> + paddd m4, m1
> + movu m0, [r0 + 2 * mmsize]
> + movu m1, [r0 + 3 * mmsize]
> + movu m2, [r2 + 2 * mmsize]
> + movu m3, [r2 + 3 * mmsize]
> + psubw m0, m2
> + psubw m1, m3
> + pmaddwd m0, m0
> + pmaddwd m1, m1
> + paddd m4, m0
> + paddd m4, m1
> +
> + add r0, r1
> + add r2, r3
> +
> + movu m0, [r0]
> + movu m1, [r0 + mmsize]
> + movu m2, [r2]
> + movu m3, [r2 + mmsize]
> + psubw m0, m2
> + psubw m1, m3
> + pmaddwd m0, m0
> + pmaddwd m1, m1
> + paddd m4, m0
> + paddd m4, m1
> + movu m0, [r0 + 2 * mmsize]
> + movu m1, [r0 + 3 * mmsize]
> + movu m2, [r2 + 2 * mmsize]
> + movu m3, [r2 + 3 * mmsize]
> + psubw m0, m2
> + psubw m1, m3
> + pmaddwd m0, m0
> + pmaddwd m1, m1
> + paddd m4, m0
> + paddd m4, m1
> +
> + add r0, r1
> + add r2, r3
> +
> + mova m0, m4
> + pxor m1, m1
> + punpckldq m0, m1
> + punpckhdq m4, m1
> + paddq m5, m0
> + paddq m6, m4
> +
> + dec r4d
> + jnz .iterate
> +
> + paddq m5, m6
> + movhlps m2, m5
> + paddq m5, m2
> + movq rax, m5
> + RET
> +%endmacro
> +
> +%macro SSD_ONE_SS_64 0
> +cglobal pixel_ssd_ss_64x64, 4,6,7
> + add r1d, r1d
> + add r3d, r3d
> + pxor m5, m5
> + pxor m6, m6
> + mov r5d, 16
> +
> +.iterate:
> + pxor m4, m4
> + mov r4d, 4
> +
> +.loop:
> + ;----process 1st half a row----
> + movu m0, [r0]
> + movu m1, [r0 + mmsize]
> + movu m2, [r2]
> + movu m3, [r2 + mmsize]
> + psubw m0, m2
> + psubw m1, m3
> + pmaddwd m0, m0
> + pmaddwd m1, m1
> + paddd m4, m0
> + paddd m4, m1
> + movu m0, [r0 + 2 * mmsize]
> + movu m1, [r0 + 3 * mmsize]
> + movu m2, [r2 + 2 * mmsize]
> + movu m3, [r2 + 3 * mmsize]
> + psubw m0, m2
> + psubw m1, m3
> + pmaddwd m0, m0
> + pmaddwd m1, m1
> + paddd m4, m0
> + paddd m4, m1
> + ;----process 2nd half a row----
> + movu m0, [r0 + 4 * mmsize]
> + movu m1, [r0 + 5 * mmsize]
> + movu m2, [r2 + 4 * mmsize]
> + movu m3, [r2 + 5 * mmsize]
> + psubw m0, m2
> + psubw m1, m3
> + pmaddwd m0, m0
> + pmaddwd m1, m1
> + paddd m4, m0
> + paddd m4, m1
> + movu m0, [r0 + 6 * mmsize]
> + movu m1, [r0 + 7 * mmsize]
> + movu m2, [r2 + 6 * mmsize]
> + movu m3, [r2 + 7 * mmsize]
> + psubw m0, m2
> + psubw m1, m3
> + pmaddwd m0, m0
> + pmaddwd m1, m1
> + paddd m4, m0
> + paddd m4, m1
> +
> + add r0, r1
> + add r2, r3
> +
> + dec r4d
> + jnz .loop
> +
> + mova m0, m4
> + pxor m1, m1
> + punpckldq m0, m1
> + punpckhdq m4, m1
> + paddq m5, m0
> + paddq m6, m4
> +
> + dec r5
> + jne .iterate
> +
> + paddq m5, m6
> + movhlps m2, m5
> + paddq m5, m2
> + movq rax, m5
> + RET
> +%endmacro
> +
> %macro SSD_TWO 2
> cglobal pixel_ssd_ss_%1x%2, 4,7,8
> FIX_STRIDES r1, r3
> @@ -525,19 +727,20 @@
> SSD_ONE 32, 8
> SSD_ONE 32, 16
> SSD_ONE 32, 24
> -SSD_ONE 32, 32
>
> %if BIT_DEPTH <= 10
> SSD_ONE 32, 64
> + SSD_ONE 32, 32
> + SSD_TWO 64, 64
> %else
> SSD_ONE_32
> + SSD_ONE_SS_32
> + SSD_ONE_SS_64
> %endif
> -
> SSD_TWO 48, 64
> SSD_TWO 64, 16
> SSD_TWO 64, 32
> SSD_TWO 64, 48
> -SSD_TWO 64, 64
> INIT_YMM avx2
> SSD_ONE 16, 8
> SSD_ONE 16, 16
>
>
>
> Thank you
> Regards
> Ramya
>
> On Wed, Oct 14, 2015 at 8:54 PM, chen <chenm003 at 163.com> wrote:
>
>> Just to be clear, it is correct. But a series of CALLs may reduce
>> performance, especially a CALL inside a loop; I suggest copying the
>> ssd_ss_32x4 code inline there.
>>
>>
>> At 2015-10-14 17:36:37,ramya at multicorewareinc.com wrote:
>> ># HG changeset patch
>> ># User Ramya Sriraman <ramya at multicorewareinc.com>
>> ># Date 1444216029 -19800
>> ># Wed Oct 07 16:37:09 2015 +0530
>> ># Node ID 6597371dcf4ffe45590c915738544e4acd25def6
>> ># Parent 7f984cbb0a15ed6b5ffc7ea843ce6a5380b31179
>> >asm: Fix sse_ss [32x32] & [64x64] for main12 SSE2
>> >
>> >diff -r 7f984cbb0a15 -r 6597371dcf4f source/common/x86/asm-primitives.cpp
>> >--- a/source/common/x86/asm-primitives.cpp Wed Oct 07 13:42:41 2015 +0530
>> >+++ b/source/common/x86/asm-primitives.cpp Wed Oct 07 16:37:09 2015 +0530
>> >@@ -1006,10 +1006,12 @@
>> > p.chroma[X265_CSP_I422].cu[BLOCK_422_4x8].sse_pp = (pixel_sse_t)PFX(pixel_ssd_ss_4x8_mmx2);
>> > p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].sse_pp = (pixel_sse_t)PFX(pixel_ssd_ss_8x16_sse2);
>> > p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].sse_pp = (pixel_sse_t)PFX(pixel_ssd_ss_16x32_sse2);
>> >-#if X265_DEPTH <= 10
>> >- p.cu[BLOCK_4x4].sse_ss = PFX(pixel_ssd_ss_4x4_mmx2);
>> >- ALL_LUMA_CU(sse_ss, pixel_ssd_ss, sse2);
>> >-#endif
>> >+ p.cu[BLOCK_4x4].sse_ss = (pixel_sse_ss_t)PFX(pixel_ssd_ss_4x4_mmx2);
>> >+ p.cu[BLOCK_8x8].sse_ss = (pixel_sse_ss_t)PFX(pixel_ssd_ss_8x8_sse2);
>> >+ p.cu[BLOCK_16x16].sse_ss = (pixel_sse_ss_t)PFX(pixel_ssd_ss_16x16_sse2);
>> >+ p.cu[BLOCK_32x32].sse_ss = (pixel_sse_ss_t)PFX(pixel_ssd_ss_32x32_sse2);
>> >+ p.cu[BLOCK_64x64].sse_ss = (pixel_sse_ss_t)PFX(pixel_ssd_ss_64x64_sse2);
>> >+
>> > p.cu[BLOCK_4x4].dct = PFX(dct4_sse2);
>> > p.cu[BLOCK_8x8].dct = PFX(dct8_sse2);
>> > p.cu[BLOCK_4x4].idct = PFX(idct4_sse2);
>> >diff -r 7f984cbb0a15 -r 6597371dcf4f source/common/x86/ssd-a.asm
>> >--- a/source/common/x86/ssd-a.asm Wed Oct 07 13:42:41 2015 +0530
>> >+++ b/source/common/x86/ssd-a.asm Wed Oct 07 16:37:09 2015 +0530
>> >@@ -183,6 +183,153 @@
>> > RET
>> > %endmacro
>> >
>> >+;Function to find the SSD of a 32x4 block, sse2, 12-bit depth
>> >+;Defined separately so it can be called from the SSD_ONE_SS_32 macro
>> >+INIT_XMM sse2
>> >+cglobal ssd_ss_32x4
>> >+ pxor m4, m4
>> >+ mov r4d, 4
>> >+.loop:
>> >+ movu m0, [r0]
>> >+ movu m1, [r0 + mmsize]
>> >+ movu m2, [r2]
>> >+ movu m3, [r2 + mmsize]
>> >+ psubw m0, m2
>> >+ psubw m1, m3
>> >+ pmaddwd m0, m0
>> >+ pmaddwd m1, m1
>> >+ paddd m4, m0
>> >+ paddd m4, m1
>> >+ movu m0, [r0 + 2 * mmsize]
>> >+ movu m1, [r0 + 3 * mmsize]
>> >+ movu m2, [r2 + 2 * mmsize]
>> >+ movu m3, [r2 + 3 * mmsize]
>> >+ psubw m0, m2
>> >+ psubw m1, m3
>> >+ pmaddwd m0, m0
>> >+ pmaddwd m1, m1
>> >+ paddd m4, m0
>> >+ paddd m4, m1
>> >+
>> >+ add r0, r1
>> >+ add r2, r3
>> >+
>> >+ dec r4d
>> >+ jnz .loop
>> >+
>> >+ mova m0, m4
>> >+ pxor m1, m1
>> >+ punpckldq m0, m1
>> >+ punpckhdq m4, m1
>> >+ paddq m5, m0
>> >+ paddq m6, m4
>> >+ ret
>> >+
>> >+%macro SSD_ONE_SS_32 0
>> >+cglobal pixel_ssd_ss_32x32, 4,5,7
>> >+ add r1d, r1d
>> >+ add r3d, r3d
>> >+ pxor m5, m5
>> >+ pxor m6, m6
>> >+
>> >+ call ssd_ss_32x4
>> >+ call ssd_ss_32x4
>> >+ call ssd_ss_32x4
>> >+ call ssd_ss_32x4
>> >+ call ssd_ss_32x4
>> >+ call ssd_ss_32x4
>> >+ call ssd_ss_32x4
>> >+ call ssd_ss_32x4
>> >+
>> >+ paddq m5, m6
>> >+ movhlps m2, m5
>> >+ paddq m5, m2
>> >+ movq rax, m5
>> >+ RET
>> >+%endmacro
>> >+
>> >+;Function to find the SSD of a 64x4 block, sse2, 12-bit depth
>> >+;Defined separately so it can be called from the SSD_ONE_SS_64 macro
>> >+INIT_XMM sse2
>> >+cglobal ssd_ss_64x4
>> >+ pxor m4, m4
>> >+ mov r4d, 4
>> >+.loop:
>> >+ ;----process 1st half a row----
>> >+ movu m0, [r0]
>> >+ movu m1, [r0 + mmsize]
>> >+ movu m2, [r2]
>> >+ movu m3, [r2 + mmsize]
>> >+ psubw m0, m2
>> >+ psubw m1, m3
>> >+ pmaddwd m0, m0
>> >+ pmaddwd m1, m1
>> >+ paddd m4, m0
>> >+ paddd m4, m1
>> >+ movu m0, [r0 + 2 * mmsize]
>> >+ movu m1, [r0 + 3 * mmsize]
>> >+ movu m2, [r2 + 2 * mmsize]
>> >+ movu m3, [r2 + 3 * mmsize]
>> >+ psubw m0, m2
>> >+ psubw m1, m3
>> >+ pmaddwd m0, m0
>> >+ pmaddwd m1, m1
>> >+ paddd m4, m0
>> >+ paddd m4, m1
>> >+ ;----process 2nd half a row----
>> >+ movu m0, [r0 + 4 * mmsize]
>> >+ movu m1, [r0 + 5 * mmsize]
>> >+ movu m2, [r2 + 4 * mmsize]
>> >+ movu m3, [r2 + 5 * mmsize]
>> >+ psubw m0, m2
>> >+ psubw m1, m3
>> >+ pmaddwd m0, m0
>> >+ pmaddwd m1, m1
>> >+ paddd m4, m0
>> >+ paddd m4, m1
>> >+ movu m0, [r0 + 6 * mmsize]
>> >+ movu m1, [r0 + 7 * mmsize]
>> >+ movu m2, [r2 + 6 * mmsize]
>> >+ movu m3, [r2 + 7 * mmsize]
>> >+ psubw m0, m2
>> >+ psubw m1, m3
>> >+ pmaddwd m0, m0
>> >+ pmaddwd m1, m1
>> >+ paddd m4, m0
>> >+ paddd m4, m1
>> >+
>> >+ add r0, r1
>> >+ add r2, r3
>> >+ dec r4d
>> >+ jnz .loop
>> >+
>> >+ mova m0, m4
>> >+ pxor m1, m1
>> >+ punpckldq m0, m1
>> >+ punpckhdq m4, m1
>> >+ paddq m5, m0
>> >+ paddq m6, m4
>> >+ ret
>> >+
>> >+%macro SSD_ONE_SS_64 0
>> >+cglobal pixel_ssd_ss_64x64, 4,6,7
>> >+ add r1d, r1d
>> >+ add r3d, r3d
>> >+ pxor m5, m5
>> >+ pxor m6, m6
>> >+ mov r5d, 16
>> >+.iterate:
>> >+ call ssd_ss_64x4
>> >+ dec r5
>> >+ jne .iterate
>> >+
>> >+ paddq m5, m6
>> >+ movhlps m2, m5
>> >+ paddq m5, m2
>> >+ movq rax, m5
>> >+ RET
>> >+%endmacro
>> >+
>> > %macro SSD_TWO 2
>> > cglobal pixel_ssd_ss_%1x%2,
>> 4,7,8
>> > FIX_STRIDES r1, r3
>> >@@ -551,19 +698,22 @@
>> > SSD_ONE 32, 8
>> > SSD_ONE 32, 16
>> > SSD_ONE 32, 24
>> >-SSD_ONE 32, 32
>> >+
>> >
>> > %if BIT_DEPTH <= 10
>> > SSD_ONE 32, 64
>> >+ SSD_ONE 32, 32
>> >+ SSD_TWO 64, 64
>> > %else
>> > SSD_ONE_32
>> >+ SSD_ONE_SS_32
>> >+ SSD_ONE_SS_64
>> > %endif
>> >
>> > SSD_TWO 48, 64
>> > SSD_TWO 64, 16
>> > SSD_TWO 64, 32
>> > SSD_TWO 64, 48
>> >-SSD_TWO 64, 64
>> > INIT_YMM avx2
>> > SSD_ONE 16, 8
>> > SSD_ONE 16, 32