[x265] [PATCH 1 of 4] cleanup: remove unused code in pixel-a.asm
Min Chen
chenm003 at 163.com
Mon Nov 25 07:37:59 CET 2013
# HG changeset patch
# User Min Chen <chenm003 at 163.com>
# Date 1385283236 -28800
# Node ID 513f564ba3602391fa9689b305ac92be6fb78d03
# Parent 464af047f7b12a0a0e105d7550d454f30cf16eea
cleanup: remove unused code in pixel-a.asm
diff -r 464af047f7b1 -r 513f564ba360 source/common/x86/pixel-a.asm
--- a/source/common/x86/pixel-a.asm Sun Nov 24 14:52:31 2013 +0800
+++ b/source/common/x86/pixel-a.asm Sun Nov 24 16:53:56 2013 +0800
@@ -60,64 +60,8 @@
mask_1100: times 2 dd 0, -1
pb_pppm: times 4 db 1,1,1,-1
deinterleave_shuf: db 0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15
-intrax3_shuf: db 7,6,7,6,5,4,5,4,3,2,3,2,1,0,1,0
-
-intrax9a_ddlr1: db 6, 7, 8, 9, 7, 8, 9,10, 4, 5, 6, 7, 3, 4, 5, 6
-intrax9a_ddlr2: db 8, 9,10,11, 9,10,11,12, 2, 3, 4, 5, 1, 2, 3, 4
-intrax9a_hdu1: db 15, 4, 5, 6,14, 3,15, 4,14, 2,13, 1,13, 1,12, 0
-intrax9a_hdu2: db 13, 2,14, 3,12, 1,13, 2,12, 0,11,11,11,11,11,11
-intrax9a_vrl1: db 10,11,12,13, 3, 4, 5, 6,11,12,13,14, 5, 6, 7, 8
-intrax9a_vrl2: db 2,10,11,12, 1, 3, 4, 5,12,13,14,15, 6, 7, 8, 9
-intrax9a_vh1: db 6, 7, 8, 9, 6, 7, 8, 9, 4, 4, 4, 4, 3, 3, 3, 3
-intrax9a_vh2: db 6, 7, 8, 9, 6, 7, 8, 9, 2, 2, 2, 2, 1, 1, 1, 1
-intrax9a_dc: db 1, 2, 3, 4, 6, 7, 8, 9,-1,-1,-1,-1,-1,-1,-1,-1
-intrax9a_lut: db 0x60,0x68,0x80,0x00,0x08,0x20,0x40,0x28,0x48,0,0,0,0,0,0,0
-pw_s01234567: dw 0x8000,0x8001,0x8002,0x8003,0x8004,0x8005,0x8006,0x8007
-pw_s01234657: dw 0x8000,0x8001,0x8002,0x8003,0x8004,0x8006,0x8005,0x8007
-intrax9_edge: db 0, 0, 1, 2, 3, 7, 8, 9,10,11,12,13,14,15,15,15
-
-intrax9b_ddlr1: db 6, 7, 8, 9, 4, 5, 6, 7, 7, 8, 9,10, 3, 4, 5, 6
-intrax9b_ddlr2: db 8, 9,10,11, 2, 3, 4, 5, 9,10,11,12, 1, 2, 3, 4
-intrax9b_hdu1: db 15, 4, 5, 6,14, 2,13, 1,14, 3,15, 4,13, 1,12, 0
-intrax9b_hdu2: db 13, 2,14, 3,12, 0,11,11,12, 1,13, 2,11,11,11,11
-intrax9b_vrl1: db 10,11,12,13,11,12,13,14, 3, 4, 5, 6, 5, 6, 7, 8
-intrax9b_vrl2: db 2,10,11,12,12,13,14,15, 1, 3, 4, 5, 6, 7, 8, 9
-intrax9b_vh1: db 6, 7, 8, 9, 4, 4, 4, 4, 6, 7, 8, 9, 3, 3, 3, 3
-intrax9b_vh2: db 6, 7, 8, 9, 2, 2, 2, 2, 6, 7, 8, 9, 1, 1, 1, 1
-intrax9b_edge2: db 6, 7, 8, 9, 6, 7, 8, 9, 4, 3, 2, 1, 4, 3, 2, 1
-intrax9b_v1: db 0, 1,-1,-1,-1,-1,-1,-1, 4, 5,-1,-1,-1,-1,-1,-1
-intrax9b_v2: db 2, 3,-1,-1,-1,-1,-1,-1, 6, 7,-1,-1,-1,-1,-1,-1
-intrax9b_lut: db 0x60,0x64,0x80,0x00,0x04,0x20,0x40,0x24,0x44,0,0,0,0,0,0,0
ALIGN 32
-intra8x9_h1: db 7, 7, 7, 7, 7, 7, 7, 7, 5, 5, 5, 5, 5, 5, 5, 5
-intra8x9_h2: db 6, 6, 6, 6, 6, 6, 6, 6, 4, 4, 4, 4, 4, 4, 4, 4
-intra8x9_h3: db 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1
-intra8x9_h4: db 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0
-intra8x9_ddl1: db 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 9,10
-intra8x9_ddl2: db 2, 3, 4, 5, 6, 7, 8, 9, 4, 5, 6, 7, 8, 9,10,11
-intra8x9_ddl3: db 5, 6, 7, 8, 9,10,11,12, 7, 8, 9,10,11,12,13,14
-intra8x9_ddl4: db 6, 7, 8, 9,10,11,12,13, 8, 9,10,11,12,13,14,15
-intra8x9_vl1: db 0, 1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7, 8
-intra8x9_vl2: db 1, 2, 3, 4, 5, 6, 7, 8, 2, 3, 4, 5, 6, 7, 8, 9
-intra8x9_vl3: db 2, 3, 4, 5, 6, 7, 8, 9, 3, 4, 5, 6, 7, 8, 9,10
-intra8x9_vl4: db 3, 4, 5, 6, 7, 8, 9,10, 4, 5, 6, 7, 8, 9,10,11
-intra8x9_ddr1: db 8, 9,10,11,12,13,14,15, 6, 7, 8, 9,10,11,12,13
-intra8x9_ddr2: db 7, 8, 9,10,11,12,13,14, 5, 6, 7, 8, 9,10,11,12
-intra8x9_ddr3: db 4, 5, 6, 7, 8, 9,10,11, 2, 3, 4, 5, 6, 7, 8, 9
-intra8x9_ddr4: db 3, 4, 5, 6, 7, 8, 9,10, 1, 2, 3, 4, 5, 6, 7, 8
-intra8x9_vr1: db 8, 9,10,11,12,13,14,15, 7, 8, 9,10,11,12,13,14
-intra8x9_vr2: db 8, 9,10,11,12,13,14,15, 6, 8, 9,10,11,12,13,14
-intra8x9_vr3: db 5, 7, 8, 9,10,11,12,13, 3, 5, 7, 8, 9,10,11,12
-intra8x9_vr4: db 4, 6, 8, 9,10,11,12,13, 2, 4, 6, 8, 9,10,11,12
-intra8x9_hd1: db 3, 8, 9,10,11,12,13,14, 1, 6, 2, 7, 3, 8, 9,10
-intra8x9_hd2: db 2, 7, 3, 8, 9,10,11,12, 0, 5, 1, 6, 2, 7, 3, 8
-intra8x9_hd3: db 7, 8, 9,10,11,12,13,14, 3, 4, 5, 6, 7, 8, 9,10
-intra8x9_hd4: db 5, 6, 7, 8, 9,10,11,12, 1, 2, 3, 4, 5, 6, 7, 8
-intra8x9_hu1: db 13,12,11,10, 9, 8, 7, 6, 9, 8, 7, 6, 5, 4, 3, 2
-intra8x9_hu2: db 11,10, 9, 8, 7, 6, 5, 4, 7, 6, 5, 4, 3, 2, 1, 0
-intra8x9_hu3: db 5, 4, 3, 2, 1, 0,15,15, 1, 0,15,15,15,15,15,15
-intra8x9_hu4: db 3, 2, 1, 0,15,15,15,15,15,15,15,15,15,15,15,15
pw_s00112233: dw 0x8000,0x8000,0x8001,0x8001,0x8002,0x8002,0x8003,0x8003
pw_s00001111: dw 0x8000,0x8000,0x8000,0x8000,0x8001,0x8001,0x8001,0x8001
@@ -7387,2099 +7331,8 @@
;=============================================================================
; INTRA SATD
;=============================================================================
-
-%macro HSUMSUB2 8
- pshufd %4, %2, %7
- pshufd %5, %3, %7
- %1 %2, %8
- %1 %6, %8
- paddw %2, %4
- paddw %3, %5
-%endmacro
-
-; intra_sa8d_x3_8x8 and intra_satd_x3_4x4 are obsoleted by x9 on ssse3+,
-; and are only retained for old cpus.
-%macro INTRA_SA8D_SSE2 0
-%if ARCH_X86_64
-;-----------------------------------------------------------------------------
-; void intra_sa8d_x3_8x8( uint8_t *fenc, uint8_t edge[36], int *res )
-;-----------------------------------------------------------------------------
-cglobal intra_sa8d_x3_8x8, 3,3,14
- ; 8x8 hadamard
- pxor m8, m8
- movq m0, [r0+0*FENC_STRIDE]
- movq m1, [r0+1*FENC_STRIDE]
- movq m2, [r0+2*FENC_STRIDE]
- movq m3, [r0+3*FENC_STRIDE]
- movq m4, [r0+4*FENC_STRIDE]
- movq m5, [r0+5*FENC_STRIDE]
- movq m6, [r0+6*FENC_STRIDE]
- movq m7, [r0+7*FENC_STRIDE]
- punpcklbw m0, m8
- punpcklbw m1, m8
- punpcklbw m2, m8
- punpcklbw m3, m8
- punpcklbw m4, m8
- punpcklbw m5, m8
- punpcklbw m6, m8
- punpcklbw m7, m8
-
- HADAMARD8_2D 0, 1, 2, 3, 4, 5, 6, 7, 8
-
- ABSW2 m8, m9, m2, m3, m2, m3
- ABSW2 m10, m11, m4, m5, m4, m5
- paddusw m8, m10
- paddusw m9, m11
- ABSW2 m10, m11, m6, m7, m6, m7
- ABSW m13, m1, m1
- paddusw m10, m11
- paddusw m8, m9
- paddusw m13, m10
- paddusw m13, m8
-
- ; 1D hadamard of edges
- movq m8, [r1+7]
- movq m9, [r1+16]
- pxor m10, m10
- punpcklbw m8, m10
- punpcklbw m9, m10
- HSUMSUB2 pmullw, m8, m9, m10, m11, m11, q1032, [pw_ppppmmmm]
- HSUMSUB2 pmullw, m8, m9, m10, m11, m11, q2301, [pw_ppmmppmm]
- pshuflw m10, m8, q2301
- pshuflw m11, m9, q2301
- pshufhw m10, m10, q2301
- pshufhw m11, m11, q2301
- pmullw m8, [pw_pmpmpmpm]
- pmullw m11, [pw_pmpmpmpm]
- paddw m8, m10
- paddw m9, m11
-
- ; differences
- paddw m10, m8, m9
- paddw m10, [pw_8]
- pand m10, [sw_f0]
- psllw m10, 2 ; dc
-
- psllw m8, 3 ; left edge
- psubw m8, m0
- psubw m10, m0
- ABSW2 m8, m10, m8, m10, m11, m12 ; 1x8 sum
- paddusw m8, m13
- paddusw m13, m10
- punpcklwd m0, m1
- punpcklwd m2, m3
- punpcklwd m4, m5
- punpcklwd m6, m7
- punpckldq m0, m2
- punpckldq m4, m6
- punpcklqdq m0, m4 ; transpose
- psllw m9, 3 ; top edge
- psrldq m2, m13, 2 ; 8x7 sum
- psubw m0, m9 ; 8x1 sum
- ABSW m0, m0, m9
- paddusw m2, m0
-
- ; 3x HADDW
- movdqa m7, [pw_1]
- pmaddwd m2, m7
- pmaddwd m8, m7
- pmaddwd m13, m7
- punpckhdq m3, m2, m8
- punpckldq m2, m8
- pshufd m5, m13, q3311
- paddd m2, m3
- paddd m5, m13
- punpckhqdq m0, m2, m5
- punpcklqdq m2, m5
- pavgw m0, m2
- pxor m1, m1
- pavgw m0, m1
- movq [r2], m0 ; i8x8_v, i8x8_h
- psrldq m0, 8
- movd [r2+8], m0 ; i8x8_dc
- RET
-%endif ; ARCH_X86_64
-%endmacro ; INTRA_SA8D_SSE2
-
-; in: r0 = fenc
-; out: m0..m3 = hadamard coefs
-INIT_MMX
-cglobal hadamard_load
-; not really a global, but otherwise cycles get attributed to the wrong function in profiling
-%if HIGH_BIT_DEPTH
- mova m0, [r0+0*FENC_STRIDEB]
- mova m1, [r0+1*FENC_STRIDEB]
- mova m2, [r0+2*FENC_STRIDEB]
- mova m3, [r0+3*FENC_STRIDEB]
-%else
- pxor m7, m7
- movd m0, [r0+0*FENC_STRIDE]
- movd m1, [r0+1*FENC_STRIDE]
- movd m2, [r0+2*FENC_STRIDE]
- movd m3, [r0+3*FENC_STRIDE]
- punpcklbw m0, m7
- punpcklbw m1, m7
- punpcklbw m2, m7
- punpcklbw m3, m7
-%endif
- HADAMARD4_2D 0, 1, 2, 3, 4
- SAVE_MM_PERMUTATION
- ret
-
-%macro SCALAR_HADAMARD 4-5 ; direction, offset, 3x tmp
-%ifidn %1, top
-%if HIGH_BIT_DEPTH
- mova %3, [r1+%2*SIZEOF_PIXEL-FDEC_STRIDEB]
-%else
- movd %3, [r1+%2*SIZEOF_PIXEL-FDEC_STRIDEB]
- pxor %5, %5
- punpcklbw %3, %5
-%endif
-%else ; left
-%ifnidn %2, 0
- shl %2d, 5 ; log(FDEC_STRIDEB)
-%endif
- movd %3, [r1+%2*SIZEOF_PIXEL-4+1*FDEC_STRIDEB]
- pinsrw %3, [r1+%2*SIZEOF_PIXEL-2+0*FDEC_STRIDEB], 0
- pinsrw %3, [r1+%2*SIZEOF_PIXEL-2+2*FDEC_STRIDEB], 2
- pinsrw %3, [r1+%2*SIZEOF_PIXEL-2+3*FDEC_STRIDEB], 3
-%if HIGH_BIT_DEPTH == 0
- psrlw %3, 8
-%endif
-%ifnidn %2, 0
- shr %2d, 5
-%endif
-%endif ; direction
-%if cpuflag(ssse3)
- %define %%sign psignw
-%else
- %define %%sign pmullw
-%endif
- pshufw %4, %3, q1032
- %%sign %4, [pw_ppmmppmm]
- paddw %3, %4
- pshufw %4, %3, q2301
- %%sign %4, [pw_pmpmpmpm]
- paddw %3, %4
- psllw %3, 2
- mova [%1_1d+2*%2], %3
-%endmacro
-
-%macro SUM_MM_X3 8 ; 3x sum, 4x tmp, op
- pxor %7, %7
- pshufw %4, %1, q1032
- pshufw %5, %2, q1032
- pshufw %6, %3, q1032
- paddw %1, %4
- paddw %2, %5
- paddw %3, %6
- punpcklwd %1, %7
- punpcklwd %2, %7
- punpcklwd %3, %7
- pshufw %4, %1, q1032
- pshufw %5, %2, q1032
- pshufw %6, %3, q1032
- %8 %1, %4
- %8 %2, %5
- %8 %3, %6
-%endmacro
-
-; in: m1..m3
-; out: m7
-; clobber: m4..m6
-%macro SUM3x4 0
- ABSW2 m4, m5, m1, m2, m1, m2
- ABSW m7, m3, m3
- paddw m4, m5
- paddw m7, m4
-%endmacro
-
-; in: m0..m3 (4x4)
-; out: m0 v, m4 h, m5 dc
-; clobber: m1..m3
-%macro SUM4x3 3 ; dc, left, top
- movq m4, %2
-%ifid %1
- movq m5, %1
-%else
- movd m5, %1
-%endif
- psubw m4, m0
- psubw m5, m0
- punpcklwd m0, m1
- punpcklwd m2, m3
- punpckldq m0, m2 ; transpose
- psubw m0, %3
- ABSW2 m4, m5, m4, m5, m2, m3 ; 1x4 sum
- ABSW m0, m0, m1 ; 4x1 sum
-%endmacro
-
-%macro INTRA_X3_MMX 0
-;-----------------------------------------------------------------------------
-; void intra_satd_x3_4x4( uint8_t *fenc, uint8_t *fdec, int *res )
-;-----------------------------------------------------------------------------
-cglobal intra_satd_x3_4x4, 3,3
-%if UNIX64
- ; stack is 16 byte aligned because abi says so
- %define top_1d rsp-8 ; size 8
- %define left_1d rsp-16 ; size 8
-%else
- ; WIN64: stack is 16 byte aligned because abi says so
- ; X86_32: stack is 16 byte aligned at least in gcc, and we've pushed 3 regs + return address, so it's still aligned
- SUB rsp, 16
- %define top_1d rsp+8
- %define left_1d rsp
-%endif
-
- call hadamard_load
- SCALAR_HADAMARD left, 0, m4, m5
- SCALAR_HADAMARD top, 0, m6, m5, m7
- paddw m6, m4
- pavgw m6, [pw_16]
- pand m6, [sw_f0] ; dc
-
- SUM3x4
- SUM4x3 m6, [left_1d], [top_1d]
- paddw m4, m7
- paddw m5, m7
- movq m1, m5
- psrlq m1, 16 ; 4x3 sum
- paddw m0, m1
-
- SUM_MM_X3 m0, m4, m5, m1, m2, m3, m6, pavgw
- movd [r2+0], m0 ; i4x4_v satd
- movd [r2+4], m4 ; i4x4_h satd
- movd [r2+8], m5 ; i4x4_dc satd
-%if UNIX64 == 0
- ADD rsp, 16
-%endif
- RET
-
-;-----------------------------------------------------------------------------
-; void intra_satd_x3_16x16( uint8_t *fenc, uint8_t *fdec, int *res )
-;-----------------------------------------------------------------------------
-cglobal intra_satd_x3_16x16, 0,5
- %assign stack_pad 120 + ((stack_offset+120+gprsize)&15)
- ; not really needed on x86_64, just shuts up valgrind about storing data below the stack across a function call
- SUB rsp, stack_pad
-%define sums rsp+64 ; size 56
-%define top_1d rsp+32 ; size 32
-%define left_1d rsp ; size 32
- movifnidn r1, r1mp
-
- pxor m7, m7
- mova [sums+ 0], m7
- mova [sums+ 8], m7
- mova [sums+16], m7
-%if HIGH_BIT_DEPTH
- mova [sums+24], m7
- mova [sums+32], m7
- mova [sums+40], m7
- mova [sums+48], m7
-%endif
-
- ; 1D hadamards
- mov r3d, 12
- movd m6, [pw_32]
-.loop_edge:
- SCALAR_HADAMARD left, r3, m0, m1
- SCALAR_HADAMARD top, r3, m1, m2, m3
- pavgw m0, m1
- paddw m6, m0
- sub r3d, 4
- jge .loop_edge
- psrlw m6, 2
- pand m6, [sw_f0] ; dc
-
- ; 2D hadamards
- movifnidn r0, r0mp
- mov r3, -4
-.loop_y:
- mov r4, -4
-.loop_x:
- call hadamard_load
-
- SUM3x4
- SUM4x3 m6, [left_1d+8*(r3+4)], [top_1d+8*(r4+4)]
- pavgw m4, m7
- pavgw m5, m7
- paddw m0, [sums+ 0] ; i16x16_v satd
- paddw m4, [sums+ 8] ; i16x16_h satd
- paddw m5, [sums+16] ; i16x16_dc satd
- mova [sums+ 0], m0
- mova [sums+ 8], m4
- mova [sums+16], m5
-
- add r0, 4*SIZEOF_PIXEL
- inc r4
- jl .loop_x
-%if HIGH_BIT_DEPTH
- psrld m7, m4, 16
- pslld m4, 16
- psrld m4, 16
- paddd m4, m7
- psrld m7, m0, 16
- pslld m0, 16
- psrld m0, 16
- paddd m0, m7
- paddd m4, [sums+32]
- paddd m0, [sums+24]
- mova [sums+32], m4
- mova [sums+24], m0
- pxor m7, m7
- punpckhwd m3, m5, m7
- punpcklwd m5, m7
- paddd m3, [sums+48]
- paddd m5, [sums+40]
- mova [sums+48], m3
- mova [sums+40], m5
- mova [sums+ 0], m7
- mova [sums+ 8], m7
- mova [sums+16], m7
-%endif
- add r0, 4*FENC_STRIDEB-16*SIZEOF_PIXEL
- inc r3
- jl .loop_y
-
-; horizontal sum
- movifnidn r2, r2mp
-%if HIGH_BIT_DEPTH
- mova m1, m5
- paddd m5, m3
- HADDD m5, m7 ; DC satd
- HADDD m4, m7 ; H satd
- HADDD m0, m7 ; the part of V satd that doesn't overlap with DC
- psrld m0, 1
- psrlq m1, 32 ; DC[1]
- paddd m0, m3 ; DC[2]
- psrlq m3, 32 ; DC[3]
- paddd m0, m1
- paddd m0, m3
-%else
- mova m7, m5
- SUM_MM_X3 m0, m4, m5, m3, m1, m2, m6, paddd
- psrld m0, 1
- pslld m7, 16
- psrld m7, 16
- paddd m0, m5
- psubd m0, m7
-%endif
- movd [r2+8], m5 ; i16x16_dc satd
- movd [r2+4], m4 ; i16x16_h satd
- movd [r2+0], m0 ; i16x16_v satd
- ADD rsp, stack_pad
- RET
-
-%if ARCH_X86_64
- %define t0 r6
-%else
- %define t0 r2
-%endif
-
-;-----------------------------------------------------------------------------
-; void intra_satd_x3_8x8c( uint8_t *fenc, uint8_t *fdec, int *res )
-;-----------------------------------------------------------------------------
-cglobal intra_satd_x3_8x8c, 0,6
- ; not really needed on x86_64, just shuts up valgrind about storing data below the stack across a function call
- SUB rsp, 72
-%define sums rsp+48 ; size 24
-%define dc_1d rsp+32 ; size 16
-%define top_1d rsp+16 ; size 16
-%define left_1d rsp ; size 16
- movifnidn r1, r1mp
- pxor m7, m7
- mova [sums+ 0], m7
- mova [sums+ 8], m7
- mova [sums+16], m7
-
- ; 1D hadamards
- mov r3d, 4
-.loop_edge:
- SCALAR_HADAMARD left, r3, m0, m1
- SCALAR_HADAMARD top, r3, m0, m1, m2
- sub r3d, 4
- jge .loop_edge
-
- ; dc
- movzx t0d, word [left_1d+0]
- movzx r3d, word [top_1d+0]
- movzx r4d, word [left_1d+8]
- movzx r5d, word [top_1d+8]
- lea t0d, [t0 + r3 + 16]
- lea r3d, [r4 + r5 + 16]
- shr t0d, 1
- shr r3d, 1
- add r4d, 8
- add r5d, 8
- and t0d, -16 ; tl
- and r3d, -16 ; br
- and r4d, -16 ; bl
- and r5d, -16 ; tr
- mov [dc_1d+ 0], t0d ; tl
- mov [dc_1d+ 4], r5d ; tr
- mov [dc_1d+ 8], r4d ; bl
- mov [dc_1d+12], r3d ; br
- lea r5, [dc_1d]
-
- ; 2D hadamards
- movifnidn r0, r0mp
- movifnidn r2, r2mp
- mov r3, -2
-.loop_y:
- mov r4, -2
-.loop_x:
- call hadamard_load
-
- SUM3x4
- SUM4x3 [r5+4*(r4+2)], [left_1d+8*(r3+2)], [top_1d+8*(r4+2)]
- pavgw m4, m7
- pavgw m5, m7
- paddw m0, [sums+16] ; i4x4_v satd
- paddw m4, [sums+8] ; i4x4_h satd
- paddw m5, [sums+0] ; i4x4_dc satd
- movq [sums+16], m0
- movq [sums+8], m4
- movq [sums+0], m5
-
- add r0, 4*SIZEOF_PIXEL
- inc r4
- jl .loop_x
- add r0, 4*FENC_STRIDEB-8*SIZEOF_PIXEL
- add r5, 8
- inc r3
- jl .loop_y
-
-; horizontal sum
- movq m0, [sums+0]
- movq m1, [sums+8]
- movq m2, [sums+16]
- movq m7, m0
-%if HIGH_BIT_DEPTH
- psrlq m7, 16
- HADDW m7, m3
- SUM_MM_X3 m0, m1, m2, m3, m4, m5, m6, paddd
- psrld m2, 1
- paddd m2, m7
-%else
- psrlq m7, 15
- paddw m2, m7
- SUM_MM_X3 m0, m1, m2, m3, m4, m5, m6, paddd
- psrld m2, 1
-%endif
- movd [r2+0], m0 ; i8x8c_dc satd
- movd [r2+4], m1 ; i8x8c_h satd
- movd [r2+8], m2 ; i8x8c_v satd
- ADD rsp, 72
- RET
-%endmacro ; INTRA_X3_MMX
-
-
-
-%macro PRED4x4_LOWPASS 5
-%ifid %5
- pavgb %5, %2, %3
- pxor %3, %2
- pand %3, [pb_1]
- psubusb %5, %3
- pavgb %1, %4, %5
-%else
- mova %5, %2
- pavgb %2, %3
- pxor %3, %5
- pand %3, [pb_1]
- psubusb %2, %3
- pavgb %1, %4, %2
-%endif
-%endmacro
-
-%macro INTRA_X9_PRED 2
-%if cpuflag(sse4)
- movu m1, [r1-1*FDEC_STRIDE-8]
- pinsrb m1, [r1+3*FDEC_STRIDE-1], 0
- pinsrb m1, [r1+2*FDEC_STRIDE-1], 1
- pinsrb m1, [r1+1*FDEC_STRIDE-1], 2
- pinsrb m1, [r1+0*FDEC_STRIDE-1], 3
-%else
- movd mm0, [r1+3*FDEC_STRIDE-4]
- punpcklbw mm0, [r1+2*FDEC_STRIDE-4]
- movd mm1, [r1+1*FDEC_STRIDE-4]
- punpcklbw mm1, [r1+0*FDEC_STRIDE-4]
- punpckhwd mm0, mm1
- psrlq mm0, 32
- movq2dq m0, mm0
- movu m1, [r1-1*FDEC_STRIDE-8]
- movss m1, m0 ; l3 l2 l1 l0 __ __ __ lt t0 t1 t2 t3 t4 t5 t6 t7
-%endif ; cpuflag
- pshufb m1, [intrax9_edge] ; l3 l3 l2 l1 l0 lt t0 t1 t2 t3 t4 t5 t6 t7 t7 __
- psrldq m0, m1, 1 ; l3 l2 l1 l0 lt t0 t1 t2 t3 t4 t5 t6 t7 t7 __ __
- psrldq m2, m1, 2 ; l2 l1 l0 lt t0 t1 t2 t3 t4 t5 t6 t7 t7 __ __ __
- pavgb m5, m0, m1 ; Gl3 Gl2 Gl1 Gl0 Glt Gt0 Gt1 Gt2 Gt3 Gt4 Gt5 __ __ __ __ __
- mova %2, m1
- PRED4x4_LOWPASS m0, m1, m2, m0, m4 ; Fl3 Fl2 Fl1 Fl0 Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5 Ft6 Ft7 __ __ __
- ; ddl ddr
- ; Ft1 Ft2 Ft3 Ft4 Flt Ft0 Ft1 Ft2
- ; Ft2 Ft3 Ft4 Ft5 Fl0 Flt Ft0 Ft1
- ; Ft3 Ft4 Ft5 Ft6 Fl1 Fl0 Flt Ft0
- ; Ft4 Ft5 Ft6 Ft7 Fl2 Fl1 Fl0 Flt
- pshufb m2, m0, [%1_ddlr1] ; a: ddl row0, ddl row1, ddr row0, ddr row1 / b: ddl row0, ddr row0, ddl row1, ddr row1
- pshufb m3, m0, [%1_ddlr2] ; rows 2,3
- ; hd hu
- ; Glt Flt Ft0 Ft1 Gl0 Fl1 Gl1 Fl2
- ; Gl0 Fl0 Glt Flt Gl1 Fl2 Gl2 Fl3
- ; Gl1 Fl1 Gl0 Fl0 Gl2 Fl3 Gl3 Gl3
- ; Gl2 Fl2 Gl1 Fl1 Gl3 Gl3 Gl3 Gl3
- pslldq m0, 5 ; ___ ___ ___ ___ ___ Fl3 Fl2 Fl1 Fl0 Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5
- palignr m7, m5, m0, 5 ; Fl3 Fl2 Fl1 Fl0 Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5 Gl3 Gl2 Gl1 Gl0 Glt
- pshufb m6, m7, [%1_hdu1]
- pshufb m7, m7, [%1_hdu2]
- ; vr vl
- ; Gt0 Gt1 Gt2 Gt3 Gt1 Gt2 Gt3 Gt4
- ; Flt Ft0 Ft1 Ft2 Ft1 Ft2 Ft3 Ft4
- ; Fl0 Gt0 Gt1 Gt2 Gt2 Gt3 Gt4 Gt5
- ; Fl1 Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5
- psrldq m5, 5 ; Gt0 Gt1 Gt2 Gt3 Gt4 Gt5 ...
- palignr m5, m0, 6 ; ___ Fl1 Fl0 Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5 Gt0 Gt1 Gt2 Gt3 Gt4 Gt5
- pshufb m4, m5, [%1_vrl1]
- pshufb m5, m5, [%1_vrl2]
-%endmacro ; INTRA_X9_PRED
-
-%macro INTRA_X9_VHDC 5 ; edge, fenc01, fenc23, tmp, tmp
- pshufb m2, m%1, [intrax9b_vh1]
- pshufb m3, m%1, [intrax9b_vh2]
- mova [pred_buf+0x60], m2
- mova [pred_buf+0x70], m3
- pshufb m%1, [intrax9b_edge2] ; t0 t1 t2 t3 t0 t1 t2 t3 l0 l1 l2 l3 l0 l1 l2 l3
- pmaddubsw m%1, [hmul_4p]
- pshufhw m0, m%1, q2301
- pshuflw m0, m0, q2301
- psignw m%1, [pw_pmpmpmpm]
- paddw m0, m%1
- psllw m0, 2 ; hadamard(top), hadamard(left)
- movhlps m3, m0
- pshufb m1, m0, [intrax9b_v1]
- pshufb m2, m0, [intrax9b_v2]
- paddw m0, m3
- psignw m3, [pw_pmmpzzzz] ; FIXME could this be eliminated?
- pavgw m0, [pw_16]
- pand m0, [sw_f0] ; dc
- ; This (as well as one of the steps in intra_satd_x9_4x4.satd_8x4) could be
- ; changed from a wd transpose to a qdq, with appropriate rearrangement of inputs.
- ; Which would be faster on conroe, but slower on penryn and sandybridge, and too invasive to ifdef.
- HADAMARD 0, sumsub, %2, %3, %4, %5
- HADAMARD 1, sumsub, %2, %3, %4, %5
- movd r3d, m0
- shr r3d, 4
- imul r3d, 0x01010101
- mov [pred_buf+0x80], r3d
- mov [pred_buf+0x88], r3d
- mov [pred_buf+0x90], r3d
- mov [pred_buf+0x98], r3d
- psubw m3, m%2
- psubw m0, m%2
- psubw m1, m%2
- psubw m2, m%3
- pabsw m%3, m%3
- pabsw m3, m3
- pabsw m0, m0
- pabsw m1, m1
- pabsw m2, m2
- pavgw m3, m%3
- pavgw m0, m%3
- pavgw m1, m2
-%if cpuflag(sse4)
- phaddw m3, m0
-%else
- SBUTTERFLY qdq, 3, 0, 2
- paddw m3, m0
-%endif
- movhlps m2, m1
- paddw m1, m2
-%if cpuflag(xop)
- vphaddwq m3, m3
- vphaddwq m1, m1
- packssdw m1, m3
-%else
- phaddw m1, m3
- pmaddwd m1, [pw_1] ; v, _, h, dc
-%endif
-%endmacro ; INTRA_X9_VHDC
-
-%macro INTRA_X9_END 2
-%if cpuflag(sse4)
- phminposuw m0, m0 ; h,dc,ddl,ddr,vr,hd,vl,hu
- movd eax, m0
- add eax, 1<<16
- cmp ax, r3w
- cmovge eax, r3d
-%else
-%if %1
- ; 4x4 sad is up to 12 bits; +bitcosts -> 13 bits; pack with 3 bit index
- psllw m0, 3
- paddw m0, [pw_s01234567] ; h,dc,ddl,ddr,vr,hd,vl,hu
-%else
- ; 4x4 satd is up to 13 bits; +bitcosts and saturate -> 13 bits; pack with 3 bit index
- psllw m0, 2
- paddusw m0, m0
- paddw m0, [pw_s01234657] ; h,dc,ddl,ddr,vr,vl,hd,hu
-%endif
- movhlps m1, m0
- pminsw m0, m1
- pshuflw m1, m0, q0032
- pminsw m0, m1
- pshuflw m1, m0, q0001
- pminsw m0, m1
- movd eax, m0
- movsx r2d, ax
- and eax, 7
- sar r2d, 3
- shl eax, 16
- ; 1<<16: increment index to match intra4x4_pred_e. couldn't do this before because it had to fit in 3 bits
- ; 1<<12: undo sign manipulation
- lea eax, [rax+r2+(1<<16)+(1<<12)]
- cmp ax, r3w
- cmovge eax, r3d
-%endif ; cpuflag
-
- ; output the predicted samples
- mov r3d, eax
- shr r3d, 16
-%ifdef PIC
- lea r2, [%2_lut]
- movzx r2d, byte [r2+r3]
-%else
- movzx r2d, byte [%2_lut+r3]
-%endif
-%if %1 ; sad
- movq mm0, [pred_buf+r2]
- movq mm1, [pred_buf+r2+16]
- movd [r1+0*FDEC_STRIDE], mm0
- movd [r1+2*FDEC_STRIDE], mm1
- psrlq mm0, 32
- psrlq mm1, 32
- movd [r1+1*FDEC_STRIDE], mm0
- movd [r1+3*FDEC_STRIDE], mm1
-%else ; satd
-%assign i 0
-%rep 4
- mov r3d, [pred_buf+r2+8*i]
- mov [r1+i*FDEC_STRIDE], r3d
-%assign i i+1
-%endrep
-%endif
-%endmacro ; INTRA_X9_END
-
-%macro INTRA_X9 0
-;-----------------------------------------------------------------------------
-; int intra_sad_x9_4x4( uint8_t *fenc, uint8_t *fdec, uint16_t *bitcosts )
-;-----------------------------------------------------------------------------
-%if notcpuflag(xop)
-cglobal intra_sad_x9_4x4, 3,4,9
- %assign pad 0xc0-gprsize-(stack_offset&15)
- %define pred_buf rsp
- sub rsp, pad
-%if ARCH_X86_64
- INTRA_X9_PRED intrax9a, m8
-%else
- INTRA_X9_PRED intrax9a, [rsp+0xa0]
-%endif
- mova [rsp+0x00], m2
- mova [rsp+0x10], m3
- mova [rsp+0x20], m4
- mova [rsp+0x30], m5
- mova [rsp+0x40], m6
- mova [rsp+0x50], m7
-%if cpuflag(sse4)
- movd m0, [r0+0*FENC_STRIDE]
- pinsrd m0, [r0+1*FENC_STRIDE], 1
- movd m1, [r0+2*FENC_STRIDE]
- pinsrd m1, [r0+3*FENC_STRIDE], 1
-%else
- movd mm0, [r0+0*FENC_STRIDE]
- punpckldq mm0, [r0+1*FENC_STRIDE]
- movd mm1, [r0+2*FENC_STRIDE]
- punpckldq mm1, [r0+3*FENC_STRIDE]
- movq2dq m0, mm0
- movq2dq m1, mm1
-%endif
- punpcklqdq m0, m0
- punpcklqdq m1, m1
- psadbw m2, m0
- psadbw m3, m1
- psadbw m4, m0
- psadbw m5, m1
- psadbw m6, m0
- psadbw m7, m1
- paddd m2, m3
- paddd m4, m5
- paddd m6, m7
-%if ARCH_X86_64
- SWAP 7, 8
- pxor m8, m8
- %define %%zero m8
-%else
- mova m7, [rsp+0xa0]
- %define %%zero [pb_0]
-%endif
- pshufb m3, m7, [intrax9a_vh1]
- pshufb m5, m7, [intrax9a_vh2]
- pshufb m7, [intrax9a_dc]
- psadbw m7, %%zero
- psrlw m7, 2
- mova [rsp+0x60], m3
- mova [rsp+0x70], m5
- psadbw m3, m0
- pavgw m7, %%zero
- pshufb m7, %%zero
- psadbw m5, m1
- movq [rsp+0x80], m7
- movq [rsp+0x90], m7
- psadbw m0, m7
- paddd m3, m5
- psadbw m1, m7
- paddd m0, m1
- movzx r3d, word [r2]
- movd r0d, m3 ; v
- add r3d, r0d
- punpckhqdq m3, m0 ; h, dc
- shufps m3, m2, q2020
- psllq m6, 32
- por m4, m6
- movu m0, [r2+2]
- packssdw m3, m4
- paddw m0, m3
- INTRA_X9_END 1, intrax9a
- add rsp, pad
- RET
-%endif ; cpuflag
-
-%if ARCH_X86_64
-;-----------------------------------------------------------------------------
-; int intra_satd_x9_4x4( uint8_t *fenc, uint8_t *fdec, uint16_t *bitcosts )
-;-----------------------------------------------------------------------------
-cglobal intra_satd_x9_4x4, 3,4,16
- %assign pad 0xb0-gprsize-(stack_offset&15)
- %define pred_buf rsp
- sub rsp, pad
- INTRA_X9_PRED intrax9b, m15
- mova [rsp+0x00], m2
- mova [rsp+0x10], m3
- mova [rsp+0x20], m4
- mova [rsp+0x30], m5
- mova [rsp+0x40], m6
- mova [rsp+0x50], m7
- movd m8, [r0+0*FENC_STRIDE]
- movd m9, [r0+1*FENC_STRIDE]
- movd m10, [r0+2*FENC_STRIDE]
- movd m11, [r0+3*FENC_STRIDE]
- mova m12, [hmul_8p]
- pshufd m8, m8, 0
- pshufd m9, m9, 0
- pshufd m10, m10, 0
- pshufd m11, m11, 0
- pmaddubsw m8, m12
- pmaddubsw m9, m12
- pmaddubsw m10, m12
- pmaddubsw m11, m12
- movddup m0, m2
- pshufd m1, m2, q3232
- movddup m2, m3
- movhlps m3, m3
- call .satd_8x4 ; ddr, ddl
- movddup m2, m5
- pshufd m3, m5, q3232
- mova m5, m0
- movddup m0, m4
- pshufd m1, m4, q3232
- call .satd_8x4 ; vr, vl
- movddup m2, m7
- pshufd m3, m7, q3232
- mova m4, m0
- movddup m0, m6
- pshufd m1, m6, q3232
- call .satd_8x4 ; hd, hu
-%if cpuflag(sse4)
- punpckldq m4, m0
-%else
- punpcklqdq m4, m0 ; conroe dislikes punpckldq, and ssse3 INTRA_X9_END can handle arbitrary orders whereas phminposuw can't
-%endif
- mova m1, [pw_ppmmppmm]
- psignw m8, m1
- psignw m10, m1
- paddw m8, m9
- paddw m10, m11
- INTRA_X9_VHDC 15, 8, 10, 6, 7
- ; find minimum
- movu m0, [r2+2]
- movd r3d, m1
- palignr m5, m1, 8
-%if notcpuflag(sse4)
- pshufhw m0, m0, q3120 ; compensate for different order in unpack
-%endif
- packssdw m5, m4
- paddw m0, m5
- movzx r0d, word [r2]
- add r3d, r0d
- INTRA_X9_END 0, intrax9b
- add rsp, pad
- RET
-RESET_MM_PERMUTATION
-ALIGN 16
-.satd_8x4:
- pmaddubsw m0, m12
- pmaddubsw m1, m12
- pmaddubsw m2, m12
- pmaddubsw m3, m12
- psubw m0, m8
- psubw m1, m9
- psubw m2, m10
- psubw m3, m11
- SATD_8x4_SSE 0, 0, 1, 2, 3, 13, 14, 0, swap
- pmaddwd m0, [pw_1]
-%if cpuflag(sse4)
- pshufd m1, m0, q0032
-%else
- movhlps m1, m0
-%endif
- paddd xmm0, m0, m1 ; consistent location of return value. only the avx version of hadamard permutes m0, so 3arg is free
- ret
-
-%else ; !ARCH_X86_64
-cglobal intra_satd_x9_4x4, 3,4,8
- %assign pad 0x120-gprsize-(stack_offset&15)
- %define fenc_buf rsp
- %define pred_buf rsp+0x40
- %define spill rsp+0xe0
- sub rsp, pad
- INTRA_X9_PRED intrax9b, [spill+0x20]
- mova [pred_buf+0x00], m2
- mova [pred_buf+0x10], m3
- mova [pred_buf+0x20], m4
- mova [pred_buf+0x30], m5
- mova [pred_buf+0x40], m6
- mova [pred_buf+0x50], m7
- movd m4, [r0+0*FENC_STRIDE]
- movd m5, [r0+1*FENC_STRIDE]
- movd m6, [r0+2*FENC_STRIDE]
- movd m0, [r0+3*FENC_STRIDE]
- mova m7, [hmul_8p]
- pshufd m4, m4, 0
- pshufd m5, m5, 0
- pshufd m6, m6, 0
- pshufd m0, m0, 0
- pmaddubsw m4, m7
- pmaddubsw m5, m7
- pmaddubsw m6, m7
- pmaddubsw m0, m7
- mova [fenc_buf+0x00], m4
- mova [fenc_buf+0x10], m5
- mova [fenc_buf+0x20], m6
- mova [fenc_buf+0x30], m0
- movddup m0, m2
- pshufd m1, m2, q3232
- movddup m2, m3
- movhlps m3, m3
- pmaddubsw m0, m7
- pmaddubsw m1, m7
- pmaddubsw m2, m7
- pmaddubsw m3, m7
- psubw m0, m4
- psubw m1, m5
- psubw m2, m6
- call .satd_8x4b ; ddr, ddl
- mova m3, [pred_buf+0x30]
- mova m1, [pred_buf+0x20]
- movddup m2, m3
- movhlps m3, m3
- movq [spill+0x08], m0
- movddup m0, m1
- movhlps m1, m1
- call .satd_8x4 ; vr, vl
- mova m3, [pred_buf+0x50]
- mova m1, [pred_buf+0x40]
- movddup m2, m3
- movhlps m3, m3
- movq [spill+0x10], m0
- movddup m0, m1
- movhlps m1, m1
- call .satd_8x4 ; hd, hu
- movq [spill+0x18], m0
- mova m1, [spill+0x20]
- mova m4, [fenc_buf+0x00]
- mova m5, [fenc_buf+0x20]
- mova m2, [pw_ppmmppmm]
- psignw m4, m2
- psignw m5, m2
- paddw m4, [fenc_buf+0x10]
- paddw m5, [fenc_buf+0x30]
- INTRA_X9_VHDC 1, 4, 5, 6, 7
- ; find minimum
- movu m0, [r2+2]
- movd r3d, m1
- punpckhqdq m1, [spill+0x00]
- packssdw m1, [spill+0x10]
-%if cpuflag(sse4)
- pshufhw m1, m1, q3120
-%else
- pshufhw m0, m0, q3120
-%endif
- paddw m0, m1
- movzx r0d, word [r2]
- add r3d, r0d
- INTRA_X9_END 0, intrax9b
- add rsp, pad
- RET
-RESET_MM_PERMUTATION
-ALIGN 16
-.satd_8x4:
- pmaddubsw m0, m7
- pmaddubsw m1, m7
- pmaddubsw m2, m7
- pmaddubsw m3, m7
- %xdefine fenc_buf fenc_buf+gprsize
- psubw m0, [fenc_buf+0x00]
- psubw m1, [fenc_buf+0x10]
- psubw m2, [fenc_buf+0x20]
-.satd_8x4b:
- psubw m3, [fenc_buf+0x30]
- SATD_8x4_SSE 0, 0, 1, 2, 3, 4, 5, 0, swap
- pmaddwd m0, [pw_1]
-%if cpuflag(sse4)
- pshufd m1, m0, q0032
-%else
- movhlps m1, m0
-%endif
- paddd xmm0, m0, m1
- ret
-%endif ; ARCH
-%endmacro ; INTRA_X9
-
-
-
-%macro INTRA8_X9 0
-;-----------------------------------------------------------------------------
-; int intra_sad_x9_8x8( uint8_t *fenc, uint8_t *fdec, uint8_t edge[36], uint16_t *bitcosts, uint16_t *satds )
-;-----------------------------------------------------------------------------
-cglobal intra_sad_x9_8x8, 5,6,9
- %define fenc02 m4
- %define fenc13 m5
- %define fenc46 m6
- %define fenc57 m7
-%if ARCH_X86_64
- %define tmp m8
- %assign padbase 0x0
-%else
- %define tmp [rsp]
- %assign padbase 0x10
-%endif
- %assign pad 0x240+0x10+padbase-gprsize-(stack_offset&15)
- %define pred(i,j) [rsp+i*0x40+j*0x10+padbase]
-
- SUB rsp, pad
- movq fenc02, [r0+FENC_STRIDE* 0]
- movq fenc13, [r0+FENC_STRIDE* 1]
- movq fenc46, [r0+FENC_STRIDE* 4]
- movq fenc57, [r0+FENC_STRIDE* 5]
- movhps fenc02, [r0+FENC_STRIDE* 2]
- movhps fenc13, [r0+FENC_STRIDE* 3]
- movhps fenc46, [r0+FENC_STRIDE* 6]
- movhps fenc57, [r0+FENC_STRIDE* 7]
-
- ; save instruction size: avoid 4-byte memory offsets
- lea r0, [intra8x9_h1+128]
- %define off(m) (r0+m-(intra8x9_h1+128))
-
-; v
- movddup m0, [r2+16]
- mova pred(0,0), m0
- psadbw m1, m0, fenc02
- mova pred(0,1), m0
- psadbw m2, m0, fenc13
- mova pred(0,2), m0
- psadbw m3, m0, fenc46
- mova pred(0,3), m0
- psadbw m0, m0, fenc57
- paddw m1, m2
- paddw m0, m3
- paddw m0, m1
- movhlps m1, m0
- paddw m0, m1
- movd [r4+0], m0
-
-; h
- movq m0, [r2+7]
- pshufb m1, m0, [off(intra8x9_h1)]
- pshufb m2, m0, [off(intra8x9_h2)]
- mova pred(1,0), m1
- psadbw m1, fenc02
- mova pred(1,1), m2
- psadbw m2, fenc13
- paddw m1, m2
- pshufb m3, m0, [off(intra8x9_h3)]
- pshufb m2, m0, [off(intra8x9_h4)]
- mova pred(1,2), m3
- psadbw m3, fenc46
- mova pred(1,3), m2
- psadbw m2, fenc57
- paddw m1, m3
- paddw m1, m2
- movhlps m2, m1
- paddw m1, m2
- movd [r4+2], m1
-
- lea r5, [rsp+padbase+0x100]
- %define pred(i,j) [r5+i*0x40+j*0x10-0x100]
-
-; dc
- movhps m0, [r2+16]
- pxor m2, m2
- psadbw m0, m2
- movhlps m1, m0
- paddw m0, m1
- psrlw m0, 3
- pavgw m0, m2
- pshufb m0, m2
- mova pred(2,0), m0
- psadbw m1, m0, fenc02
- mova pred(2,1), m0
- psadbw m2, m0, fenc13
- mova pred(2,2), m0
- psadbw m3, m0, fenc46
- mova pred(2,3), m0
- psadbw m0, m0, fenc57
- paddw m1, m2
- paddw m0, m3
- paddw m0, m1
- movhlps m1, m0
- paddw m0, m1
- movd [r4+4], m0
-
-; ddl
-; Ft1 Ft2 Ft3 Ft4 Ft5 Ft6 Ft7 Ft8
-; Ft2 Ft3 Ft4 Ft5 Ft6 Ft7 Ft8 Ft9
-; Ft3 Ft4 Ft5 Ft6 Ft7 Ft8 Ft9 FtA
-; Ft4 Ft5 Ft6 Ft7 Ft8 Ft9 FtA FtB
-; Ft5 Ft6 Ft7 Ft8 Ft9 FtA FtB FtC
-; Ft6 Ft7 Ft8 Ft9 FtA FtB FtC FtD
-; Ft7 Ft8 Ft9 FtA FtB FtC FtD FtE
-; Ft8 Ft9 FtA FtB FtC FtD FtE FtF
- mova m0, [r2+16]
- movu m2, [r2+17]
- pslldq m1, m0, 1
- pavgb m3, m0, m2 ; Gt1 Gt2 Gt3 Gt4 Gt5 Gt6 Gt7 Gt8 Gt9 GtA GtB ___ ___ ___ ___ ___
- PRED4x4_LOWPASS m0, m1, m2, m0, tmp ; ___ Ft1 Ft2 Ft3 Ft4 Ft5 Ft6 Ft7 Ft8 Ft9 FtA FtB FtC FtD FtE FtF
- pshufb m1, m0, [off(intra8x9_ddl1)]
- pshufb m2, m0, [off(intra8x9_ddl2)]
- mova pred(3,0), m1
- psadbw m1, fenc02
- mova pred(3,1), m2
- psadbw m2, fenc13
- paddw m1, m2
- pshufb m2, m0, [off(intra8x9_ddl3)]
- mova pred(3,2), m2
- psadbw m2, fenc46
- paddw m1, m2
- pshufb m2, m0, [off(intra8x9_ddl4)]
- mova pred(3,3), m2
- psadbw m2, fenc57
- paddw m1, m2
- movhlps m2, m1
- paddw m1, m2
- movd [r4+6], m1
-
-; vl
-; Gt1 Gt2 Gt3 Gt4 Gt5 Gt6 Gt7 Gt8
-; Ft1 Ft2 Ft3 Ft4 Ft5 Ft6 Ft7 Ft8
-; Gt2 Gt3 Gt4 Gt5 Gt6 Gt7 Gt8 Gt9
-; Ft2 Ft3 Ft4 Ft5 Ft6 Ft7 Ft8 Ft9
-; Gt3 Gt4 Gt5 Gt6 Gt7 Gt8 Gt9 GtA
-; Ft3 Ft4 Ft5 Ft6 Ft7 Ft8 Ft9 FtA
-; Gt4 Gt5 Gt6 Gt7 Gt8 Gt9 GtA GtB
-; Ft4 Ft5 Ft6 Ft7 Ft8 Ft9 FtA FtB
- pshufb m1, m3, [off(intra8x9_vl1)]
- pshufb m2, m0, [off(intra8x9_vl2)]
- pshufb m3, m3, [off(intra8x9_vl3)]
- pshufb m0, m0, [off(intra8x9_vl4)]
- mova pred(7,0), m1
- psadbw m1, fenc02
- mova pred(7,1), m2
- psadbw m2, fenc13
- mova pred(7,2), m3
- psadbw m3, fenc46
- mova pred(7,3), m0
- psadbw m0, fenc57
- paddw m1, m2
- paddw m0, m3
- paddw m0, m1
- movhlps m1, m0
- paddw m0, m1
-%if cpuflag(sse4)
- pextrw [r4+14], m0, 0
-%else
- movd r5d, m0
- mov [r4+14], r5w
- lea r5, [rsp+padbase+0x100]
-%endif
-
-; ddr
-; Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5 Ft6
-; Fl0 Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5
-; Fl1 Fl0 Flt Ft0 Ft1 Ft2 Ft3 Ft4
-; Fl2 Fl1 Fl0 Flt Ft0 Ft1 Ft2 Ft3
-; Fl3 Fl2 Fl1 Fl0 Flt Ft0 Ft1 Ft2
-; Fl4 Fl3 Fl2 Fl1 Fl0 Flt Ft0 Ft1
-; Fl5 Fl4 Fl3 Fl2 Fl1 Fl0 Flt Ft0
-; Fl6 Fl5 Fl4 Fl3 Fl2 Fl1 Fl0 Flt
- movu m2, [r2+8]
- movu m0, [r2+7]
- movu m1, [r2+6]
- pavgb m3, m2, m0 ; Gl6 Gl5 Gl4 Gl3 Gl2 Gl1 Gl0 Glt Gt0 Gt1 Gt2 Gt3 Gt4 Gt5 Gt6 Gt7
- PRED4x4_LOWPASS m0, m1, m2, m0, tmp ; Fl7 Fl6 Fl5 Fl4 Fl3 Fl2 Fl1 Fl0 Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5 Ft6
- pshufb m1, m0, [off(intra8x9_ddr1)]
- pshufb m2, m0, [off(intra8x9_ddr2)]
- mova pred(4,0), m1
- psadbw m1, fenc02
- mova pred(4,1), m2
- psadbw m2, fenc13
- paddw m1, m2
- pshufb m2, m0, [off(intra8x9_ddr3)]
- mova pred(4,2), m2
- psadbw m2, fenc46
- paddw m1, m2
- pshufb m2, m0, [off(intra8x9_ddr4)]
- mova pred(4,3), m2
- psadbw m2, fenc57
- paddw m1, m2
- movhlps m2, m1
- paddw m1, m2
- movd [r4+8], m1
-
- add r0, 256
- add r5, 0xC0
- %define off(m) (r0+m-(intra8x9_h1+256+128))
- %define pred(i,j) [r5+i*0x40+j*0x10-0x1C0]
-
-; vr
-; Gt0 Gt1 Gt2 Gt3 Gt4 Gt5 Gt6 Gt7
-; Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5 Ft6
-; Fl0 Gt0 Gt1 Gt2 Gt3 Gt4 Gt5 Gt6
-; Fl1 Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5
-; Fl2 Fl0 Gt0 Gt1 Gt2 Gt3 Gt4 Gt5
-; Fl3 Fl1 Flt Ft0 Ft1 Ft2 Ft3 Ft4
-; Fl4 Fl2 Fl0 Gt0 Gt1 Gt2 Gt3 Gt4
-; Fl5 Fl3 Fl1 Flt Ft0 Ft1 Ft2 Ft3
- movsd m2, m3, m0 ; Fl7 Fl6 Fl5 Fl4 Fl3 Fl2 Fl1 Fl0 Gt0 Gt1 Gt2 Gt3 Gt4 Gt5 Gt6 Gt7
- pshufb m1, m2, [off(intra8x9_vr1)]
- pshufb m2, m2, [off(intra8x9_vr3)]
- mova pred(5,0), m1
- psadbw m1, fenc02
- mova pred(5,2), m2
- psadbw m2, fenc46
- paddw m1, m2
- pshufb m2, m0, [off(intra8x9_vr2)]
- mova pred(5,1), m2
- psadbw m2, fenc13
- paddw m1, m2
- pshufb m2, m0, [off(intra8x9_vr4)]
- mova pred(5,3), m2
- psadbw m2, fenc57
- paddw m1, m2
- movhlps m2, m1
- paddw m1, m2
- movd [r4+10], m1
-
-; hd
-; Glt Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5
-; Gl0 Fl0 Glt Flt Ft0 Ft1 Ft2 Ft3
-; Gl1 Fl1 Gl0 Fl0 Glt Flt Ft0 Ft1
-; Gl2 Fl2 Gl1 Fl1 Gl0 Fl0 Glt Flt
-; Gl3 Fl3 Gl2 Fl2 Gl1 Fl1 Gl0 Fl0
-; Gl4 Fl4 Gl3 Fl3 Gl2 Fl2 Gl1 Fl1
-; Gl5 Fl5 Gl4 Fl4 Gl3 Fl3 Gl2 Fl2
-; Gl6 Fl6 Gl5 Fl5 Gl4 Fl4 Gl3 Fl3
- pshufd m2, m3, q0001
-%if cpuflag(sse4)
- pblendw m2, m0, q3330 ; Gl2 Gl1 Gl0 Glt ___ Fl2 Fl1 Fl0 Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5 ___
-%else
- movss m1, m0, m2
- SWAP 1, 2
-%endif
- punpcklbw m0, m3 ; Fl7 Gl6 Fl6 Gl5 Fl5 Gl4 Fl4 Gl3 Fl3 Gl2 Fl2 Gl1 Fl1 Gl0 Fl0 ___
- pshufb m1, m2, [off(intra8x9_hd1)]
- pshufb m2, m2, [off(intra8x9_hd2)]
- mova pred(6,0), m1
- psadbw m1, fenc02
- mova pred(6,1), m2
- psadbw m2, fenc13
- paddw m1, m2
- pshufb m2, m0, [off(intra8x9_hd3)]
- pshufb m3, m0, [off(intra8x9_hd4)]
- mova pred(6,2), m2
- psadbw m2, fenc46
- mova pred(6,3), m3
- psadbw m3, fenc57
- paddw m1, m2
- paddw m1, m3
- movhlps m2, m1
- paddw m1, m2
- ; don't just store to [r4+12]. this is too close to the load of dqword [r4] and would cause a forwarding stall
- pslldq m1, 12
- SWAP 3, 1
-
-; hu
-; Gl0 Fl1 Gl1 Fl2 Gl2 Fl3 Gl3 Fl4
-; Gl1 Fl2 Gl2 Fl3 Gl3 Fl4 Gl4 Fl5
-; Gl2 Fl3 Gl3 Gl3 Gl4 Fl5 Gl5 Fl6
-; Gl3 Gl3 Gl4 Fl5 Gl5 Fl6 Gl6 Fl7
-; Gl4 Fl5 Gl5 Fl6 Gl6 Fl7 Gl7 Gl7
-; Gl5 Fl6 Gl6 Fl7 Gl7 Gl7 Gl7 Gl7
-; Gl6 Fl7 Gl7 Gl7 Gl7 Gl7 Gl7 Gl7
-; Gl7 Gl7 Gl7 Gl7 Gl7 Gl7 Gl7 Gl7
-%if cpuflag(sse4)
- pinsrb m0, [r2+7], 15 ; Gl7
-%else
- movd m1, [r2+7]
- pslldq m0, 1
- palignr m1, m0, 1
- SWAP 0, 1
-%endif
- pshufb m1, m0, [off(intra8x9_hu1)]
- pshufb m2, m0, [off(intra8x9_hu2)]
- mova pred(8,0), m1
- psadbw m1, fenc02
- mova pred(8,1), m2
- psadbw m2, fenc13
- paddw m1, m2
- pshufb m2, m0, [off(intra8x9_hu3)]
- pshufb m0, m0, [off(intra8x9_hu4)]
- mova pred(8,2), m2
- psadbw m2, fenc46
- mova pred(8,3), m0
- psadbw m0, fenc57
- paddw m1, m2
- paddw m1, m0
- movhlps m2, m1
- paddw m1, m2
- movd r2d, m1
-
- movu m0, [r3]
- por m3, [r4]
- paddw m0, m3
- mova [r4], m0
- movzx r5d, word [r3+16]
- add r2d, r5d
- mov [r4+16], r2w
-
-%if cpuflag(sse4)
- phminposuw m0, m0 ; v,h,dc,ddl,ddr,vr,hd,vl
- movd eax, m0
-%else
- ; 8x8 sad is up to 14 bits; +bitcosts and saturate -> 14 bits; pack with 2 bit index
- paddusw m0, m0
- paddusw m0, m0
- paddw m0, [off(pw_s00112233)]
- movhlps m1, m0
- pminsw m0, m1
- pshuflw m1, m0, q0032
- pminsw m0, m1
- movd eax, m0
- ; repack with 3 bit index
- xor eax, 0x80008000
- movzx r3d, ax
- shr eax, 15
- add r3d, r3d
- or eax, 1
- cmp eax, r3d
- cmovg eax, r3d
- ; reverse to phminposuw order
- mov r3d, eax
- and eax, 7
- shr r3d, 3
- shl eax, 16
- or eax, r3d
-%endif
- add r2d, 8<<16
- cmp ax, r2w
- cmovg eax, r2d
-
- mov r2d, eax
- shr r2d, 16
- shl r2d, 6
- add r1, 4*FDEC_STRIDE
- mova m0, [rsp+padbase+r2+0x00]
- mova m1, [rsp+padbase+r2+0x10]
- mova m2, [rsp+padbase+r2+0x20]
- mova m3, [rsp+padbase+r2+0x30]
- movq [r1+FDEC_STRIDE*-4], m0
- movhps [r1+FDEC_STRIDE*-2], m0
- movq [r1+FDEC_STRIDE*-3], m1
- movhps [r1+FDEC_STRIDE*-1], m1
- movq [r1+FDEC_STRIDE* 0], m2
- movhps [r1+FDEC_STRIDE* 2], m2
- movq [r1+FDEC_STRIDE* 1], m3
- movhps [r1+FDEC_STRIDE* 3], m3
- ADD rsp, pad
- RET
-
-%if ARCH_X86_64
-;-----------------------------------------------------------------------------
-; int intra_sa8d_x9_8x8( uint8_t *fenc, uint8_t *fdec, uint8_t edge[36], uint16_t *bitcosts, uint16_t *satds )
-;-----------------------------------------------------------------------------
-cglobal intra_sa8d_x9_8x8, 5,6,16
- %assign pad 0x2c0+0x10-gprsize-(stack_offset&15)
- %define fenc_buf rsp
- %define pred_buf rsp+0x80
- SUB rsp, pad
- mova m15, [hmul_8p]
- pxor m8, m8
-%assign %%i 0
-%rep 8
- movddup m %+ %%i, [r0+%%i*FENC_STRIDE]
- pmaddubsw m9, m %+ %%i, m15
- punpcklbw m %+ %%i, m8
- mova [fenc_buf+%%i*0x10], m9
-%assign %%i %%i+1
-%endrep
-
- ; save instruction size: avoid 4-byte memory offsets
- lea r0, [intra8x9_h1+0x80]
- %define off(m) (r0+m-(intra8x9_h1+0x80))
- lea r5, [pred_buf+0x80]
-
-; v, h, dc
- HADAMARD8_2D 0, 1, 2, 3, 4, 5, 6, 7, 8
- pabsw m11, m1
-%assign %%i 2
-%rep 6
- pabsw m8, m %+ %%i
- paddw m11, m8
-%assign %%i %%i+1
-%endrep
-
- ; 1D hadamard of edges
- movq m8, [r2+7]
- movddup m9, [r2+16]
- mova [r5-0x80], m9
- mova [r5-0x70], m9
- mova [r5-0x60], m9
- mova [r5-0x50], m9
- punpcklwd m8, m8
- pshufb m9, [intrax3_shuf]
- pmaddubsw m8, [pb_pppm]
- pmaddubsw m9, [pb_pppm]
- HSUMSUB2 psignw, m8, m9, m12, m13, m9, q1032, [pw_ppppmmmm]
- HSUMSUB2 psignw, m8, m9, m12, m13, m9, q2301, [pw_ppmmppmm]
-
- ; dc
- paddw m10, m8, m9
- paddw m10, [pw_8]
- pand m10, [sw_f0]
- psrlw m12, m10, 4
- psllw m10, 2
- pxor m13, m13
- pshufb m12, m13
- mova [r5+0x00], m12
- mova [r5+0x10], m12
- mova [r5+0x20], m12
- mova [r5+0x30], m12
-
- ; differences
- psllw m8, 3 ; left edge
- psubw m8, m0
- psubw m10, m0
- pabsw m8, m8 ; 1x8 sum
- pabsw m10, m10
- paddw m8, m11
- paddw m11, m10
- punpcklwd m0, m1
- punpcklwd m2, m3
- punpcklwd m4, m5
- punpcklwd m6, m7
- punpckldq m0, m2
- punpckldq m4, m6
- punpcklqdq m0, m4 ; transpose
- psllw m9, 3 ; top edge
- psrldq m10, m11, 2 ; 8x7 sum
- psubw m0, m9 ; 8x1 sum
- pabsw m0, m0
- paddw m10, m0
-
- phaddd m10, m8 ; logically phaddw, but this is faster and it won't overflow
- psrlw m11, 1
- psrlw m10, 1
-
-; store h
- movq m3, [r2+7]
- pshufb m0, m3, [off(intra8x9_h1)]
- pshufb m1, m3, [off(intra8x9_h2)]
- pshufb m2, m3, [off(intra8x9_h3)]
- pshufb m3, m3, [off(intra8x9_h4)]
- mova [r5-0x40], m0
- mova [r5-0x30], m1
- mova [r5-0x20], m2
- mova [r5-0x10], m3
-
-; ddl
- mova m8, [r2+16]
- movu m2, [r2+17]
- pslldq m1, m8, 1
- pavgb m9, m8, m2
- PRED4x4_LOWPASS m8, m1, m2, m8, m3
- pshufb m0, m8, [off(intra8x9_ddl1)]
- pshufb m1, m8, [off(intra8x9_ddl2)]
- pshufb m2, m8, [off(intra8x9_ddl3)]
- pshufb m3, m8, [off(intra8x9_ddl4)]
- add r5, 0x40
- call .sa8d
- phaddd m11, m0
-
-; vl
- pshufb m0, m9, [off(intra8x9_vl1)]
- pshufb m1, m8, [off(intra8x9_vl2)]
- pshufb m2, m9, [off(intra8x9_vl3)]
- pshufb m3, m8, [off(intra8x9_vl4)]
- add r5, 0x100
- call .sa8d
- phaddd m10, m11
- mova m12, m0
-
-; ddr
- movu m2, [r2+8]
- movu m8, [r2+7]
- movu m1, [r2+6]
- pavgb m9, m2, m8
- PRED4x4_LOWPASS m8, m1, m2, m8, m3
- pshufb m0, m8, [off(intra8x9_ddr1)]
- pshufb m1, m8, [off(intra8x9_ddr2)]
- pshufb m2, m8, [off(intra8x9_ddr3)]
- pshufb m3, m8, [off(intra8x9_ddr4)]
- sub r5, 0xc0
- call .sa8d
- mova m11, m0
-
- add r0, 0x100
- %define off(m) (r0+m-(intra8x9_h1+0x180))
-
-; vr
- movsd m2, m9, m8
- pshufb m0, m2, [off(intra8x9_vr1)]
- pshufb m1, m8, [off(intra8x9_vr2)]
- pshufb m2, m2, [off(intra8x9_vr3)]
- pshufb m3, m8, [off(intra8x9_vr4)]
- add r5, 0x40
- call .sa8d
- phaddd m11, m0
-
-; hd
-%if cpuflag(sse4)
- pshufd m1, m9, q0001
- pblendw m1, m8, q3330
-%else
- pshufd m2, m9, q0001
- movss m1, m8, m2
-%endif
- punpcklbw m8, m9
- pshufb m0, m1, [off(intra8x9_hd1)]
- pshufb m1, m1, [off(intra8x9_hd2)]
- pshufb m2, m8, [off(intra8x9_hd3)]
- pshufb m3, m8, [off(intra8x9_hd4)]
- add r5, 0x40
- call .sa8d
- phaddd m0, m12
- phaddd m11, m0
-
-; hu
-%if cpuflag(sse4)
- pinsrb m8, [r2+7], 15
-%else
- movd m9, [r2+7]
- pslldq m8, 1
- palignr m9, m8, 1
- SWAP 8, 9
-%endif
- pshufb m0, m8, [off(intra8x9_hu1)]
- pshufb m1, m8, [off(intra8x9_hu2)]
- pshufb m2, m8, [off(intra8x9_hu3)]
- pshufb m3, m8, [off(intra8x9_hu4)]
- add r5, 0x80
- call .sa8d
-
- pmaddwd m0, [pw_1]
- phaddw m10, m11
- movhlps m1, m0
- paddw m0, m1
- pshuflw m1, m0, q0032
- pavgw m0, m1
- pxor m2, m2
- pavgw m10, m2
- movd r2d, m0
-
- movu m0, [r3]
- paddw m0, m10
- mova [r4], m0
- movzx r5d, word [r3+16]
- add r2d, r5d
- mov [r4+16], r2w
-
-%if cpuflag(sse4)
- phminposuw m0, m0
- movd eax, m0
-%else
- ; 8x8 sa8d is up to 15 bits; +bitcosts and saturate -> 15 bits; pack with 1 bit index
- paddusw m0, m0
- paddw m0, [off(pw_s00001111)]
- movhlps m1, m0
- pminsw m0, m1
- pshuflw m1, m0, q0032
- mova m2, m0
- pminsw m0, m1
- pcmpgtw m2, m1 ; 2nd index bit
- movd r3d, m0
- movd r4d, m2
- ; repack with 3 bit index
- xor r3d, 0x80008000
- and r4d, 0x00020002
- movzx eax, r3w
- movzx r5d, r4w
- shr r3d, 16
- shr r4d, 16
- lea eax, [rax*4+r5]
- lea r3d, [ r3*4+r4+1]
- cmp eax, r3d
- cmovg eax, r3d
- ; reverse to phminposuw order
- mov r3d, eax
- and eax, 7
- shr r3d, 3
- shl eax, 16
- or eax, r3d
-%endif
- add r2d, 8<<16
- cmp ax, r2w
- cmovg eax, r2d
-
- mov r2d, eax
- shr r2d, 16
- shl r2d, 6
- add r1, 4*FDEC_STRIDE
- mova m0, [pred_buf+r2+0x00]
- mova m1, [pred_buf+r2+0x10]
- mova m2, [pred_buf+r2+0x20]
- mova m3, [pred_buf+r2+0x30]
- movq [r1+FDEC_STRIDE*-4], m0
- movhps [r1+FDEC_STRIDE*-2], m0
- movq [r1+FDEC_STRIDE*-3], m1
- movhps [r1+FDEC_STRIDE*-1], m1
- movq [r1+FDEC_STRIDE* 0], m2
- movhps [r1+FDEC_STRIDE* 2], m2
- movq [r1+FDEC_STRIDE* 1], m3
- movhps [r1+FDEC_STRIDE* 3], m3
- ADD rsp, pad
- RET
-
-ALIGN 16
-.sa8d:
- %xdefine mret m0
- %xdefine fenc_buf fenc_buf+gprsize
- mova [r5+0x00], m0
- mova [r5+0x10], m1
- mova [r5+0x20], m2
- mova [r5+0x30], m3
- movddup m4, m0
- movddup m5, m1
- movddup m6, m2
- movddup m7, m3
- punpckhqdq m0, m0
- punpckhqdq m1, m1
- punpckhqdq m2, m2
- punpckhqdq m3, m3
- PERMUTE 0,4, 1,5, 2,0, 3,1, 4,6, 5,7, 6,2, 7,3
- pmaddubsw m0, m15
- pmaddubsw m1, m15
- psubw m0, [fenc_buf+0x00]
- psubw m1, [fenc_buf+0x10]
- pmaddubsw m2, m15
- pmaddubsw m3, m15
- psubw m2, [fenc_buf+0x20]
- psubw m3, [fenc_buf+0x30]
- pmaddubsw m4, m15
- pmaddubsw m5, m15
- psubw m4, [fenc_buf+0x40]
- psubw m5, [fenc_buf+0x50]
- pmaddubsw m6, m15
- pmaddubsw m7, m15
- psubw m6, [fenc_buf+0x60]
- psubw m7, [fenc_buf+0x70]
- HADAMARD8_2D_HMUL 0, 1, 2, 3, 4, 5, 6, 7, 13, 14
- paddw m0, m1
- paddw m0, m2
- paddw mret, m0, m3
- ret
-%endif ; ARCH_X86_64
-%endmacro ; INTRA8_X9
-
-; in: r0=pix, r1=stride, r2=stride*3, r3=tmp, m6=mask_ac4, m7=0
-; out: [tmp]=hadamard4, m0=satd
-INIT_MMX mmx2
-cglobal hadamard_ac_4x4
-%if HIGH_BIT_DEPTH
- mova m0, [r0]
- mova m1, [r0+r1]
- mova m2, [r0+r1*2]
- mova m3, [r0+r2]
-%else ; !HIGH_BIT_DEPTH
- movh m0, [r0]
- movh m1, [r0+r1]
- movh m2, [r0+r1*2]
- movh m3, [r0+r2]
- punpcklbw m0, m7
- punpcklbw m1, m7
- punpcklbw m2, m7
- punpcklbw m3, m7
-%endif ; HIGH_BIT_DEPTH
- HADAMARD4_2D 0, 1, 2, 3, 4
- mova [r3], m0
- mova [r3+8], m1
- mova [r3+16], m2
- mova [r3+24], m3
- ABSW m0, m0, m4
- ABSW m1, m1, m4
- pand m0, m6
- ABSW m2, m2, m4
- ABSW m3, m3, m4
- paddw m0, m1
- paddw m2, m3
- paddw m0, m2
- SAVE_MM_PERMUTATION
- ret
-
-cglobal hadamard_ac_2x2max
- mova m0, [r3+0x00]
- mova m1, [r3+0x20]
- mova m2, [r3+0x40]
- mova m3, [r3+0x60]
- sub r3, 8
- SUMSUB_BADC w, 0, 1, 2, 3, 4
- ABSW2 m0, m2, m0, m2, m4, m5
- ABSW2 m1, m3, m1, m3, m4, m5
- HADAMARD 0, max, 0, 2, 4, 5
- HADAMARD 0, max, 1, 3, 4, 5
-%if HIGH_BIT_DEPTH
- pmaddwd m0, m7
- pmaddwd m1, m7
- paddd m6, m0
- paddd m6, m1
-%else ; !HIGH_BIT_DEPTH
- paddw m7, m0
- paddw m7, m1
-%endif ; HIGH_BIT_DEPTH
- SAVE_MM_PERMUTATION
- ret
-
-%macro AC_PREP 2
-%if HIGH_BIT_DEPTH
- pmaddwd %1, %2
-%endif
-%endmacro
-
-%macro AC_PADD 3
-%if HIGH_BIT_DEPTH
- AC_PREP %2, %3
- paddd %1, %2
-%else
- paddw %1, %2
-%endif ; HIGH_BIT_DEPTH
-%endmacro
-
-cglobal hadamard_ac_8x8
- mova m6, [mask_ac4]
-%if HIGH_BIT_DEPTH
- mova m7, [pw_1]
-%else
- pxor m7, m7
-%endif ; HIGH_BIT_DEPTH
- call hadamard_ac_4x4_mmx2
- add r0, 4*SIZEOF_PIXEL
- add r3, 32
- mova m5, m0
- AC_PREP m5, m7
- call hadamard_ac_4x4_mmx2
- lea r0, [r0+4*r1]
- add r3, 64
- AC_PADD m5, m0, m7
- call hadamard_ac_4x4_mmx2
- sub r0, 4*SIZEOF_PIXEL
- sub r3, 32
- AC_PADD m5, m0, m7
- call hadamard_ac_4x4_mmx2
- AC_PADD m5, m0, m7
- sub r3, 40
- mova [rsp+gprsize+8], m5 ; save satd
-%if HIGH_BIT_DEPTH
- pxor m6, m6
-%endif
-%rep 3
- call hadamard_ac_2x2max_mmx2
-%endrep
- mova m0, [r3+0x00]
- mova m1, [r3+0x20]
- mova m2, [r3+0x40]
- mova m3, [r3+0x60]
- SUMSUB_BADC w, 0, 1, 2, 3, 4
- HADAMARD 0, sumsub, 0, 2, 4, 5
- ABSW2 m1, m3, m1, m3, m4, m5
- ABSW2 m0, m2, m0, m2, m4, m5
- HADAMARD 0, max, 1, 3, 4, 5
-%if HIGH_BIT_DEPTH
- pand m0, [mask_ac4]
- pmaddwd m1, m7
- pmaddwd m0, m7
- pmaddwd m2, m7
- paddd m6, m1
- paddd m0, m2
- paddd m6, m6
- paddd m0, m6
- SWAP 0, 6
-%else ; !HIGH_BIT_DEPTH
- pand m6, m0
- paddw m7, m1
- paddw m6, m2
- paddw m7, m7
- paddw m6, m7
-%endif ; HIGH_BIT_DEPTH
- mova [rsp+gprsize], m6 ; save sa8d
- SWAP 0, 6
- SAVE_MM_PERMUTATION
- ret
-
-%macro HADAMARD_AC_WXH_SUM_MMX 2
- mova m1, [rsp+1*mmsize]
-%if HIGH_BIT_DEPTH
-%if %1*%2 >= 128
- paddd m0, [rsp+2*mmsize]
- paddd m1, [rsp+3*mmsize]
-%endif
-%if %1*%2 == 256
- mova m2, [rsp+4*mmsize]
- paddd m1, [rsp+5*mmsize]
- paddd m2, [rsp+6*mmsize]
- mova m3, m0
- paddd m1, [rsp+7*mmsize]
- paddd m0, m2
-%endif
- psrld m0, 1
- HADDD m0, m2
- psrld m1, 1
- HADDD m1, m3
-%else ; !HIGH_BIT_DEPTH
-%if %1*%2 >= 128
- paddusw m0, [rsp+2*mmsize]
- paddusw m1, [rsp+3*mmsize]
-%endif
-%if %1*%2 == 256
- mova m2, [rsp+4*mmsize]
- paddusw m1, [rsp+5*mmsize]
- paddusw m2, [rsp+6*mmsize]
- mova m3, m0
- paddusw m1, [rsp+7*mmsize]
- pxor m3, m2
- pand m3, [pw_1]
- pavgw m0, m2
- psubusw m0, m3
- HADDUW m0, m2
-%else
- psrlw m0, 1
- HADDW m0, m2
-%endif
- psrlw m1, 1
- HADDW m1, m3
-%endif ; HIGH_BIT_DEPTH
-%endmacro
-
-%macro HADAMARD_AC_WXH_MMX 2
-cglobal pixel_hadamard_ac_%1x%2, 2,4
- %assign pad 16-gprsize-(stack_offset&15)
- %define ysub r1
- FIX_STRIDES r1
- sub rsp, 16+128+pad
- lea r2, [r1*3]
- lea r3, [rsp+16]
- call hadamard_ac_8x8_mmx2
-%if %2==16
- %define ysub r2
- lea r0, [r0+r1*4]
- sub rsp, 16
- call hadamard_ac_8x8_mmx2
-%endif
-%if %1==16
- neg ysub
- sub rsp, 16
- lea r0, [r0+ysub*4+8*SIZEOF_PIXEL]
- neg ysub
- call hadamard_ac_8x8_mmx2
-%if %2==16
- lea r0, [r0+r1*4]
- sub rsp, 16
- call hadamard_ac_8x8_mmx2
-%endif
-%endif
- HADAMARD_AC_WXH_SUM_MMX %1, %2
- movd edx, m0
- movd eax, m1
- shr edx, 1
-%if ARCH_X86_64
- shl rdx, 32
- add rax, rdx
-%endif
- add rsp, 128+%1*%2/4+pad
- RET
-%endmacro ; HADAMARD_AC_WXH_MMX
-
-HADAMARD_AC_WXH_MMX 16, 16
-HADAMARD_AC_WXH_MMX 8, 16
-HADAMARD_AC_WXH_MMX 16, 8
-HADAMARD_AC_WXH_MMX 8, 8
-
-%macro LOAD_INC_8x4W_SSE2 5
-%if HIGH_BIT_DEPTH
- movu m%1, [r0]
- movu m%2, [r0+r1]
- movu m%3, [r0+r1*2]
- movu m%4, [r0+r2]
-%ifidn %1, 0
- lea r0, [r0+r1*4]
-%endif
-%else ; !HIGH_BIT_DEPTH
- movh m%1, [r0]
- movh m%2, [r0+r1]
- movh m%3, [r0+r1*2]
- movh m%4, [r0+r2]
-%ifidn %1, 0
- lea r0, [r0+r1*4]
-%endif
- punpcklbw m%1, m%5
- punpcklbw m%2, m%5
- punpcklbw m%3, m%5
- punpcklbw m%4, m%5
-%endif ; HIGH_BIT_DEPTH
-%endmacro
-
-%macro LOAD_INC_8x4W_SSSE3 5
- LOAD_DUP_4x8P %3, %4, %1, %2, [r0+r1*2], [r0+r2], [r0], [r0+r1]
-%ifidn %1, 0
- lea r0, [r0+r1*4]
-%endif
- HSUMSUB %1, %2, %3, %4, %5
-%endmacro
-
-%macro HADAMARD_AC_SSE2 0
-; in: r0=pix, r1=stride, r2=stride*3
-; out: [esp+16]=sa8d, [esp+32]=satd, r0+=stride*4
-cglobal hadamard_ac_8x8
-%if ARCH_X86_64
- %define spill0 m8
- %define spill1 m9
- %define spill2 m10
-%else
- %define spill0 [rsp+gprsize]
- %define spill1 [rsp+gprsize+mmsize]
- %define spill2 [rsp+gprsize+mmsize*2]
-%endif
-%if HIGH_BIT_DEPTH
- %define vertical 1
-%elif cpuflag(ssse3) && notcpuflag(atom)
- %define vertical 0
- ;LOAD_INC loads sumsubs
- mova m7, [hmul_8p]
-%else
- %define vertical 1
- ;LOAD_INC only unpacks to words
- pxor m7, m7
-%endif
- LOAD_INC_8x4W 0, 1, 2, 3, 7
-%if vertical
- HADAMARD4_2D_SSE 0, 1, 2, 3, 4
-%else
- HADAMARD4_V 0, 1, 2, 3, 4
-%endif
- mova spill0, m1
- SWAP 1, 7
- LOAD_INC_8x4W 4, 5, 6, 7, 1
-%if vertical
- HADAMARD4_2D_SSE 4, 5, 6, 7, 1
-%else
- HADAMARD4_V 4, 5, 6, 7, 1
- ; FIXME SWAP
- mova m1, spill0
- mova spill0, m6
- mova spill1, m7
- HADAMARD 1, sumsub, 0, 1, 6, 7
- HADAMARD 1, sumsub, 2, 3, 6, 7
- mova m6, spill0
- mova m7, spill1
- mova spill0, m1
- mova spill1, m0
- HADAMARD 1, sumsub, 4, 5, 1, 0
- HADAMARD 1, sumsub, 6, 7, 1, 0
- mova m0, spill1
-%endif
- mova spill1, m2
- mova spill2, m3
- ABSW m1, m0, m0
- ABSW m2, m4, m4
- ABSW m3, m5, m5
- paddw m1, m2
- SUMSUB_BA w, 0, 4
-%if vertical
- pand m1, [mask_ac4]
-%else
- pand m1, [mask_ac4b]
-%endif
- AC_PREP m1, [pw_1]
- ABSW m2, spill0
- AC_PADD m1, m3, [pw_1]
- ABSW m3, spill1
- AC_PADD m1, m2, [pw_1]
- ABSW m2, spill2
- AC_PADD m1, m3, [pw_1]
- ABSW m3, m6, m6
- AC_PADD m1, m2, [pw_1]
- ABSW m2, m7, m7
- AC_PADD m1, m3, [pw_1]
- AC_PADD m1, m2, [pw_1]
- paddw m3, m7, spill2
- psubw m7, spill2
- mova [rsp+gprsize+mmsize*2], m1 ; save satd
- paddw m2, m6, spill1
- psubw m6, spill1
- paddw m1, m5, spill0
- psubw m5, spill0
- %assign %%x 2
-%if vertical
- %assign %%x 4
-%endif
- mova spill1, m4
- HADAMARD %%x, amax, 3, 7, 4
- HADAMARD %%x, amax, 2, 6, 7, 4
- mova m4, spill1
- HADAMARD %%x, amax, 1, 5, 6, 7
- HADAMARD %%x, sumsub, 0, 4, 5, 6
- AC_PREP m2, [pw_1]
- AC_PADD m2, m3, [pw_1]
- AC_PADD m2, m1, [pw_1]
-%if HIGH_BIT_DEPTH
- paddd m2, m2
-%else
- paddw m2, m2
-%endif ; HIGH_BIT_DEPTH
- ABSW m4, m4, m7
- pand m0, [mask_ac8]
- ABSW m0, m0, m7
- AC_PADD m2, m4, [pw_1]
- AC_PADD m2, m0, [pw_1]
- mova [rsp+gprsize+mmsize], m2 ; save sa8d
- SWAP 0, 2
- SAVE_MM_PERMUTATION
- ret
-
-HADAMARD_AC_WXH_SSE2 16, 16
-HADAMARD_AC_WXH_SSE2 16, 8
-%if mmsize <= 16
-HADAMARD_AC_WXH_SSE2 8, 16
-HADAMARD_AC_WXH_SSE2 8, 8
-%endif
-%endmacro ; HADAMARD_AC_SSE2
-
-%macro HADAMARD_AC_WXH_SUM_SSE2 2
- mova m1, [rsp+2*mmsize]
-%if HIGH_BIT_DEPTH
-%if %1*%2 >= 128
- paddd m0, [rsp+3*mmsize]
- paddd m1, [rsp+4*mmsize]
-%endif
-%if %1*%2 == 256
- paddd m0, [rsp+5*mmsize]
- paddd m1, [rsp+6*mmsize]
- paddd m0, [rsp+7*mmsize]
- paddd m1, [rsp+8*mmsize]
- psrld m0, 1
-%endif
- HADDD xm0, xm2
- HADDD xm1, xm3
-%else ; !HIGH_BIT_DEPTH
-%if %1*%2*16/mmsize >= 128
- paddusw m0, [rsp+3*mmsize]
- paddusw m1, [rsp+4*mmsize]
-%endif
-%if %1*%2*16/mmsize == 256
- paddusw m0, [rsp+5*mmsize]
- paddusw m1, [rsp+6*mmsize]
- paddusw m0, [rsp+7*mmsize]
- paddusw m1, [rsp+8*mmsize]
- psrlw m0, 1
-%endif
-%if mmsize==32
- vextracti128 xm2, m0, 1
- vextracti128 xm3, m1, 1
- paddusw xm0, xm2
- paddusw xm1, xm3
-%endif
- HADDUW xm0, xm2
- HADDW xm1, xm3
-%endif ; HIGH_BIT_DEPTH
-%endmacro
-
-; struct { int satd, int sa8d; } pixel_hadamard_ac_16x16( uint8_t *pix, int stride )
-%macro HADAMARD_AC_WXH_SSE2 2
-cglobal pixel_hadamard_ac_%1x%2, 2,4,11
- %define ysub r1
- FIX_STRIDES r1
- mov r3, rsp
- and rsp, ~(mmsize-1)
- sub rsp, mmsize*3
- lea r2, [r1*3]
- call hadamard_ac_8x8
-%if %2==16
- %define ysub r2
- lea r0, [r0+r1*4]
- sub rsp, mmsize*2
- call hadamard_ac_8x8
-%endif
-%if %1==16 && mmsize <= 16
- neg ysub
- sub rsp, mmsize*2
- lea r0, [r0+ysub*4+8*SIZEOF_PIXEL]
- neg ysub
- call hadamard_ac_8x8
-%if %2==16
- lea r0, [r0+r1*4]
- sub rsp, mmsize*2
- call hadamard_ac_8x8
-%endif
-%endif
- HADAMARD_AC_WXH_SUM_SSE2 %1, %2
- movd edx, xm0
- movd eax, xm1
- shr edx, 2 - (%1*%2*16/mmsize >> 8)
- shr eax, 1
-%if ARCH_X86_64
- shl rdx, 32
- add rax, rdx
-%endif
- mov rsp, r3
- RET
-%endmacro ; HADAMARD_AC_WXH_SSE2
-
-; instantiate satds
-
-%if ARCH_X86_64 == 0
-cextern pixel_sa8d_8x8_internal_mmx2
-INIT_MMX mmx2
-;SA8D
-%endif
-
%define TRANS TRANS_SSE2
%define DIFFOP DIFF_UNPACK_SSE2
-%define LOAD_INC_8x4W LOAD_INC_8x4W_SSE2
%define LOAD_SUMSUB_8x4P LOAD_DIFF_8x4P
%define LOAD_SUMSUB_16P LOAD_SUMSUB_16P_SSE2
%define movdqa movaps ; doesn't hurt pre-nehalem, might as well save size
@@ -9491,19 +7344,11 @@
%if ARCH_X86_64
SA8D_SATD
%endif
-%if HIGH_BIT_DEPTH == 0
-INTRA_SA8D_SSE2
-%endif
-INIT_MMX mmx2
-INTRA_X3_MMX
-INIT_XMM sse2
-HADAMARD_AC_SSE2
%if HIGH_BIT_DEPTH == 0
INIT_XMM ssse3,atom
SATDS_SSE2
SA8D
-HADAMARD_AC_SSE2
%if ARCH_X86_64
SA8D_SATD
%endif
@@ -9512,42 +7357,27 @@
%define DIFFOP DIFF_SUMSUB_SSSE3
%define LOAD_DUP_4x8P LOAD_DUP_4x8P_CONROE
%if HIGH_BIT_DEPTH == 0
-%define LOAD_INC_8x4W LOAD_INC_8x4W_SSSE3
%define LOAD_SUMSUB_8x4P LOAD_SUMSUB_8x4P_SSSE3
%define LOAD_SUMSUB_16P LOAD_SUMSUB_16P_SSSE3
%endif
INIT_XMM ssse3
SATDS_SSE2
SA8D
-HADAMARD_AC_SSE2
%if ARCH_X86_64
SA8D_SATD
%endif
-%if HIGH_BIT_DEPTH == 0
-INTRA_X9
-INTRA8_X9
-%endif
%undef movdqa ; nehalem doesn't like movaps
%undef movdqu ; movups
%undef punpcklqdq ; or movlhps
-%if HIGH_BIT_DEPTH == 0
-INIT_MMX ssse3
-INTRA_X3_MMX
-%endif
%define TRANS TRANS_SSE4
%define LOAD_DUP_4x8P LOAD_DUP_4x8P_PENRYN
INIT_XMM sse4
SATDS_SSE2
SA8D
-HADAMARD_AC_SSE2
%if ARCH_X86_64
SA8D_SATD
%endif
-%if HIGH_BIT_DEPTH == 0
-INTRA_X9
-INTRA8_X9
-%endif
; Sandy/Ivy Bridge and Bulldozer do movddup in the load unit, so
; it's effectively free.
@@ -9558,11 +7388,6 @@
%if ARCH_X86_64
SA8D_SATD
%endif
-%if HIGH_BIT_DEPTH == 0
-INTRA_X9
-INTRA8_X9
-%endif
-HADAMARD_AC_SSE2
%define TRANS TRANS_XOP
INIT_XMM xop
@@ -9571,19 +7396,12 @@
%if ARCH_X86_64
SA8D_SATD
%endif
-%if HIGH_BIT_DEPTH == 0
-INTRA_X9
-; no xop INTRA8_X9. it's slower than avx on bulldozer. dunno why.
-%endif
-HADAMARD_AC_SSE2
%if HIGH_BIT_DEPTH == 0
%define LOAD_SUMSUB_8x4P LOAD_SUMSUB8_16x4P_AVX2
%define LOAD_DUP_4x8P LOAD_DUP_4x16P_AVX2
%define TRANS TRANS_SSE4
-INIT_YMM avx2
-HADAMARD_AC_SSE2
%if ARCH_X86_64
SA8D_SATD
%endif
@@ -9708,189 +7526,6 @@
add eax, 1
shr eax, 1
RET
-
-cglobal intra_sad_x9_8x8, 5,7,8
- %define pred(i,j) [rsp+i*0x40+j*0x20]
-
- mov r6, rsp
- and rsp, ~31
- sub rsp, 0x240
- movu m5, [r0+0*FENC_STRIDE]
- movu m6, [r0+4*FENC_STRIDE]
- punpcklqdq m5, [r0+2*FENC_STRIDE]
- punpcklqdq m6, [r0+6*FENC_STRIDE]
-
- ; save instruction size: avoid 4-byte memory offsets
- lea r0, [intra8x9_h1+128]
- %define off(m) (r0+m-(intra8x9_h1+128))
-
- vpbroadcastq m0, [r2+16]
- psadbw m4, m0, m5
- psadbw m2, m0, m6
- mova pred(0,0), m0
- mova pred(0,1), m0
- paddw m4, m2
-
- vpbroadcastq m1, [r2+7]
- pshufb m3, m1, [off(intra8x9_h1)]
- pshufb m2, m1, [off(intra8x9_h3)]
- mova pred(1,0), m3
- mova pred(1,1), m2
- psadbw m3, m5
- psadbw m2, m6
- paddw m3, m2
-
- lea r5, [rsp+0x100]
- %define pred(i,j) [r5+i*0x40+j*0x20-0x100]
-
- ; combine the first two
- pslldq m3, 2
- por m4, m3
-
- pxor m2, m2
- psadbw m0, m2
- psadbw m1, m2
- paddw m0, m1
- psrlw m0, 3
- pavgw m0, m2
- pshufb m0, m2
- mova pred(2,0), m0
- mova pred(2,1), m0
- psadbw m3, m0, m5
- psadbw m2, m0, m6
- paddw m3, m2
-
- pslldq m3, 4
- por m4, m3
-
- vbroadcasti128 m0, [r2+16]
- vbroadcasti128 m2, [r2+17]
- pslldq m1, m0, 1
- pavgb m3, m0, m2
- PRED4x4_LOWPASS m0, m1, m2, m0, m7
- pshufb m1, m0, [off(intra8x9_ddl1)]
- pshufb m2, m0, [off(intra8x9_ddl3)]
- mova pred(3,0), m1
- mova pred(3,1), m2
- psadbw m1, m5
- psadbw m2, m6
- paddw m1, m2
-
- pslldq m1, 6
- por m4, m1
- vextracti128 xm1, m4, 1
- paddw xm4, xm1
- mova [r4], xm4
-
- ; for later
- vinserti128 m7, m3, xm0, 1
-
- vbroadcasti128 m2, [r2+8]
- vbroadcasti128 m0, [r2+7]
- vbroadcasti128 m1, [r2+6]
- pavgb m3, m2, m0
- PRED4x4_LOWPASS m0, m1, m2, m0, m4
- pshufb m1, m0, [off(intra8x9_ddr1)]
- pshufb m2, m0, [off(intra8x9_ddr3)]
- mova pred(4,0), m1
- mova pred(4,1), m2
- psadbw m4, m1, m5
- psadbw m2, m6
- paddw m4, m2
-
- add r0, 256
- add r5, 0xC0
- %define off(m) (r0+m-(intra8x9_h1+256+128))
- %define pred(i,j) [r5+i*0x40+j*0x20-0x1C0]
-
- vpblendd m2, m3, m0, 11110011b
- pshufb m1, m2, [off(intra8x9_vr1)]
- pshufb m2, m2, [off(intra8x9_vr3)]
- mova pred(5,0), m1
- mova pred(5,1), m2
- psadbw m1, m5
- psadbw m2, m6
- paddw m1, m2
-
- pslldq m1, 2
- por m4, m1
-
- psrldq m2, m3, 4
- pblendw m2, m0, q3330
- punpcklbw m0, m3
- pshufb m1, m2, [off(intra8x9_hd1)]
- pshufb m2, m0, [off(intra8x9_hd3)]
- mova pred(6,0), m1
- mova pred(6,1), m2
- psadbw m1, m5
- psadbw m2, m6
- paddw m1, m2
-
- pslldq m1, 4
- por m4, m1
-
- pshufb m1, m7, [off(intra8x9_vl1)]
- pshufb m2, m7, [off(intra8x9_vl3)]
- mova pred(7,0), m1
- mova pred(7,1), m2
- psadbw m1, m5
- psadbw m2, m6
- paddw m1, m2
-
- pslldq m1, 6
- por m4, m1
- vextracti128 xm1, m4, 1
- paddw xm4, xm1
- mova xm3, [r4]
- SBUTTERFLY qdq, 3, 4, 7
- paddw xm3, xm4
-
- pslldq m1, m0, 1
- vpbroadcastd m0, [r2+7]
- palignr m0, m1, 1
- pshufb m1, m0, [off(intra8x9_hu1)]
- pshufb m2, m0, [off(intra8x9_hu3)]
- mova pred(8,0), m1
- mova pred(8,1), m2
- psadbw m1, m5
- psadbw m2, m6
- paddw m1, m2
- vextracti128 xm2, m1, 1
- paddw xm1, xm2
- movhlps xm2, xm1
- paddw xm1, xm2
- movd r2d, xm1
-
- paddw xm3, [r3]
- mova [r4], xm3
- add r2w, word [r3+16]
- mov [r4+16], r2w
-
- phminposuw xm3, xm3
- movd r3d, xm3
- add r2d, 8<<16
- cmp r3w, r2w
- cmovg r3d, r2d
-
- mov r2d, r3d
- shr r3, 16
- shl r3, 6
- add r1, 4*FDEC_STRIDE
- mova xm0, [rsp+r3+0x00]
- mova xm1, [rsp+r3+0x10]
- mova xm2, [rsp+r3+0x20]
- mova xm3, [rsp+r3+0x30]
- movq [r1+FDEC_STRIDE*-4], xm0
- movhps [r1+FDEC_STRIDE*-2], xm0
- movq [r1+FDEC_STRIDE*-3], xm1
- movhps [r1+FDEC_STRIDE*-1], xm1
- movq [r1+FDEC_STRIDE* 0], xm2
- movhps [r1+FDEC_STRIDE* 2], xm2
- movq [r1+FDEC_STRIDE* 1], xm3
- movhps [r1+FDEC_STRIDE* 3], xm3
- mov rsp, r6
- mov eax, r2d
- RET
%endif ; HIGH_BIT_DEPTH
;=============================================================================