[x265] [PATCH 07 of 20] x86inc: Use more consistent indentation
vignesh at multicorewareinc.com
Mon Jun 12 07:37:49 CEST 2017
# HG changeset patch
# User Vignesh Vijayakumar
# Date 1496742956 -19800
# Tue Jun 06 15:25:56 2017 +0530
# Node ID cda41a81b2f657dd25c51e38195a13b93405a6db
# Parent bdf98aa6a337bfea485a310ab8ac69b6e45f857b
x86inc: Use more consistent indentation
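
This is a whitespace-only change: nested preprocessor directives (%if/%else/%endif,
%rep/%endrep, %assign) are indented to reflect their nesting depth inside the
enclosing macro, which appears to mirror the equivalent cleanup in upstream x264's
x86inc.asm. As a minimal sketch of the convention being applied (EXAMPLE_RET is a
hypothetical macro name used only for illustration; mmsize, vzeroupper and
AUTO_REP_RET are taken from the surrounding x86inc.asm context):

before:
    %macro EXAMPLE_RET 0            ; hypothetical macro, for illustration only
    %if mmsize == 32                ; directive at column 0, hides the nesting
        vzeroupper
    %endif
        AUTO_REP_RET
    %endmacro

after:
    %macro EXAMPLE_RET 0            ; hypothetical macro, for illustration only
        %if mmsize == 32            ; one indent level per nesting depth
            vzeroupper
        %endif
        AUTO_REP_RET
    %endmacro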
diff -r bdf98aa6a337 -r cda41a81b2f6 source/common/x86/x86inc.asm
--- a/source/common/x86/x86inc.asm Tue Jun 06 15:14:23 2017 +0530
+++ b/source/common/x86/x86inc.asm Tue Jun 06 15:25:56 2017 +0530
@@ -163,9 +163,9 @@
%define e%1h %3
%define r%1b %2
%define e%1b %2
-%if ARCH_X86_64 == 0
- %define r%1 e%1
-%endif
+ %if ARCH_X86_64 == 0
+ %define r%1 e%1
+ %endif
%endmacro
DECLARE_REG_SIZE ax, al, ah
@@ -484,9 +484,9 @@
%macro RET 0
WIN64_RESTORE_XMM_INTERNAL
POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
-%if mmsize == 32
- vzeroupper
-%endif
+ %if mmsize == 32
+ vzeroupper
+ %endif
AUTO_REP_RET
%endmacro
@@ -523,17 +523,17 @@
%define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0
%macro RET 0
-%if stack_size_padded > 0
-%if required_stack_alignment > STACK_ALIGNMENT
- mov rsp, rstkm
-%else
- add rsp, stack_size_padded
-%endif
-%endif
+ %if stack_size_padded > 0
+ %if required_stack_alignment > STACK_ALIGNMENT
+ mov rsp, rstkm
+ %else
+ add rsp, stack_size_padded
+ %endif
+ %endif
POP_IF_USED 14, 13, 12, 11, 10, 9
-%if mmsize == 32
- vzeroupper
-%endif
+ %if mmsize == 32
+ vzeroupper
+ %endif
AUTO_REP_RET
%endmacro
@@ -579,29 +579,29 @@
%define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0
%macro RET 0
-%if stack_size_padded > 0
-%if required_stack_alignment > STACK_ALIGNMENT
- mov rsp, rstkm
-%else
- add rsp, stack_size_padded
-%endif
-%endif
+ %if stack_size_padded > 0
+ %if required_stack_alignment > STACK_ALIGNMENT
+ mov rsp, rstkm
+ %else
+ add rsp, stack_size_padded
+ %endif
+ %endif
POP_IF_USED 6, 5, 4, 3
-%if mmsize == 32
- vzeroupper
-%endif
+ %if mmsize == 32
+ vzeroupper
+ %endif
AUTO_REP_RET
%endmacro
%endif ;======================================================================
%if WIN64 == 0
-%macro WIN64_SPILL_XMM 1
-%endmacro
-%macro WIN64_RESTORE_XMM 0
-%endmacro
-%macro WIN64_PUSH_XMM 0
-%endmacro
+ %macro WIN64_SPILL_XMM 1
+ %endmacro
+ %macro WIN64_RESTORE_XMM 0
+ %endmacro
+ %macro WIN64_PUSH_XMM 0
+ %endmacro
%endif
; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
@@ -827,14 +827,14 @@
%define movnta movntq
%assign %%i 0
%rep 8
- CAT_XDEFINE m, %%i, mm %+ %%i
- CAT_XDEFINE nmm, %%i, %%i
- %assign %%i %%i+1
+ CAT_XDEFINE m, %%i, mm %+ %%i
+ CAT_XDEFINE nmm, %%i, %%i
+ %assign %%i %%i+1
%endrep
%rep 8
- CAT_UNDEF m, %%i
- CAT_UNDEF nmm, %%i
- %assign %%i %%i+1
+ CAT_UNDEF m, %%i
+ CAT_UNDEF nmm, %%i
+ %assign %%i %%i+1
%endrep
INIT_CPUFLAGS %1
%endmacro
@@ -866,7 +866,7 @@
%define mmsize 32
%define num_mmregs 8
%if ARCH_X86_64
- %define num_mmregs 16
+ %define num_mmregs 16
%endif
%define mova movdqa
%define movu movdqu
@@ -874,9 +874,9 @@
%define movnta movntdq
%assign %%i 0
%rep num_mmregs
- CAT_XDEFINE m, %%i, ymm %+ %%i
- CAT_XDEFINE nymm, %%i, %%i
- %assign %%i %%i+1
+ CAT_XDEFINE m, %%i, ymm %+ %%i
+ CAT_XDEFINE nymm, %%i, %%i
+ %assign %%i %%i+1
%endrep
INIT_CPUFLAGS %1
%endmacro
@@ -902,7 +902,7 @@
%assign i 0
%rep 16
DECLARE_MMCAST i
-%assign i i+1
+ %assign i i+1
%endrep
; I often want to use macros that permute their arguments. e.g. there's no
@@ -920,23 +920,23 @@
; doesn't cost any cycles.
%macro PERMUTE 2-* ; takes a list of pairs to swap
-%rep %0/2
- %xdefine %%tmp%2 m%2
- %rotate 2
-%endrep
-%rep %0/2
- %xdefine m%1 %%tmp%2
- CAT_XDEFINE n, m%1, %1
- %rotate 2
-%endrep
+ %rep %0/2
+ %xdefine %%tmp%2 m%2
+ %rotate 2
+ %endrep
+ %rep %0/2
+ %xdefine m%1 %%tmp%2
+ CAT_XDEFINE n, m%1, %1
+ %rotate 2
+ %endrep
%endmacro
%macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs)
-%ifnum %1 ; SWAP 0, 1, ...
- SWAP_INTERNAL_NUM %1, %2
-%else ; SWAP m0, m1, ...
- SWAP_INTERNAL_NAME %1, %2
-%endif
+ %ifnum %1 ; SWAP 0, 1, ...
+ SWAP_INTERNAL_NUM %1, %2
+ %else ; SWAP m0, m1, ...
+ SWAP_INTERNAL_NAME %1, %2
+ %endif
%endmacro
%macro SWAP_INTERNAL_NUM 2-*
@@ -946,7 +946,7 @@
%xdefine m%2 %%tmp
CAT_XDEFINE n, m%1, %1
CAT_XDEFINE n, m%2, %2
- %rotate 1
+ %rotate 1
%endrep
%endmacro
@@ -954,7 +954,7 @@
%xdefine %%args n %+ %1
%rep %0-1
%xdefine %%args %%args, n %+ %2
- %rotate 1
+ %rotate 1
%endrep
SWAP_INTERNAL_NUM %%args
%endmacro
@@ -971,7 +971,7 @@
%assign %%i 0
%rep num_mmregs
CAT_XDEFINE %%f, %%i, m %+ %%i
- %assign %%i %%i+1
+ %assign %%i %%i+1
%endrep
%endmacro
@@ -981,7 +981,7 @@
%rep num_mmregs
CAT_XDEFINE m, %%i, %1_m %+ %%i
CAT_XDEFINE n, m %+ %%i, %%i
- %assign %%i %%i+1
+ %assign %%i %%i+1
%endrep
%endif
%endmacro
@@ -1037,7 +1037,7 @@
%endif
CAT_XDEFINE sizeofxmm, i, 16
CAT_XDEFINE sizeofymm, i, 32
-%assign i i+1
+ %assign i i+1
%endrep
%undef i
@@ -1412,7 +1412,7 @@
%else
CAT_XDEFINE q, j, i
%endif
-%assign i i+1
+ %assign i i+1
%endrep
%undef i
%undef j