[x264-devel] [PATCH 02/32] aarch64: Move the standard function prefix to a single place
Vittorio Giovara
vittorio.giovara at gmail.com
Fri Jan 20 15:20:27 CET 2017
This is needed so that the function signatures can be modified depending on the bit depth.
---
common/aarch64/asm.S | 4 +-
common/aarch64/bitstream-a.S | 2 +-
common/aarch64/cabac-a.S | 14 +++---
common/aarch64/dct-a.S | 106 +++++++++++++++++++++----------------------
common/aarch64/deblock-a.S | 26 +++++------
common/aarch64/mc-a.S | 96 +++++++++++++++++++--------------------
common/aarch64/pixel-a.S | 92 ++++++++++++++++++-------------------
common/aarch64/predict-a.S | 66 +++++++++++++--------------
common/aarch64/quant-a.S | 36 +++++++--------
9 files changed, 221 insertions(+), 221 deletions(-)
diff --git a/common/aarch64/asm.S b/common/aarch64/asm.S
index 106a0af..fff572a 100644
--- a/common/aarch64/asm.S
+++ b/common/aarch64/asm.S
@@ -28,9 +28,9 @@
#include "config.h"
#ifdef PREFIX
-# define EXTERN_ASM _
+# define EXTERN_ASM _x264_
#else
-# define EXTERN_ASM
+# define EXTERN_ASM x264_
#endif
#ifdef __ELF__
diff --git a/common/aarch64/bitstream-a.S b/common/aarch64/bitstream-a.S
index 3ec7c27..eb7017f 100644
--- a/common/aarch64/bitstream-a.S
+++ b/common/aarch64/bitstream-a.S
@@ -25,7 +25,7 @@
#include "asm.S"
-function x264_nal_escape_neon, export=1
+function nal_escape_neon, export=1
movi v0.16b, #0xff
movi v4.16b, #4
mov w3, #3
diff --git a/common/aarch64/cabac-a.S b/common/aarch64/cabac-a.S
index ace336f..e1763cb 100644
--- a/common/aarch64/cabac-a.S
+++ b/common/aarch64/cabac-a.S
@@ -26,12 +26,12 @@
#include "asm.S"
#include "asm-offsets.h"
-// w11 holds x264_cabac_t.i_low
-// w12 holds x264_cabac_t.i_range
+// w11 holds cabac_t.i_low
+// w12 holds cabac_t.i_range
-function x264_cabac_encode_decision_asm, export=1
- movrel x8, X(x264_cabac_range_lps)
- movrel x9, X(x264_cabac_transition)
+function cabac_encode_decision_asm, export=1
+ movrel x8, X(cabac_range_lps)
+ movrel x9, X(cabac_transition)
add w10, w1, #CABAC_STATE
ldrb w3, [x0, x10] // i_state
ldr w12, [x0, #CABAC_I_RANGE]
@@ -101,7 +101,7 @@ cabac_putbyte:
ret
endfunc
-function x264_cabac_encode_bypass_asm, export=1
+function cabac_encode_bypass_asm, export=1
ldr w12, [x0, #CABAC_I_RANGE]
ldr w11, [x0, #CABAC_I_LOW]
ldr w2, [x0, #CABAC_I_QUEUE]
@@ -114,7 +114,7 @@ function x264_cabac_encode_bypass_asm, export=1
ret
endfunc
-function x264_cabac_encode_terminal_asm, export=1
+function cabac_encode_terminal_asm, export=1
ldr w12, [x0, #CABAC_I_RANGE]
ldr w11, [x0, #CABAC_I_LOW]
sub w12, w12, #2
diff --git a/common/aarch64/dct-a.S b/common/aarch64/dct-a.S
index 525a569..0d735e4 100644
--- a/common/aarch64/dct-a.S
+++ b/common/aarch64/dct-a.S
@@ -79,7 +79,7 @@ endconst
.endm
-function x264_dct4x4dc_neon, export=1
+function dct4x4dc_neon, export=1
ld1 {v0.4h,v1.4h,v2.4h,v3.4h}, [x0]
movi v31.4h, #1
SUMSUB_AB v4.4h, v5.4h, v0.4h, v1.4h
@@ -102,7 +102,7 @@ function x264_dct4x4dc_neon, export=1
ret
endfunc
-function x264_idct4x4dc_neon, export=1
+function idct4x4dc_neon, export=1
ld1 {v0.4h,v1.4h,v2.4h,v3.4h}, [x0]
SUMSUB_AB v4.4h, v5.4h, v0.4h, v1.4h
SUMSUB_AB v6.4h, v7.4h, v2.4h, v3.4h
@@ -131,7 +131,7 @@ endfunc
sub \v3, \v7, \v5
.endm
-function x264_sub4x4_dct_neon, export=1
+function sub4x4_dct_neon, export=1
mov x3, #FENC_STRIDE
mov x4, #FDEC_STRIDE
ld1 {v0.s}[0], [x1], x3
@@ -154,7 +154,7 @@ function x264_sub4x4_dct_neon, export=1
ret
endfunc
-function x264_sub8x4_dct_neon
+function sub8x4_dct_neon
ld1 {v0.8b}, [x1], x3
ld1 {v1.8b}, [x2], x4
usubl v16.8h, v0.8b, v1.8b
@@ -193,34 +193,34 @@ function x264_sub8x4_dct_neon
ret
endfunc
-function x264_sub8x8_dct_neon, export=1
+function sub8x8_dct_neon, export=1
mov x5, x30
mov x3, #FENC_STRIDE
mov x4, #FDEC_STRIDE
- bl x264_sub8x4_dct_neon
+ bl sub8x4_dct_neon
mov x30, x5
- b x264_sub8x4_dct_neon
+ b sub8x4_dct_neon
endfunc
-function x264_sub16x16_dct_neon, export=1
+function sub16x16_dct_neon, export=1
mov x5, x30
mov x3, #FENC_STRIDE
mov x4, #FDEC_STRIDE
- bl x264_sub8x4_dct_neon
- bl x264_sub8x4_dct_neon
+ bl sub8x4_dct_neon
+ bl sub8x4_dct_neon
sub x1, x1, #8*FENC_STRIDE-8
sub x2, x2, #8*FDEC_STRIDE-8
- bl x264_sub8x4_dct_neon
- bl x264_sub8x4_dct_neon
+ bl sub8x4_dct_neon
+ bl sub8x4_dct_neon
sub x1, x1, #8
sub x2, x2, #8
- bl x264_sub8x4_dct_neon
- bl x264_sub8x4_dct_neon
+ bl sub8x4_dct_neon
+ bl sub8x4_dct_neon
sub x1, x1, #8*FENC_STRIDE-8
sub x2, x2, #8*FDEC_STRIDE-8
- bl x264_sub8x4_dct_neon
+ bl sub8x4_dct_neon
mov x30, x5
- b x264_sub8x4_dct_neon
+ b sub8x4_dct_neon
endfunc
@@ -255,7 +255,7 @@ endfunc
SUMSUB_SHR2 2, v3.8h, v5.8h, v30.8h, v29.8h, v20.8h, v21.8h
.endm
-function x264_sub8x8_dct8_neon, export=1
+function sub8x8_dct8_neon, export=1
mov x3, #FENC_STRIDE
mov x4, #FDEC_STRIDE
ld1 {v16.8b}, [x1], x3
@@ -292,19 +292,19 @@ function x264_sub8x8_dct8_neon, export=1
ret
endfunc
-function x264_sub16x16_dct8_neon, export=1
+function sub16x16_dct8_neon, export=1
mov x7, x30
- bl X(x264_sub8x8_dct8_neon)
+ bl X(sub8x8_dct8_neon)
sub x1, x1, #FENC_STRIDE*8 - 8
sub x2, x2, #FDEC_STRIDE*8 - 8
- bl X(x264_sub8x8_dct8_neon)
+ bl X(sub8x8_dct8_neon)
sub x1, x1, #8
sub x2, x2, #8
- bl X(x264_sub8x8_dct8_neon)
+ bl X(sub8x8_dct8_neon)
mov x30, x7
sub x1, x1, #FENC_STRIDE*8 - 8
sub x2, x2, #FDEC_STRIDE*8 - 8
- b X(x264_sub8x8_dct8_neon)
+ b X(sub8x8_dct8_neon)
endfunc
@@ -317,7 +317,7 @@ endfunc
add \d6, \d6, \d1
.endm
-function x264_add4x4_idct_neon, export=1
+function add4x4_idct_neon, export=1
mov x2, #FDEC_STRIDE
ld1 {v0.4h,v1.4h,v2.4h,v3.4h}, [x1]
@@ -357,7 +357,7 @@ function x264_add4x4_idct_neon, export=1
ret
endfunc
-function x264_add8x4_idct_neon, export=1
+function add8x4_idct_neon, export=1
ld1 {v0.8h,v1.8h}, [x1], #32
ld1 {v2.8h,v3.8h}, [x1], #32
transpose v20.2d, v21.2d, v0.2d, v2.2d
@@ -398,29 +398,29 @@ function x264_add8x4_idct_neon, export=1
ret
endfunc
-function x264_add8x8_idct_neon, export=1
+function add8x8_idct_neon, export=1
mov x2, #FDEC_STRIDE
mov x5, x30
- bl X(x264_add8x4_idct_neon)
+ bl X(add8x4_idct_neon)
mov x30, x5
- b X(x264_add8x4_idct_neon)
+ b X(add8x4_idct_neon)
endfunc
-function x264_add16x16_idct_neon, export=1
+function add16x16_idct_neon, export=1
mov x2, #FDEC_STRIDE
mov x5, x30
- bl X(x264_add8x4_idct_neon)
- bl X(x264_add8x4_idct_neon)
+ bl X(add8x4_idct_neon)
+ bl X(add8x4_idct_neon)
sub x0, x0, #8*FDEC_STRIDE-8
- bl X(x264_add8x4_idct_neon)
- bl X(x264_add8x4_idct_neon)
+ bl X(add8x4_idct_neon)
+ bl X(add8x4_idct_neon)
sub x0, x0, #8
- bl X(x264_add8x4_idct_neon)
- bl X(x264_add8x4_idct_neon)
+ bl X(add8x4_idct_neon)
+ bl X(add8x4_idct_neon)
sub x0, x0, #8*FDEC_STRIDE-8
- bl X(x264_add8x4_idct_neon)
+ bl X(add8x4_idct_neon)
mov x30, x5
- b X(x264_add8x4_idct_neon)
+ b X(add8x4_idct_neon)
endfunc
.macro IDCT8_1D type
@@ -446,7 +446,7 @@ endfunc
SUMSUB_AB v19.8h, v20.8h, v2.8h, v20.8h
.endm
-function x264_add8x8_idct8_neon, export=1
+function add8x8_idct8_neon, export=1
mov x2, #FDEC_STRIDE
ld1 {v16.8h,v17.8h}, [x1], #32
ld1 {v18.8h,v19.8h}, [x1], #32
@@ -503,19 +503,19 @@ function x264_add8x8_idct8_neon, export=1
ret
endfunc
-function x264_add16x16_idct8_neon, export=1
+function add16x16_idct8_neon, export=1
mov x7, x30
- bl X(x264_add8x8_idct8_neon)
+ bl X(add8x8_idct8_neon)
sub x0, x0, #8*FDEC_STRIDE-8
- bl X(x264_add8x8_idct8_neon)
+ bl X(add8x8_idct8_neon)
sub x0, x0, #8
- bl X(x264_add8x8_idct8_neon)
+ bl X(add8x8_idct8_neon)
sub x0, x0, #8*FDEC_STRIDE-8
mov x30, x7
- b X(x264_add8x8_idct8_neon)
+ b X(add8x8_idct8_neon)
endfunc
-function x264_add8x8_idct_dc_neon, export=1
+function add8x8_idct_dc_neon, export=1
mov x2, #FDEC_STRIDE
ld1 {v16.4h}, [x1]
ld1 {v0.8b}, [x0], x2
@@ -605,7 +605,7 @@ endfunc
st1 {v7.16b}, [x2], x3
.endm
-function x264_add16x16_idct_dc_neon, export=1
+function add16x16_idct_dc_neon, export=1
mov x2, x0
mov x3, #FDEC_STRIDE
@@ -640,7 +640,7 @@ endfunc
add \dst\().8h, \dst\().8h, \t3\().8h
.endm
-function x264_sub8x8_dct_dc_neon, export=1
+function sub8x8_dct_dc_neon, export=1
mov x3, #FENC_STRIDE
mov x4, #FDEC_STRIDE
@@ -660,7 +660,7 @@ function x264_sub8x8_dct_dc_neon, export=1
ret
endfunc
-function x264_sub8x16_dct_dc_neon, export=1
+function sub8x16_dct_dc_neon, export=1
mov x3, #FENC_STRIDE
mov x4, #FDEC_STRIDE
sub4x4x2_dct_dc v0, v16, v17, v18, v19, v20, v21, v22, v23
@@ -689,7 +689,7 @@ function x264_sub8x16_dct_dc_neon, export=1
ret
endfunc
-function x264_zigzag_interleave_8x8_cavlc_neon, export=1
+function zigzag_interleave_8x8_cavlc_neon, export=1
mov x3, #7
movi v31.4s, #1
ld4 {v0.8h,v1.8h,v2.8h,v3.8h}, [x1], #64
@@ -718,7 +718,7 @@ function x264_zigzag_interleave_8x8_cavlc_neon, export=1
ret
endfunc
-function x264_zigzag_scan_4x4_frame_neon, export=1
+function zigzag_scan_4x4_frame_neon, export=1
movrel x2, scan4x4_frame
ld1 {v0.16b,v1.16b}, [x1]
ld1 {v16.16b,v17.16b}, [x2]
@@ -729,7 +729,7 @@ function x264_zigzag_scan_4x4_frame_neon, export=1
endfunc
.macro zigzag_sub_4x4 f ac
-function x264_zigzag_sub_4x4\ac\()_\f\()_neon, export=1
+function zigzag_sub_4x4\ac\()_\f\()_neon, export=1
mov x9, #FENC_STRIDE
mov x4, #FDEC_STRIDE
movrel x5, sub4x4_\f
@@ -772,7 +772,7 @@ zigzag_sub_4x4 field, ac
zigzag_sub_4x4 frame
zigzag_sub_4x4 frame, ac
-function x264_zigzag_scan_4x4_field_neon, export=1
+function zigzag_scan_4x4_field_neon, export=1
movrel x2, scan4x4_field
ld1 {v0.8h,v1.8h}, [x1]
ld1 {v16.16b}, [x2]
@@ -781,7 +781,7 @@ function x264_zigzag_scan_4x4_field_neon, export=1
ret
endfunc
-function x264_zigzag_scan_8x8_frame_neon, export=1
+function zigzag_scan_8x8_frame_neon, export=1
movrel x2, scan8x8_frame
ld1 {v0.8h,v1.8h}, [x1], #32
ld1 {v2.8h,v3.8h}, [x1], #32
@@ -841,7 +841,7 @@ const scan8x8_frame, align=5
.byte T(7,5), T(7,6), T(6,7), T(7,7)
endconst
-function x264_zigzag_scan_8x8_field_neon, export=1
+function zigzag_scan_8x8_field_neon, export=1
movrel x2, scan8x8_field
ld1 {v0.8h,v1.8h}, [x1], #32
ld1 {v2.8h,v3.8h}, [x1], #32
@@ -868,7 +868,7 @@ function x264_zigzag_scan_8x8_field_neon, export=1
endfunc
.macro zigzag_sub8x8 f
-function x264_zigzag_sub_8x8_\f\()_neon, export=1
+function zigzag_sub_8x8_\f\()_neon, export=1
movrel x4, sub8x8_\f
mov x5, #FENC_STRIDE
mov x6, #FDEC_STRIDE
diff --git a/common/aarch64/deblock-a.S b/common/aarch64/deblock-a.S
index 1310f3b..ed221b5 100644
--- a/common/aarch64/deblock-a.S
+++ b/common/aarch64/deblock-a.S
@@ -108,7 +108,7 @@
sqxtun2 v0.16b, v24.8h
.endm
-function x264_deblock_v_luma_neon, export=1
+function deblock_v_luma_neon, export=1
h264_loop_filter_start
ld1 {v0.16b}, [x0], x1
@@ -131,7 +131,7 @@ function x264_deblock_v_luma_neon, export=1
ret
endfunc
-function x264_deblock_h_luma_neon, export=1
+function deblock_h_luma_neon, export=1
h264_loop_filter_start
sub x0, x0, #4
@@ -302,7 +302,7 @@ endfunc
bit v2.16b, v26.16b, v18.16b // q2'_2
.endm
-function x264_deblock_v_luma_intra_neon, export=1
+function deblock_v_luma_intra_neon, export=1
h264_loop_filter_start_intra
ld1 {v0.16b}, [x0], x1 // q0
@@ -328,7 +328,7 @@ function x264_deblock_v_luma_intra_neon, export=1
ret
endfunc
-function x264_deblock_h_luma_intra_neon, export=1
+function deblock_h_luma_intra_neon, export=1
h264_loop_filter_start_intra
sub x0, x0, #4
@@ -421,7 +421,7 @@ endfunc
sqxtun2 v0.16b, v23.8h
.endm
-function x264_deblock_v_chroma_neon, export=1
+function deblock_v_chroma_neon, export=1
h264_loop_filter_start
sub x0, x0, x1, lsl #1
@@ -439,7 +439,7 @@ function x264_deblock_v_chroma_neon, export=1
ret
endfunc
-function x264_deblock_h_chroma_neon, export=1
+function deblock_h_chroma_neon, export=1
h264_loop_filter_start
sub x0, x0, #4
@@ -472,7 +472,7 @@ deblock_h_chroma:
ret
endfunc
-function x264_deblock_h_chroma_422_neon, export=1
+function deblock_h_chroma_422_neon, export=1
add x5, x0, x1
sub x0, x0, #4
add x1, x1, x1
@@ -516,7 +516,7 @@ endfunc
sqxtun v17.8b, v22.8h
.endm
-function x264_deblock_h_chroma_mbaff_neon, export=1
+function deblock_h_chroma_mbaff_neon, export=1
h264_loop_filter_start
sub x4, x0, #4
@@ -575,7 +575,7 @@ endfunc
bit v17.16b, v25.16b, v26.16b
.endm
-function x264_deblock_v_chroma_intra_neon, export=1
+function deblock_v_chroma_intra_neon, export=1
h264_loop_filter_start_intra
sub x0, x0, x1, lsl #1
@@ -593,7 +593,7 @@ function x264_deblock_v_chroma_intra_neon, export=1
ret
endfunc
-function x264_deblock_h_chroma_intra_mbaff_neon, export=1
+function deblock_h_chroma_intra_mbaff_neon, export=1
h264_loop_filter_start_intra
sub x4, x0, #4
@@ -615,7 +615,7 @@ function x264_deblock_h_chroma_intra_mbaff_neon, export=1
ret
endfunc
-function x264_deblock_h_chroma_intra_neon, export=1
+function deblock_h_chroma_intra_neon, export=1
h264_loop_filter_start_intra
sub x4, x0, #4
@@ -645,7 +645,7 @@ function x264_deblock_h_chroma_intra_neon, export=1
ret
endfunc
-function x264_deblock_h_chroma_422_intra_neon, export=1
+function deblock_h_chroma_422_intra_neon, export=1
h264_loop_filter_start_intra
sub x4, x0, #4
@@ -702,7 +702,7 @@ endfunc
// int16_t mv[2][X264_SCAN8_LUMA_SIZE][2],
// uint8_t bs[2][8][4], int mvy_limit,
// int bframe )
-function x264_deblock_strength_neon, export=1
+function deblock_strength_neon, export=1
movi v4.16b, #0
lsl w4, w4, #8
add x3, x3, #32
diff --git a/common/aarch64/mc-a.S b/common/aarch64/mc-a.S
index 5656c0b..68aa907 100644
--- a/common/aarch64/mc-a.S
+++ b/common/aarch64/mc-a.S
@@ -31,7 +31,7 @@
// note: prefetch stuff assumes 64-byte cacheline
// void prefetch_ref( uint8_t *pix, intptr_t stride, int parity )
-function x264_prefetch_ref_aarch64, export=1
+function prefetch_ref_aarch64, export=1
cmp w2, #1
csel x2, xzr, x1, eq
add x0, x0, #64
@@ -54,8 +54,8 @@ endfunc
// void prefetch_fenc( uint8_t *pix_y, intptr_t stride_y,
// uint8_t *pix_uv, intptr_t stride_uv, int mb_x )
-.macro x264_prefetch_fenc sub
-function x264_prefetch_fenc_\sub\()_aarch64, export=1
+.macro prefetch_fenc sub
+function prefetch_fenc_\sub\()_aarch64, export=1
and w6, w5, #3
and w7, w5, #3
mul x6, x6, x1
@@ -82,14 +82,14 @@ function x264_prefetch_fenc_\sub\()_aarch64, export=1
endfunc
.endm
-x264_prefetch_fenc 420
-x264_prefetch_fenc 422
+prefetch_fenc 420
+prefetch_fenc 422
// void pixel_avg( uint8_t *dst, intptr_t dst_stride,
// uint8_t *src1, intptr_t src1_stride,
// uint8_t *src2, intptr_t src2_stride, int weight );
.macro AVGH w h
-function x264_pixel_avg_\w\()x\h\()_neon, export=1
+function pixel_avg_\w\()x\h\()_neon, export=1
mov w10, #64
cmp w6, #32
mov w9, #\h
@@ -292,7 +292,7 @@ function pixel_avg_w16_neon
ret
endfunc
-function x264_pixel_avg2_w4_neon, export=1
+function pixel_avg2_w4_neon, export=1
1:
subs w5, w5, #2
ld1 {v0.s}[0], [x2], x3
@@ -307,7 +307,7 @@ function x264_pixel_avg2_w4_neon, export=1
ret
endfunc
-function x264_pixel_avg2_w8_neon, export=1
+function pixel_avg2_w8_neon, export=1
1:
subs w5, w5, #2
ld1 {v0.8b}, [x2], x3
@@ -322,7 +322,7 @@ function x264_pixel_avg2_w8_neon, export=1
ret
endfunc
-function x264_pixel_avg2_w16_neon, export=1
+function pixel_avg2_w16_neon, export=1
1:
subs w5, w5, #2
ld1 {v0.16b}, [x2], x3
@@ -337,7 +337,7 @@ function x264_pixel_avg2_w16_neon, export=1
ret
endfunc
-function x264_pixel_avg2_w20_neon, export=1
+function pixel_avg2_w20_neon, export=1
sub x1, x1, #16
1:
subs w5, w5, #2
@@ -372,8 +372,8 @@ endfunc
.endm
// void mc_weight( uint8_t *src, intptr_t src_stride, uint8_t *dst,
-// intptr_t dst_stride, const x264_weight_t *weight, int h )
-function x264_mc_weight_w20_neon, export=1
+// intptr_t dst_stride, const weight_t *weight, int h )
+function mc_weight_w20_neon, export=1
weight_prologue full
sub x1, x1, #16
1:
@@ -409,7 +409,7 @@ function x264_mc_weight_w20_neon, export=1
ret
endfunc
-function x264_mc_weight_w16_neon, export=1
+function mc_weight_w16_neon, export=1
weight_prologue full
weight16_loop:
1:
@@ -438,7 +438,7 @@ weight16_loop:
ret
endfunc
-function x264_mc_weight_w8_neon, export=1
+function mc_weight_w8_neon, export=1
weight_prologue full
1:
subs w9, w9, #2
@@ -458,7 +458,7 @@ function x264_mc_weight_w8_neon, export=1
ret
endfunc
-function x264_mc_weight_w4_neon, export=1
+function mc_weight_w4_neon, export=1
weight_prologue full
1:
subs w9, w9, #2
@@ -474,7 +474,7 @@ function x264_mc_weight_w4_neon, export=1
ret
endfunc
-function x264_mc_weight_w20_nodenom_neon, export=1
+function mc_weight_w20_nodenom_neon, export=1
weight_prologue nodenom
sub x1, x1, #16
1:
@@ -505,7 +505,7 @@ function x264_mc_weight_w20_nodenom_neon, export=1
ret
endfunc
-function x264_mc_weight_w16_nodenom_neon, export=1
+function mc_weight_w16_nodenom_neon, export=1
weight_prologue nodenom
1:
subs w9, w9, #2
@@ -529,7 +529,7 @@ function x264_mc_weight_w16_nodenom_neon, export=1
ret
endfunc
-function x264_mc_weight_w8_nodenom_neon, export=1
+function mc_weight_w8_nodenom_neon, export=1
weight_prologue nodenom
1:
subs w9, w9, #2
@@ -547,7 +547,7 @@ function x264_mc_weight_w8_nodenom_neon, export=1
ret
endfunc
-function x264_mc_weight_w4_nodenom_neon, export=1
+function mc_weight_w4_nodenom_neon, export=1
weight_prologue nodenom
1:
subs w9, w9, #2
@@ -568,7 +568,7 @@ endfunc
.endm
.macro weight_simple name op
-function x264_mc_weight_w20_\name\()_neon, export=1
+function mc_weight_w20_\name\()_neon, export=1
weight_simple_prologue
1:
subs w5, w5, #2
@@ -588,7 +588,7 @@ function x264_mc_weight_w20_\name\()_neon, export=1
ret
endfunc
-function x264_mc_weight_w16_\name\()_neon, export=1
+function mc_weight_w16_\name\()_neon, export=1
weight_simple_prologue
1:
subs w5, w5, #2
@@ -602,7 +602,7 @@ function x264_mc_weight_w16_\name\()_neon, export=1
ret
endfunc
-function x264_mc_weight_w8_\name\()_neon, export=1
+function mc_weight_w8_\name\()_neon, export=1
weight_simple_prologue
1:
subs w5, w5, #2
@@ -616,7 +616,7 @@ function x264_mc_weight_w8_\name\()_neon, export=1
ret
endfunc
-function x264_mc_weight_w4_\name\()_neon, export=1
+function mc_weight_w4_\name\()_neon, export=1
weight_simple_prologue
1:
subs w5, w5, #2
@@ -635,7 +635,7 @@ weight_simple offsetsub, uqsub
// void mc_copy( uint8_t *dst, intptr_t dst_stride, uint8_t *src, intptr_t src_stride, int height )
-function x264_mc_copy_w4_neon, export=1
+function mc_copy_w4_neon, export=1
1:
subs w4, w4, #4
ld1 {v0.s}[0], [x2], x3
@@ -650,7 +650,7 @@ function x264_mc_copy_w4_neon, export=1
ret
endfunc
-function x264_mc_copy_w8_neon, export=1
+function mc_copy_w8_neon, export=1
1: subs w4, w4, #4
ld1 {v0.8b}, [x2], x3
ld1 {v1.8b}, [x2], x3
@@ -664,7 +664,7 @@ function x264_mc_copy_w8_neon, export=1
ret
endfunc
-function x264_mc_copy_w16_neon, export=1
+function mc_copy_w16_neon, export=1
1: subs w4, w4, #4
ld1 {v0.16b}, [x2], x3
ld1 {v1.16b}, [x2], x3
@@ -678,11 +678,11 @@ function x264_mc_copy_w16_neon, export=1
ret
endfunc
-// void x264_mc_chroma_neon( uint8_t *dst_u, uint8_t *dst_v,
+// void mc_chroma_neon( uint8_t *dst_u, uint8_t *dst_v,
// intptr_t i_dst_stride,
// uint8_t *src, intptr_t i_src_stride,
// int dx, int dy, int i_width, int i_height );
-function x264_mc_chroma_neon, export=1
+function mc_chroma_neon, export=1
ldr w15, [sp] // height
sbfx x12, x6, #3, #29 // asr(3) and sign extend
sbfx x11, x5, #3, #29 // asr(3) and sign extend
@@ -1018,7 +1018,7 @@ endfunc
//void hpel_filter( pixel *dsth, pixel *dstv, pixel *dstc, pixel *src,
// intptr_t stride, int width, int height, int16_t *buf )
-function x264_hpel_filter_neon, export=1
+function hpel_filter_neon, export=1
ubfm x9, x3, #0, #3
add w15, w5, w9
sub x13, x3, x9 // align src
@@ -1158,7 +1158,7 @@ endfunc
// frame_init_lowres_core( uint8_t *src0, uint8_t *dst0, uint8_t *dsth,
// uint8_t *dstv, uint8_t *dstc, intptr_t src_stride,
// intptr_t dst_stride, int width, int height )
-function x264_frame_init_lowres_core_neon, export=1
+function frame_init_lowres_core_neon, export=1
ldr w8, [sp]
sub x10, x6, w7, uxtw // dst_stride - width
and x10, x10, #~15
@@ -1233,12 +1233,12 @@ function x264_frame_init_lowres_core_neon, export=1
ret
endfunc
-function x264_load_deinterleave_chroma_fenc_neon, export=1
+function load_deinterleave_chroma_fenc_neon, export=1
mov x4, #FENC_STRIDE/2
b load_deinterleave_chroma
endfunc
-function x264_load_deinterleave_chroma_fdec_neon, export=1
+function load_deinterleave_chroma_fdec_neon, export=1
mov x4, #FDEC_STRIDE/2
load_deinterleave_chroma:
ld2 {v0.8b,v1.8b}, [x1], x2
@@ -1253,7 +1253,7 @@ load_deinterleave_chroma:
ret
endfunc
-function x264_plane_copy_core_neon, export=1
+function plane_copy_core_neon, export=1
add w8, w4, #15 // 32-bit write clears the upper 32-bit the register
and w4, w8, #~15
// safe use of the full reg since negative width makes no sense
@@ -1282,7 +1282,7 @@ function x264_plane_copy_core_neon, export=1
ret
endfunc
-function x264_plane_copy_swap_core_neon, export=1
+function plane_copy_swap_core_neon, export=1
lsl w4, w4, #1
sub x1, x1, x4
sub x3, x3, x4
@@ -1310,7 +1310,7 @@ function x264_plane_copy_swap_core_neon, export=1
ret
endfunc
-function x264_plane_copy_deinterleave_neon, export=1
+function plane_copy_deinterleave_neon, export=1
add w9, w6, #15
and w9, w9, #0xfffffff0
sub x1, x1, x9
@@ -1349,7 +1349,7 @@ endfunc
b.gt 1b
.endm
-function x264_plane_copy_deinterleave_rgb_neon, export=1
+function plane_copy_deinterleave_rgb_neon, export=1
#if SYS_MACOSX
ldr w8, [sp]
ldp w9, w10, [sp, #4]
@@ -1381,7 +1381,7 @@ function x264_plane_copy_deinterleave_rgb_neon, export=1
ret
endfunc
-function x264_plane_copy_interleave_core_neon, export=1
+function plane_copy_interleave_core_neon, export=1
add w9, w6, #15
and w9, w9, #0xfffffff0
sub x1, x1, x9, lsl #1
@@ -1404,7 +1404,7 @@ function x264_plane_copy_interleave_core_neon, export=1
ret
endfunc
-function x264_store_interleave_chroma_neon, export=1
+function store_interleave_chroma_neon, export=1
mov x5, #FDEC_STRIDE
1:
ld1 {v0.8b}, [x2], x5
@@ -1431,7 +1431,7 @@ endfunc
add v0.8h, v0.8h, v5.8h
.endm
-function x264_integral_init4h_neon, export=1
+function integral_init4h_neon, export=1
sub x3, x0, x2, lsl #1
ld1 {v6.8b,v7.8b}, [x1], #16
1:
@@ -1466,7 +1466,7 @@ endfunc
add v0.8h, v0.8h, \s\().8h
.endm
-function x264_integral_init8h_neon, export=1
+function integral_init8h_neon, export=1
sub x3, x0, x2, lsl #1
ld1 {v16.8b,v17.8b}, [x1], #16
1:
@@ -1483,7 +1483,7 @@ function x264_integral_init8h_neon, export=1
ret
endfunc
-function x264_integral_init4v_neon, export=1
+function integral_init4v_neon, export=1
mov x3, x0
add x4, x0, x2, lsl #3
add x8, x0, x2, lsl #4
@@ -1518,7 +1518,7 @@ function x264_integral_init4v_neon, export=1
ret
endfunc
-function x264_integral_init8v_neon, export=1
+function integral_init8v_neon, export=1
add x2, x0, x1, lsl #4
sub x1, x1, #8
ands x3, x1, #16 - 1
@@ -1542,7 +1542,7 @@ function x264_integral_init8v_neon, export=1
ret
endfunc
-function x264_mbtree_propagate_cost_neon, export=1
+function mbtree_propagate_cost_neon, export=1
ld1r {v5.4s}, [x5]
8:
subs w6, w6, #8
@@ -1593,7 +1593,7 @@ const pw_0to15, align=5
.short 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
endconst
-function x264_mbtree_propagate_list_internal_neon, export=1
+function mbtree_propagate_list_internal_neon, export=1
movrel x11, pw_0to15
dup v31.8h, w4 // bipred_weight
movi v30.8h, #0xc0, lsl #8
@@ -1659,7 +1659,7 @@ function x264_mbtree_propagate_list_internal_neon, export=1
ret
endfunc
-function x264_memcpy_aligned_neon, export=1
+function memcpy_aligned_neon, export=1
tst x2, #16
b.eq 32f
sub x2, x2, #16
@@ -1684,7 +1684,7 @@ function x264_memcpy_aligned_neon, export=1
ret
endfunc
-function x264_memzero_aligned_neon, export=1
+function memzero_aligned_neon, export=1
movi v0.16b, #0
movi v1.16b, #0
1:
@@ -1698,7 +1698,7 @@ function x264_memzero_aligned_neon, export=1
endfunc
// void mbtree_fix8_pack( int16_t *dst, float *src, int count )
-function x264_mbtree_fix8_pack_neon, export=1
+function mbtree_fix8_pack_neon, export=1
subs w3, w2, #8
b.lt 2f
1:
@@ -1726,7 +1726,7 @@ function x264_mbtree_fix8_pack_neon, export=1
endfunc
// void mbtree_fix8_unpack( float *dst, int16_t *src, int count )
-function x264_mbtree_fix8_unpack_neon, export=1
+function mbtree_fix8_unpack_neon, export=1
subs w3, w2, #8
b.lt 2f
1:
diff --git a/common/aarch64/pixel-a.S b/common/aarch64/pixel-a.S
index 92ec92d..c6816e9 100644
--- a/common/aarch64/pixel-a.S
+++ b/common/aarch64/pixel-a.S
@@ -97,7 +97,7 @@ endconst
.endm
.macro SAD_FUNC w, h, name
-function x264_pixel_sad\name\()_\w\()x\h\()_neon, export=1
+function pixel_sad\name\()_\w\()x\h\()_neon, export=1
SAD_START_\w
.rept \h / 2 - 1
@@ -195,7 +195,7 @@ SAD_FUNC 16, 16
.endm
.macro SAD_X_FUNC x, w, h
-function x264_pixel_sad_x\x\()_\w\()x\h\()_neon, export=1
+function pixel_sad_x\x\()_\w\()x\h\()_neon, export=1
.if \x == 3
mov x6, x5
mov x5, x4
@@ -249,7 +249,7 @@ SAD_X_FUNC 4, 16, 8
SAD_X_FUNC 4, 16, 16
-function x264_pixel_vsad_neon, export=1
+function pixel_vsad_neon, export=1
subs w2, w2, #2
ld1 {v0.16b}, [x0], x1
ld1 {v1.16b}, [x0], x1
@@ -273,7 +273,7 @@ function x264_pixel_vsad_neon, export=1
ret
endfunc
-function x264_pixel_asd8_neon, export=1
+function pixel_asd8_neon, export=1
sub w4, w4, #2
ld1 {v0.8b}, [x0], x1
ld1 {v1.8b}, [x2], x3
@@ -379,7 +379,7 @@ endfunc
.endm
.macro SSD_FUNC w h
-function x264_pixel_ssd_\w\()x\h\()_neon, export=1
+function pixel_ssd_\w\()x\h\()_neon, export=1
SSD_START_\w
.rept \h-2
SSD_\w
@@ -402,7 +402,7 @@ SSD_FUNC 16, 8
SSD_FUNC 16, 16
-function x264_pixel_ssd_nv12_core_neon, export=1
+function pixel_ssd_nv12_core_neon, export=1
sxtw x8, w4
add x8, x8, #8
and x8, x8, #~15
@@ -473,7 +473,7 @@ function x264_pixel_ssd_nv12_core_neon, export=1
endfunc
.macro pixel_var_8 h
-function x264_pixel_var_8x\h\()_neon, export=1
+function pixel_var_8x\h\()_neon, export=1
ld1 {v16.8b}, [x0], x1
ld1 {v17.8b}, [x0], x1
mov x2, \h - 4
@@ -512,14 +512,14 @@ function x264_pixel_var_8x\h\()_neon, export=1
uadalp v1.4s, v28.8h
uadalp v2.4s, v29.8h
- b x264_var_end
+ b var_end
endfunc
.endm
pixel_var_8 8
pixel_var_8 16
-function x264_pixel_var_16x16_neon, export=1
+function pixel_var_16x16_neon, export=1
ld1 {v16.16b}, [x0], x1
ld1 {v17.16b}, [x0], x1
mov x2, #14
@@ -556,7 +556,7 @@ function x264_pixel_var_16x16_neon, export=1
uadalp v2.4s, v4.8h
endfunc
-function x264_var_end
+function var_end
add v1.4s, v1.4s, v2.4s
uaddlv s0, v0.8h
uaddlv d1, v1.4s
@@ -568,7 +568,7 @@ endfunc
.macro pixel_var2_8 h
-function x264_pixel_var2_8x\h\()_neon, export=1
+function pixel_var2_8x\h\()_neon, export=1
ld1 {v16.8b}, [x0], x1
ld1 {v18.8b}, [x2], x3
ld1 {v17.8b}, [x0], x1
@@ -629,7 +629,7 @@ pixel_var2_8 8
pixel_var2_8 16
-function x264_pixel_satd_4x4_neon, export=1
+function pixel_satd_4x4_neon, export=1
ld1 {v1.s}[0], [x2], x3
ld1 {v0.s}[0], [x0], x1
ld1 {v3.s}[0], [x2], x3
@@ -662,7 +662,7 @@ function x264_pixel_satd_4x4_neon, export=1
ret
endfunc
-function x264_pixel_satd_4x8_neon, export=1
+function pixel_satd_4x8_neon, export=1
ld1 {v1.s}[0], [x2], x3
ld1 {v0.s}[0], [x0], x1
ld1 {v3.s}[0], [x2], x3
@@ -679,10 +679,10 @@ function x264_pixel_satd_4x8_neon, export=1
ld1 {v4.s}[1], [x0], x1
ld1 {v7.s}[1], [x2], x3
ld1 {v6.s}[1], [x0], x1
- b x264_satd_4x8_8x4_end_neon
+ b satd_4x8_8x4_end_neon
endfunc
-function x264_pixel_satd_8x4_neon, export=1
+function pixel_satd_8x4_neon, export=1
ld1 {v1.8b}, [x2], x3
ld1 {v0.8b}, [x0], x1
ld1 {v3.8b}, [x2], x3
@@ -693,7 +693,7 @@ function x264_pixel_satd_8x4_neon, export=1
ld1 {v6.8b}, [x0], x1
endfunc
-function x264_satd_4x8_8x4_end_neon
+function satd_4x8_8x4_end_neon
usubl v0.8h, v0.8b, v1.8b
usubl v1.8h, v2.8b, v3.8b
usubl v2.8h, v4.8b, v5.8b
@@ -729,10 +729,10 @@ function x264_satd_4x8_8x4_end_neon
ret
endfunc
-function x264_pixel_satd_8x8_neon, export=1
+function pixel_satd_8x8_neon, export=1
mov x4, x30
- bl x264_satd_8x8_neon
+ bl satd_8x8_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
add v0.8h, v0.8h, v1.8h
@@ -741,15 +741,15 @@ function x264_pixel_satd_8x8_neon, export=1
ret x4
endfunc
-function x264_pixel_satd_8x16_neon, export=1
+function pixel_satd_8x16_neon, export=1
mov x4, x30
- bl x264_satd_8x8_neon
+ bl satd_8x8_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
add v30.8h, v0.8h, v1.8h
- bl x264_satd_8x8_neon
+ bl satd_8x8_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
add v31.8h, v0.8h, v1.8h
@@ -805,12 +805,12 @@ endfunc
SUMSUB_ABCD \r1, \r3, \r2, \r4, \t1, \t3, \t2, \t4
.endm
-function x264_satd_8x8_neon
+function satd_8x8_neon
load_diff_fly_8x8
endfunc
// one vertical hadamard pass and two horizontal
-function x264_satd_8x4v_8x8h_neon
+function satd_8x4v_8x8h_neon
SUMSUB_AB v16.8h, v18.8h, v0.8h, v2.8h
SUMSUB_AB v17.8h, v19.8h, v1.8h, v3.8h
@@ -848,14 +848,14 @@ function x264_satd_8x4v_8x8h_neon
ret
endfunc
-function x264_pixel_satd_16x8_neon, export=1
+function pixel_satd_16x8_neon, export=1
mov x4, x30
- bl x264_satd_16x4_neon
+ bl satd_16x4_neon
add v30.8h, v0.8h, v1.8h
add v31.8h, v2.8h, v3.8h
- bl x264_satd_16x4_neon
+ bl satd_16x4_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
add v30.8h, v30.8h, v0.8h
@@ -867,26 +867,26 @@ function x264_pixel_satd_16x8_neon, export=1
ret x4
endfunc
-function x264_pixel_satd_16x16_neon, export=1
+function pixel_satd_16x16_neon, export=1
mov x4, x30
- bl x264_satd_16x4_neon
+ bl satd_16x4_neon
add v30.8h, v0.8h, v1.8h
add v31.8h, v2.8h, v3.8h
- bl x264_satd_16x4_neon
+ bl satd_16x4_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
add v30.8h, v30.8h, v0.8h
add v31.8h, v31.8h, v1.8h
- bl x264_satd_16x4_neon
+ bl satd_16x4_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
add v30.8h, v30.8h, v0.8h
add v31.8h, v31.8h, v1.8h
- bl x264_satd_16x4_neon
+ bl satd_16x4_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
add v30.8h, v30.8h, v0.8h
@@ -898,7 +898,7 @@ function x264_pixel_satd_16x16_neon, export=1
ret x4
endfunc
-function x264_satd_16x4_neon
+function satd_16x4_neon
ld1 {v1.16b}, [x2], x3
ld1 {v0.16b}, [x0], x1
ld1 {v3.16b}, [x2], x3
@@ -920,10 +920,10 @@ function x264_satd_16x4_neon
SUMSUB_AB v0.8h, v1.8h, v16.8h, v17.8h
SUMSUB_AB v2.8h, v3.8h, v18.8h, v19.8h
- b x264_satd_8x4v_8x8h_neon
+ b satd_8x4v_8x8h_neon
endfunc
-function x264_pixel_satd_4x16_neon, export=1
+function pixel_satd_4x16_neon, export=1
mov x4, x30
ld1 {v1.s}[0], [x2], x3
ld1 {v0.s}[0], [x0], x1
@@ -969,7 +969,7 @@ function x264_pixel_satd_4x16_neon, export=1
SUMSUB_AB v0.8h, v1.8h, v16.8h, v17.8h
SUMSUB_AB v2.8h, v3.8h, v18.8h, v19.8h
- bl x264_satd_8x4v_8x8h_neon
+ bl satd_8x4v_8x8h_neon
add v30.8h, v0.8h, v1.8h
add v31.8h, v2.8h, v3.8h
@@ -979,7 +979,7 @@ function x264_pixel_satd_4x16_neon, export=1
ret x4
endfunc
-function x264_pixel_sa8d_8x8_neon, export=1
+function pixel_sa8d_8x8_neon, export=1
mov x4, x30
bl pixel_sa8d_8x8_neon
add v0.8h, v0.8h, v1.8h
@@ -990,7 +990,7 @@ function x264_pixel_sa8d_8x8_neon, export=1
ret x4
endfunc
-function x264_pixel_sa8d_16x16_neon, export=1
+function pixel_sa8d_16x16_neon, export=1
mov x4, x30
bl pixel_sa8d_8x8_neon
uaddlp v30.4s, v0.8h
@@ -1112,7 +1112,7 @@ endfunc
sa8d_satd_8x8
sa8d_satd_8x8 satd_
-function x264_pixel_sa8d_satd_16x16_neon, export=1
+function pixel_sa8d_satd_16x16_neon, export=1
mov x4, x30
bl pixel_sa8d_satd_8x8_neon
uaddlp v30.4s, v0.8h
@@ -1150,25 +1150,25 @@ function x264_pixel_sa8d_satd_16x16_neon, export=1
endfunc
.macro HADAMARD_AC w h
-function x264_pixel_hadamard_ac_\w\()x\h\()_neon, export=1
+function pixel_hadamard_ac_\w\()x\h\()_neon, export=1
movrel x5, mask_ac_4_8
mov x4, x30
ld1 {v30.8h,v31.8h}, [x5]
movi v28.16b, #0
movi v29.16b, #0
- bl x264_hadamard_ac_8x8_neon
+ bl hadamard_ac_8x8_neon
.if \h > 8
- bl x264_hadamard_ac_8x8_neon
+ bl hadamard_ac_8x8_neon
.endif
.if \w > 8
sub x0, x0, x1, lsl #3
add x0, x0, #8
- bl x264_hadamard_ac_8x8_neon
+ bl hadamard_ac_8x8_neon
.endif
.if \w * \h == 256
sub x0, x0, x1, lsl #4
- bl x264_hadamard_ac_8x8_neon
+ bl hadamard_ac_8x8_neon
.endif
addv s1, v29.4s
@@ -1188,7 +1188,7 @@ HADAMARD_AC 16, 8
HADAMARD_AC 16, 16
// v28: satd v29: sa8d v30: mask_ac4 v31: mask_ac8
-function x264_hadamard_ac_8x8_neon
+function hadamard_ac_8x8_neon
ld1 {v16.8b}, [x0], x1
ld1 {v17.8b}, [x0], x1
ld1 {v18.8b}, [x0], x1
@@ -1280,7 +1280,7 @@ function x264_hadamard_ac_8x8_neon
endfunc
-function x264_pixel_ssim_4x4x2_core_neon, export=1
+function pixel_ssim_4x4x2_core_neon, export=1
ld1 {v0.8b}, [x0], x1
ld1 {v2.8b}, [x2], x3
umull v16.8h, v0.8b, v0.8b
@@ -1339,7 +1339,7 @@ function x264_pixel_ssim_4x4x2_core_neon, export=1
ret
endfunc
-function x264_pixel_ssim_end4_neon, export=1
+function pixel_ssim_end4_neon, export=1
mov x5, #4
ld1 {v16.4s,v17.4s}, [x0], #32
ld1 {v18.4s,v19.4s}, [x1], #32
diff --git a/common/aarch64/predict-a.S b/common/aarch64/predict-a.S
index 0bfb9b4..427c8fc 100644
--- a/common/aarch64/predict-a.S
+++ b/common/aarch64/predict-a.S
@@ -62,7 +62,7 @@ endconst
.endm
-function x264_predict_4x4_h_aarch64, export=1
+function predict_4x4_h_aarch64, export=1
ldrb w1, [x0, #0*FDEC_STRIDE-1]
mov w5, #0x01010101
ldrb w2, [x0, #1*FDEC_STRIDE-1]
@@ -79,7 +79,7 @@ function x264_predict_4x4_h_aarch64, export=1
ret
endfunc
-function x264_predict_4x4_v_aarch64, export=1
+function predict_4x4_v_aarch64, export=1
ldr w1, [x0, #0 - 1 * FDEC_STRIDE]
str w1, [x0, #0 + 0 * FDEC_STRIDE]
str w1, [x0, #0 + 1 * FDEC_STRIDE]
@@ -88,7 +88,7 @@ function x264_predict_4x4_v_aarch64, export=1
ret
endfunc
-function x264_predict_4x4_dc_neon, export=1
+function predict_4x4_dc_neon, export=1
sub x1, x0, #FDEC_STRIDE
ldrb w4, [x0, #-1 + 0 * FDEC_STRIDE]
ldrb w5, [x0, #-1 + 1 * FDEC_STRIDE]
@@ -110,7 +110,7 @@ function x264_predict_4x4_dc_neon, export=1
ret
endfunc
-function x264_predict_4x4_dc_top_neon, export=1
+function predict_4x4_dc_top_neon, export=1
sub x1, x0, #FDEC_STRIDE
ldr s0, [x1]
uaddlv h0, v0.8b
@@ -124,7 +124,7 @@ function x264_predict_4x4_dc_top_neon, export=1
ret
endfunc
-function x264_predict_4x4_ddr_neon, export=1
+function predict_4x4_ddr_neon, export=1
sub x1, x0, #FDEC_STRIDE+1
mov x7, #FDEC_STRIDE
ld1 {v0.8b}, [x1], x7 // # -FDEC_STRIDE-1
@@ -152,7 +152,7 @@ function x264_predict_4x4_ddr_neon, export=1
ret
endfunc
-function x264_predict_4x4_ddl_neon, export=1
+function predict_4x4_ddl_neon, export=1
sub x0, x0, #FDEC_STRIDE
mov x7, #FDEC_STRIDE
ld1 {v0.8b}, [x0], x7
@@ -171,7 +171,7 @@ function x264_predict_4x4_ddl_neon, export=1
ret
endfunc
-function x264_predict_8x8_dc_neon, export=1
+function predict_8x8_dc_neon, export=1
mov x7, #FDEC_STRIDE
ld1 {v0.16b}, [x1], #16
ld1 {v1.8b}, [x1]
@@ -187,7 +187,7 @@ function x264_predict_8x8_dc_neon, export=1
ret
endfunc
-function x264_predict_8x8_h_neon, export=1
+function predict_8x8_h_neon, export=1
mov x7, #FDEC_STRIDE
ld1 {v16.16b}, [x1]
dup v0.8b, v16.b[14]
@@ -209,7 +209,7 @@ function x264_predict_8x8_h_neon, export=1
ret
endfunc
-function x264_predict_8x8_v_neon, export=1
+function predict_8x8_v_neon, export=1
add x1, x1, #16
mov x7, #FDEC_STRIDE
ld1 {v0.8b}, [x1]
@@ -219,7 +219,7 @@ function x264_predict_8x8_v_neon, export=1
ret
endfunc
-function x264_predict_8x8_ddl_neon, export=1
+function predict_8x8_ddl_neon, export=1
add x1, x1, #16
mov x7, #FDEC_STRIDE
ld1 {v0.16b}, [x1]
@@ -248,7 +248,7 @@ function x264_predict_8x8_ddl_neon, export=1
ret
endfunc
-function x264_predict_8x8_ddr_neon, export=1
+function predict_8x8_ddr_neon, export=1
ld1 {v0.16b,v1.16b}, [x1]
ext v2.16b, v0.16b, v1.16b, #7
ext v4.16b, v0.16b, v1.16b, #9
@@ -278,7 +278,7 @@ function x264_predict_8x8_ddr_neon, export=1
ret
endfunc
-function x264_predict_8x8_vl_neon, export=1
+function predict_8x8_vl_neon, export=1
add x1, x1, #16
mov x7, #FDEC_STRIDE
@@ -309,7 +309,7 @@ function x264_predict_8x8_vl_neon, export=1
ret
endfunc
-function x264_predict_8x8_vr_neon, export=1
+function predict_8x8_vr_neon, export=1
add x1, x1, #8
mov x7, #FDEC_STRIDE
ld1 {v2.16b}, [x1]
@@ -343,7 +343,7 @@ function x264_predict_8x8_vr_neon, export=1
ret
endfunc
-function x264_predict_8x8_hd_neon, export=1
+function predict_8x8_hd_neon, export=1
add x1, x1, #7
mov x7, #FDEC_STRIDE
@@ -378,7 +378,7 @@ function x264_predict_8x8_hd_neon, export=1
ret
endfunc
-function x264_predict_8x8_hu_neon, export=1
+function predict_8x8_hu_neon, export=1
add x1, x1, #7
mov x7, #FDEC_STRIDE
ld1 {v7.8b}, [x1]
@@ -416,7 +416,7 @@ function x264_predict_8x8_hu_neon, export=1
endfunc
-function x264_predict_8x8c_dc_top_neon, export=1
+function predict_8x8c_dc_top_neon, export=1
sub x2, x0, #FDEC_STRIDE
mov x1, #FDEC_STRIDE
ld1 {v0.8b}, [x2]
@@ -429,7 +429,7 @@ function x264_predict_8x8c_dc_top_neon, export=1
b pred8x8c_dc_end
endfunc
-function x264_predict_8x8c_dc_left_neon, export=1
+function predict_8x8c_dc_left_neon, export=1
ldrb w2, [x0, #0 * FDEC_STRIDE - 1]
ldrb w3, [x0, #1 * FDEC_STRIDE - 1]
ldrb w4, [x0, #2 * FDEC_STRIDE - 1]
@@ -452,7 +452,7 @@ function x264_predict_8x8c_dc_left_neon, export=1
b pred8x8c_dc_end
endfunc
-function x264_predict_8x8c_dc_neon, export=1
+function predict_8x8c_dc_neon, export=1
mov x1, #FDEC_STRIDE
sub x2, x0, #FDEC_STRIDE
ldrb w10, [x0, #0 * FDEC_STRIDE - 1]
@@ -498,7 +498,7 @@ pred8x8c_dc_end:
ret
endfunc
-function x264_predict_8x8c_h_neon, export=1
+function predict_8x8c_h_neon, export=1
sub x1, x0, #1
mov x7, #FDEC_STRIDE
.rept 4
@@ -510,7 +510,7 @@ function x264_predict_8x8c_h_neon, export=1
ret
endfunc
-function x264_predict_8x8c_v_aarch64, export=1
+function predict_8x8c_v_aarch64, export=1
ldr x1, [x0, #-FDEC_STRIDE]
.irp c, 0,1,2,3,4,5,6,7
str x1, [x0, #\c * FDEC_STRIDE]
@@ -518,7 +518,7 @@ function x264_predict_8x8c_v_aarch64, export=1
ret
endfunc
-function x264_predict_8x8c_p_neon, export=1
+function predict_8x8c_p_neon, export=1
sub x3, x0, #FDEC_STRIDE
mov x1, #FDEC_STRIDE
add x2, x3, #4
@@ -577,7 +577,7 @@ endfunc
add \wd, \wd, \t1
.endm
-function x264_predict_8x16c_h_neon, export=1
+function predict_8x16c_h_neon, export=1
sub x2, x0, #1
add x3, x0, #FDEC_STRIDE - 1
mov x7, #2 * FDEC_STRIDE
@@ -595,7 +595,7 @@ function x264_predict_8x16c_h_neon, export=1
ret
endfunc
-function x264_predict_8x16c_v_neon, export=1
+function predict_8x16c_v_neon, export=1
sub x1, x0, #FDEC_STRIDE
mov x2, #2 * FDEC_STRIDE
ld1 {v0.8b}, [x1], x2
@@ -606,7 +606,7 @@ function x264_predict_8x16c_v_neon, export=1
ret
endfunc
-function x264_predict_8x16c_p_neon, export=1
+function predict_8x16c_p_neon, export=1
movrel x4, p16weight
ld1 {v17.8h}, [x4]
sub x3, x0, #FDEC_STRIDE
@@ -673,7 +673,7 @@ function x264_predict_8x16c_p_neon, export=1
ret
endfunc
-function x264_predict_8x16c_dc_neon, export=1
+function predict_8x16c_dc_neon, export=1
mov x1, #FDEC_STRIDE
sub x10, x0, #FDEC_STRIDE
loadsum4 w2, w3, w4, w5, x0, 0
@@ -718,7 +718,7 @@ function x264_predict_8x16c_dc_neon, export=1
ret
endfunc
-function x264_predict_8x16c_dc_left_neon, export=1
+function predict_8x16c_dc_left_neon, export=1
mov x1, #FDEC_STRIDE
ldrb w2, [x0, # 0 * FDEC_STRIDE - 1]
ldrb w3, [x0, # 1 * FDEC_STRIDE - 1]
@@ -772,7 +772,7 @@ function x264_predict_8x16c_dc_left_neon, export=1
ret
endfunc
-function x264_predict_8x16c_dc_top_neon, export=1
+function predict_8x16c_dc_top_neon, export=1
sub x2, x0, #FDEC_STRIDE
mov x1, #FDEC_STRIDE
ld1 {v0.8b}, [x2]
@@ -789,7 +789,7 @@ function x264_predict_8x16c_dc_top_neon, export=1
endfunc
-function x264_predict_16x16_dc_top_neon, export=1
+function predict_16x16_dc_top_neon, export=1
sub x2, x0, #FDEC_STRIDE
mov x1, #FDEC_STRIDE
ld1 {v0.16b}, [x2]
@@ -799,7 +799,7 @@ function x264_predict_16x16_dc_top_neon, export=1
b pred16x16_dc_end
endfunc
-function x264_predict_16x16_dc_left_neon, export=1
+function predict_16x16_dc_left_neon, export=1
sub x2, x0, #1
mov x1, #FDEC_STRIDE
ldcol.16 v0, x2, x1
@@ -809,7 +809,7 @@ function x264_predict_16x16_dc_left_neon, export=1
b pred16x16_dc_end
endfunc
-function x264_predict_16x16_dc_neon, export=1
+function predict_16x16_dc_neon, export=1
sub x3, x0, #FDEC_STRIDE
sub x2, x0, #1
mov x1, #FDEC_STRIDE
@@ -827,7 +827,7 @@ pred16x16_dc_end:
ret
endfunc
-function x264_predict_16x16_h_neon, export=1
+function predict_16x16_h_neon, export=1
sub x1, x0, #1
mov x7, #FDEC_STRIDE
.rept 8
@@ -839,7 +839,7 @@ function x264_predict_16x16_h_neon, export=1
ret
endfunc
-function x264_predict_16x16_v_neon, export=1
+function predict_16x16_v_neon, export=1
sub x0, x0, #FDEC_STRIDE
mov x7, #FDEC_STRIDE
ld1 {v0.16b}, [x0], x7
@@ -849,7 +849,7 @@ function x264_predict_16x16_v_neon, export=1
ret
endfunc
-function x264_predict_16x16_p_neon, export=1
+function predict_16x16_p_neon, export=1
sub x3, x0, #FDEC_STRIDE
mov x1, #FDEC_STRIDE
add x2, x3, #8
diff --git a/common/aarch64/quant-a.S b/common/aarch64/quant-a.S
index 46b971e..7de86fc 100644
--- a/common/aarch64/quant-a.S
+++ b/common/aarch64/quant-a.S
@@ -57,7 +57,7 @@
.endm
// quant_2x2_dc( int16_t dct[4], int mf, int bias )
-function x264_quant_2x2_dc_neon, export=1
+function quant_2x2_dc_neon, export=1
ld1 {v0.4h}, [x0]
dup v2.4h, w2
dup v1.4h, w1
@@ -73,7 +73,7 @@ function x264_quant_2x2_dc_neon, export=1
endfunc
// quant_4x4_dc( int16_t dct[16], int mf, int bias )
-function x264_quant_4x4_dc_neon, export=1
+function quant_4x4_dc_neon, export=1
ld1 {v16.8h,v17.8h}, [x0]
abs v18.8h, v16.8h
abs v19.8h, v17.8h
@@ -85,7 +85,7 @@ function x264_quant_4x4_dc_neon, export=1
endfunc
// quant_4x4( int16_t dct[16], uint16_t mf[16], uint16_t bias[16] )
-function x264_quant_4x4_neon, export=1
+function quant_4x4_neon, export=1
ld1 {v16.8h,v17.8h}, [x0]
abs v18.8h, v16.8h
abs v19.8h, v17.8h
@@ -97,7 +97,7 @@ function x264_quant_4x4_neon, export=1
endfunc
// quant_4x4x4( int16_t dct[4][16], uint16_t mf[16], uint16_t bias[16] )
-function x264_quant_4x4x4_neon, export=1
+function quant_4x4x4_neon, export=1
ld1 {v16.8h,v17.8h}, [x0]
abs v18.8h, v16.8h
abs v19.8h, v17.8h
@@ -140,7 +140,7 @@ function x264_quant_4x4x4_neon, export=1
endfunc
// quant_8x8( int16_t dct[64], uint16_t mf[64], uint16_t bias[64] )
-function x264_quant_8x8_neon, export=1
+function quant_8x8_neon, export=1
ld1 {v16.8h,v17.8h}, [x0]
abs v18.8h, v16.8h
abs v19.8h, v17.8h
@@ -177,7 +177,7 @@ endfunc
// dequant_4x4( int16_t dct[16], int dequant_mf[6][16], int i_qp )
.macro DEQUANT size bits
-function x264_dequant_\size\()_neon, export=1
+function dequant_\size\()_neon, export=1
DEQUANT_START \bits+2, \bits
.ifc \size, 8x8
mov w2, #4
@@ -258,7 +258,7 @@ DEQUANT 4x4, 4
DEQUANT 8x8, 6
// dequant_4x4_dc( int16_t dct[16], int dequant_mf[6][16], int i_qp )
-function x264_dequant_4x4_dc_neon, export=1
+function dequant_4x4_dc_neon, export=1
DEQUANT_START 6, 6, yes
b.lt dequant_4x4_dc_rshift
@@ -303,9 +303,9 @@ dequant_4x4_dc_rshift:
endfunc
.macro decimate_score_1x size
-function x264_decimate_score\size\()_neon, export=1
+function decimate_score\size\()_neon, export=1
ld1 {v0.8h,v1.8h}, [x0]
- movrel x5, X(x264_decimate_table4)
+ movrel x5, X(decimate_table4)
movi v3.16b, #0x01
sqxtn v0.8b, v0.8h
sqxtn2 v0.16b, v1.8h
@@ -348,7 +348,7 @@ const mask64, align=6
.byte 0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01
endconst
-function x264_decimate_score64_neon, export=1
+function decimate_score64_neon, export=1
ld1 {v0.8h,v1.8h}, [x0], #32
ld1 {v2.8h,v3.8h}, [x0], #32
ld1 {v4.8h,v5.8h}, [x0], #32
@@ -391,7 +391,7 @@ function x264_decimate_score64_neon, export=1
mvn x1, x1
mov w0, #0
cbz x1, 0f
- movrel x5, X(x264_decimate_table8)
+ movrel x5, X(decimate_table8)
1:
clz x3, x1
lsl x1, x1, x3
@@ -407,7 +407,7 @@ function x264_decimate_score64_neon, export=1
endfunc
// int coeff_last( int16_t *l )
-function x264_coeff_last4_aarch64, export=1
+function coeff_last4_aarch64, export=1
ldr x2, [x0]
mov w4, #3
clz x0, x2
@@ -415,7 +415,7 @@ function x264_coeff_last4_aarch64, export=1
ret
endfunc
-function x264_coeff_last8_aarch64, export=1
+function coeff_last8_aarch64, export=1
ldr x3, [x0, #8]
mov w4, #7
clz x2, x3
@@ -430,7 +430,7 @@ function x264_coeff_last8_aarch64, export=1
endfunc
.macro COEFF_LAST_1x size
-function x264_coeff_last\size\()_neon, export=1
+function coeff_last\size\()_neon, export=1
.if \size == 15
sub x0, x0, #2
.endif
@@ -450,7 +450,7 @@ endfunc
COEFF_LAST_1x 15
COEFF_LAST_1x 16
-function x264_coeff_last64_neon, export=1
+function coeff_last64_neon, export=1
ld1 {v0.8h,v1.8h,v2.8h,v3.8h}, [x0], 64
movi v31.8h, #8
movi v30.8h, #1
@@ -523,7 +523,7 @@ endfunc
mov w0, w7
.endm
-function x264_coeff_level_run4_aarch64, export=1
+function coeff_level_run4_aarch64, export=1
ldr x2, [x0]
coeff_level_run_start 4
@@ -534,7 +534,7 @@ function x264_coeff_level_run4_aarch64, export=1
endfunc
.macro X264_COEFF_LEVEL_RUN size
-function x264_coeff_level_run\size\()_neon, export=1
+function coeff_level_run\size\()_neon, export=1
.if \size == 15
sub x0, x0, #2
.endif
@@ -566,7 +566,7 @@ X264_COEFF_LEVEL_RUN 8
X264_COEFF_LEVEL_RUN 15
X264_COEFF_LEVEL_RUN 16
-function x264_denoise_dct_neon, export=1
+function denoise_dct_neon, export=1
1: subs w3, w3, #16
ld1 {v0.8h,v1.8h}, [x0]
ld1 {v4.4s,v5.4s,v6.4s,v7.4s}, [x1]
--
2.10.0
More information about the x264-devel
mailing list