[x265] [PATCH 05/11] AArch64: Add SVE asm implementation of HBD SSE_SS
Gerda Zsejke More
gerdazsejke.more at arm.com
Tue Dec 10 16:02:14 UTC 2024
Add an SVE asm implementation of high bitdepth SSE_SS functions for
all block sizes. This implementation is 25-43% faster on Neoverse
platforms compared to the existing Neon asm implementation.
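For reference, sse_ss is the sum of squared differences between two
int16_t (residual) blocks. Below is a minimal scalar sketch of that
computation; the function name, signature and element-unit strides are
illustrative assumptions for this note, not the exact x265 primitive
declaration.

    #include <cstdint>
    #include <cstddef>

    // Illustrative reference only: sum of squared errors over a
    // width x height block of int16_t samples, strides in elements.
    static uint64_t sse_ss_ref(const int16_t *a, ptrdiff_t strideA,
                               const int16_t *b, ptrdiff_t strideB,
                               int width, int height)
    {
        uint64_t sum = 0;
        for (int y = 0; y < height; y++)
        {
            for (int x = 0; x < width; x++)
            {
                int64_t d = a[x] - b[x];  // signed 16-bit difference
                sum += (uint64_t)(d * d); // accumulate in 64 bits
            }
            a += strideA;
            b += strideB;
        }
        return sum;
    }

The SVE kernels below compute the same sum by taking the 16-bit
differences with SUB and squaring/accumulating them into 64-bit lanes
with SDOT (.d accumulator, .h sources), so the running total cannot
overflow 32 bits even for 64x64 blocks; the strides are doubled up
front (add x1, x1, x1) because they are passed in int16_t elements
while the loads need byte offsets.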
Change-Id: I94ba604e7f1e63ed1ea3e04591e496f79a1a9a89
---
 source/common/aarch64/asm-primitives.cpp |   7 +
 source/common/aarch64/ssd-a-sve.S        | 155 +++++++++++++++++++++++
 2 files changed, 162 insertions(+)
diff --git a/source/common/aarch64/asm-primitives.cpp b/source/common/aarch64/asm-primitives.cpp
index bba85440c..72aa1cf3d 100644
--- a/source/common/aarch64/asm-primitives.cpp
+++ b/source/common/aarch64/asm-primitives.cpp
@@ -872,6 +872,13 @@ void setupSvePrimitives(EncoderPrimitives &p)
p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].sse_pp = PFX(pixel_sse_pp_8x16_sve);
p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].sse_pp = PFX(pixel_sse_pp_16x32_sve);
p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].sse_pp = PFX(pixel_sse_pp_32x64_sve);
+
+ // sse_ss
+ p.cu[BLOCK_4x4].sse_ss = PFX(pixel_sse_ss_4x4_sve);
+ p.cu[BLOCK_8x8].sse_ss = PFX(pixel_sse_ss_8x8_sve);
+ p.cu[BLOCK_16x16].sse_ss = PFX(pixel_sse_ss_16x16_sve);
+ p.cu[BLOCK_32x32].sse_ss = PFX(pixel_sse_ss_32x32_sve);
+ p.cu[BLOCK_64x64].sse_ss = PFX(pixel_sse_ss_64x64_sve);
#endif // !HIGH_BIT_DEPTH
}
#endif // defined(HAVE_SVE2) || defined(HAVE_SVE)
diff --git a/source/common/aarch64/ssd-a-sve.S b/source/common/aarch64/ssd-a-sve.S
index 8c0b4ed68..ac0ee710d 100644
--- a/source/common/aarch64/ssd-a-sve.S
+++ b/source/common/aarch64/ssd-a-sve.S
@@ -213,4 +213,159 @@ function PFX(pixel_sse_pp_64x64_sve)
ret
endfunc
+.macro SSE_SS_4x2
+ ldr d16, [x0]
+ ldr d17, [x2]
+ ldr d18, [x0, x1]
+ ldr d19, [x2, x3]
+ sub v2.4h, v16.4h, v17.4h
+ sub v3.4h, v18.4h, v19.4h
+ sdot z0.d, z2.h, z2.h
+ sdot z0.d, z3.h, z3.h
+.endm
+
+function PFX(pixel_sse_ss_4x4_sve)
+ movi v0.4s, #0
+ add x1, x1, x1
+ add x3, x3, x3
+
+ SSE_SS_4x2
+ add x0, x0, x1, lsl #1
+ add x2, x2, x3, lsl #1
+ SSE_SS_4x2
+
+ fmov w0, s0
+ ret
+endfunc
+
+function PFX(pixel_sse_ss_8x8_sve)
+ movi v0.4s, #0
+ movi v1.4s, #0
+ add x1, x1, x1
+ add x3, x3, x3
+
+.rept 4
+ ld1 {v16.8h}, [x0], x1
+ ld1 {v17.8h}, [x2], x3
+ sub v2.8h, v16.8h, v17.8h
+ sdot z0.d, z2.h, z2.h
+ ld1 {v18.8h}, [x0], x1
+ ld1 {v19.8h}, [x2], x3
+ sub v3.8h, v18.8h, v19.8h
+ sdot z1.d, z3.h, z3.h
+.endr
+
+ add v0.2d, v0.2d, v1.2d
+ addp d0, v0.2d
+ fmov w0, s0
+ ret
+endfunc
+
+function PFX(pixel_sse_ss_16x16_sve)
+ movi v0.4s, #0
+ movi v1.4s, #0
+ add x1, x1, x1
+ add x3, x3, x3
+
+ mov w12, #16
+.Loop_sse_ss_16:
+ sub w12, w12, #1
+
+ ld1 {v16.8h-v17.8h}, [x0], x1
+ ld1 {v18.8h-v19.8h}, [x2], x3
+ sub v2.8h, v16.8h, v18.8h
+ sdot z0.d, z2.h, z2.h
+ sub v3.8h, v17.8h, v19.8h
+ sdot z1.d, z3.h, z3.h
+ cbnz w12, .Loop_sse_ss_16
+
+ add v0.2d, v0.2d, v1.2d
+ addp d0, v0.2d
+ fmov x0, d0
+ ret
+endfunc
+
+function PFX(pixel_sse_ss_32x32_sve)
+ movi v0.4s, #0
+ movi v1.4s, #0
+ add x1, x1, x1
+ add x3, x3, x3
+
+ mov w12, #32
+.Loop_sse_ss_32:
+ sub w12, w12, #1
+
+ ld1 {v16.8h-v17.8h}, [x0]
+ ld1 {v20.8h-v21.8h}, [x2]
+ sub v2.8h, v16.8h, v20.8h
+ sdot z0.d, z2.h, z2.h
+ sub v3.8h, v17.8h, v21.8h
+ sdot z1.d, z3.h, z3.h
+
+ ldp q18, q19, [x0, #32]
+ ldp q22, q23, [x2, #32]
+ sub v2.8h, v18.8h, v22.8h
+ sdot z0.d, z2.h, z2.h
+ sub v3.8h, v19.8h, v23.8h
+ sdot z1.d, z3.h, z3.h
+
+ add x0, x0, x1
+ add x2, x2, x3
+ cbnz w12, .Loop_sse_ss_32
+
+ add v0.2d, v0.2d, v1.2d
+ addp d0, v0.2d
+ fmov x0, d0
+ ret
+endfunc
+
+
+function PFX(pixel_sse_ss_64x64_sve)
+ movi v0.4s, #0
+ movi v1.4s, #0
+ add x1, x1, x1
+ add x3, x3, x3
+
+ mov w12, #64
+.Loop_sse_ss_64:
+ sub w12, w12, #1
+
+ ld1 {v16.8h-v17.8h}, [x0]
+ ld1 {v20.8h-v21.8h}, [x2]
+ sub v2.8h, v16.8h, v20.8h
+ sdot z0.d, z2.h, z2.h
+ sub v3.8h, v17.8h, v21.8h
+ sdot z1.d, z3.h, z3.h
+
+ ldp q18, q19, [x0, #32]
+ ldp q22, q23, [x2, #32]
+ sub v2.8h, v18.8h, v22.8h
+ sdot z0.d, z2.h, z2.h
+ sub v3.8h, v19.8h, v23.8h
+ sdot z1.d, z3.h, z3.h
+
+ ldp q16, q17, [x0, #64]
+ ldp q20, q21, [x2, #64]
+ sub v2.8h, v16.8h, v20.8h
+ sdot z0.d, z2.h, z2.h
+ sub v3.8h, v17.8h, v21.8h
+ sdot z1.d, z3.h, z3.h
+
+ ldp q18, q19, [x0, #96]
+ ldp q22, q23, [x2, #96]
+ sub v2.8h, v18.8h, v22.8h
+ sdot z0.d, z2.h, z2.h
+ sub v3.8h, v19.8h, v23.8h
+ sdot z1.d, z3.h, z3.h
+
+ add x0, x0, x1
+ add x2, x2, x3
+ cbnz w12, .Loop_sse_ss_64
+
+ add v0.2d, v0.2d, v1.2d
+ addp d0, v0.2d
+ fmov x0, d0
+ ret
+endfunc
+
#endif // HIGH_BIT_DEPTH
--
2.39.5 (Apple Git-154)