[x265] [PATCH 04/11] AArch64: Add Neon asm implementation of HBD SSE_SS

Gerda Zsejke More gerdazsejke.more at arm.com
Tue Dec 10 16:01:47 UTC 2024


Add a Neon asm implementation of high bitdepth SSE_SS functions for
all block sizes. This implementation is up to 54% faster on Neoverse
platforms compared to the existing C implementation.

Change-Id: If54d71b7d127ab38d5a93f56f829dea1227f1140
---
 source/common/aarch64/asm-primitives.cpp |  14 +-
 source/common/aarch64/ssd-a.S            | 181 +++++++++++++++++++++++
 2 files changed, 188 insertions(+), 7 deletions(-)
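
For reference, sse_ss computes the sum of squared differences between two
blocks of int16_t residual samples (hence "ss": both inputs are 16-bit
coefficients, unlike sse_pp, which compares pixels). A minimal scalar
sketch of the computation, with illustrative parameter names rather than
the exact x265 pixel_sse_ss_t signature:

    #include <stdint.h>
    #include <stddef.h>

    /* Scalar sketch of sse_ss: sum of squared differences between two
     * int16_t residual blocks. Strides are in elements, which is why
     * the asm below doubles x1/x3 on entry to get byte strides. */
    static uint64_t sse_ss_ref(const int16_t *pix1, ptrdiff_t stride1,
                               const int16_t *pix2, ptrdiff_t stride2,
                               int width, int height)
    {
        uint64_t sum = 0;
        for (int y = 0; y < height; y++)
        {
            for (int x = 0; x < width; x++)
            {
                int d = pix1[x] - pix2[x];      /* residual difference */
                sum += (uint64_t)((int64_t)d * d); /* accumulate square */
            }
            pix1 += stride1;
            pix2 += stride2;
        }
        return sum;
    }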

diff --git a/source/common/aarch64/asm-primitives.cpp b/source/common/aarch64/asm-primitives.cpp
index ad61a39ad..bba85440c 100644
--- a/source/common/aarch64/asm-primitives.cpp
+++ b/source/common/aarch64/asm-primitives.cpp
@@ -545,6 +545,13 @@ void setupNeonPrimitives(EncoderPrimitives &p)
     p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].sse_pp = PFX(pixel_sse_pp_16x32_neon);
     p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].sse_pp = PFX(pixel_sse_pp_32x64_neon);
 
+    // sse_ss
+    p.cu[BLOCK_4x4].sse_ss   = PFX(pixel_sse_ss_4x4_neon);
+    p.cu[BLOCK_8x8].sse_ss   = PFX(pixel_sse_ss_8x8_neon);
+    p.cu[BLOCK_16x16].sse_ss = PFX(pixel_sse_ss_16x16_neon);
+    p.cu[BLOCK_32x32].sse_ss = PFX(pixel_sse_ss_32x32_neon);
+    p.cu[BLOCK_64x64].sse_ss = PFX(pixel_sse_ss_64x64_neon);
+
 #if !HIGH_BIT_DEPTH
     // pixel_avg_pp
     ALL_LUMA_PU(pixelavg_pp[NONALIGNED], pixel_avg_pp, neon);
@@ -558,13 +565,6 @@ void setupNeonPrimitives(EncoderPrimitives &p)
     ALL_CHROMA_420_PU(addAvg[ALIGNED], addAvg, neon);
     ALL_CHROMA_422_PU(addAvg[ALIGNED], addAvg, neon);
 
-    // sse_ss
-    p.cu[BLOCK_4x4].sse_ss   = PFX(pixel_sse_ss_4x4_neon);
-    p.cu[BLOCK_8x8].sse_ss   = PFX(pixel_sse_ss_8x8_neon);
-    p.cu[BLOCK_16x16].sse_ss = PFX(pixel_sse_ss_16x16_neon);
-    p.cu[BLOCK_32x32].sse_ss = PFX(pixel_sse_ss_32x32_neon);
-    p.cu[BLOCK_64x64].sse_ss = PFX(pixel_sse_ss_64x64_neon);
-
     // ssd_s
     p.cu[BLOCK_4x4].ssd_s[NONALIGNED]   = PFX(pixel_ssd_s_4x4_neon);
     p.cu[BLOCK_8x8].ssd_s[NONALIGNED]   = PFX(pixel_ssd_s_8x8_neon);
diff --git a/source/common/aarch64/ssd-a.S b/source/common/aarch64/ssd-a.S
index 9d730897e..5c521c2c3 100644
--- a/source/common/aarch64/ssd-a.S
+++ b/source/common/aarch64/ssd-a.S
@@ -605,4 +605,185 @@ function PFX(pixel_sse_pp_64x64_neon)
     ret
 endfunc
 
+.macro SSE_SS_4x2
+    ldr             d16, [x0]
+    ldr             d17, [x2]
+    ldr             d18, [x0, x1]
+    ldr             d19, [x2, x3]
+    sub             v2.4h, v16.4h, v17.4h
+    sub             v3.4h, v18.4h, v19.4h
+    smlal           v0.4s, v2.4h, v2.4h
+    smlal           v0.4s, v3.4h, v3.4h
+.endm
+
+function PFX(pixel_sse_ss_4x4_neon)
+    movi            v0.4s, #0
+    add             x1, x1, x1
+    add             x3, x3, x3
+
+    SSE_SS_4x2
+    add             x0, x0, x1, lsl 1
+    add             x2, x2, x3, lsl 1
+    SSE_SS_4x2
+
+    ret_v0_w0
+endfunc
+
+function PFX(pixel_sse_ss_8x8_neon)
+    movi            v0.4s, #0
+    movi            v1.4s, #0
+    add             x1, x1, x1
+    add             x3, x3, x3
+.rept 8
+    ld1             {v16.8h}, [x0], x1
+    ld1             {v17.8h}, [x2], x3
+    sub             v2.8h, v16.8h, v17.8h
+    smlal           v0.4s, v2.4h, v2.4h
+    smlal2          v1.4s, v2.8h, v2.8h
+.endr
+    add             v0.4s, v0.4s, v1.4s
+    ret_v0_w0
+endfunc
+
+function PFX(pixel_sse_ss_16x16_neon)
+    movi            v0.4s, #0
+    movi            v1.4s, #0
+    add             x1, x1, x1
+    add             x3, x3, x3
+    mov             w12, #16
+.Loop_sse_ss_16:
+    sub             w12, w12, #1
+    ld1             {v16.8h-v17.8h}, [x0], x1
+    ld1             {v18.8h-v19.8h}, [x2], x3
+    sub             v2.8h, v16.8h, v18.8h
+    smlal           v0.4s, v2.4h, v2.4h
+    smlal2          v1.4s, v2.8h, v2.8h
+    sub             v3.8h, v17.8h, v19.8h
+    smlal           v0.4s, v3.4h, v3.4h
+    smlal2          v1.4s, v3.8h, v3.8h
+    cbnz            w12, .Loop_sse_ss_16
+
+    add             v0.4s, v0.4s, v1.4s
+    uaddlv          d0, v0.4s
+    fmov            x0, d0
+    ret
+endfunc
+
+function PFX(pixel_sse_ss_32x32_neon)
+    movi            v0.4s, #0
+    movi            v1.4s, #0
+    movi            v2.4s, #0
+    movi            v3.4s, #0
+    add             x1, x1, x1
+    add             x3, x3, x3
+    mov             w12, #32
+.Loop_sse_ss_32:
+    sub             w12, w12, #1
+    ld1             {v16.8h-v17.8h}, [x0]
+    ld1             {v18.8h-v19.8h}, [x2]
+    sub             v4.8h, v16.8h, v18.8h
+    smlal           v0.4s, v4.4h, v4.4h
+    smlal2          v1.4s, v4.8h, v4.8h
+    sub             v5.8h, v17.8h, v19.8h
+    smlal           v0.4s, v5.4h, v5.4h
+    smlal2          v1.4s, v5.8h, v5.8h
+
+    ldp             q16, q17, [x0, #32]
+    ldp             q18, q19, [x2, #32]
+    sub             v6.8h, v16.8h, v18.8h
+    smlal           v2.4s, v6.4h, v6.4h
+    smlal2          v3.4s, v6.8h, v6.8h
+    sub             v7.8h, v17.8h, v19.8h
+    smlal           v2.4s, v7.4h, v7.4h
+    smlal2          v3.4s, v7.8h, v7.8h
+
+    add             x0, x0, x1
+    add             x2, x2, x3
+    cbnz            w12, .Loop_sse_ss_32
+
+    uaddlp          v0.2d, v0.4s
+    uadalp          v0.2d, v1.4s
+    uadalp          v0.2d, v2.4s
+    uadalp          v0.2d, v3.4s
+    addp            d0, v0.2d
+    fmov            x0, d0
+    ret
+endfunc
+
+function PFX(pixel_sse_ss_64x64_neon)
+    movi            v0.4s, #0
+    movi            v31.4s, #0
+    add             x1, x1, x1
+    add             x3, x3, x3
+
+    mov             w12, #2
+.Loop_sse_ss_64x32:
+    sub             w12, w12, #1
+    movi            v1.4s, #0
+    movi            v2.4s, #0
+    movi            v3.4s, #0
+    movi            v4.4s, #0
+    movi            v5.4s, #0
+    movi            v6.4s, #0
+    movi            v7.4s, #0
+    movi            v8.4s, #0
+    mov             w11, #32
+.Loop_sse_ss_64x1:
+    sub             w11, w11, #1
+
+    ld1             {v16.8h-v17.8h}, [x0]
+    ld1             {v18.8h-v19.8h}, [x2]
+    sub             v20.8h, v16.8h, v18.8h
+    smlal           v1.4s, v20.4h, v20.4h
+    smlal2          v2.4s, v20.8h, v20.8h
+    sub             v21.8h, v17.8h, v19.8h
+    smlal           v1.4s, v21.4h, v21.4h
+    smlal2          v2.4s, v21.8h, v21.8h
+
+    ldp             q16, q17, [x0, #32]
+    ldp             q18, q19, [x2, #32]
+    sub             v22.8h, v16.8h, v18.8h
+    smlal           v3.4s, v22.4h, v22.4h
+    smlal2          v4.4s, v22.8h, v22.8h
+    sub             v23.8h, v17.8h, v19.8h
+    smlal           v3.4s, v23.4h, v23.4h
+    smlal2          v4.4s, v23.8h, v23.8h
+
+    ldp             q16, q17, [x0, #64]
+    ldp             q18, q19, [x2, #64]
+    sub             v20.8h, v16.8h, v18.8h
+    smlal           v5.4s, v20.4h, v20.4h
+    smlal2          v6.4s, v20.8h, v20.8h
+    sub             v21.8h, v17.8h, v19.8h
+    smlal           v5.4s, v21.4h, v21.4h
+    smlal2          v6.4s, v21.8h, v21.8h
+
+    ldp             q16, q17, [x0, #96]
+    ldp             q18, q19, [x2, #96]
+    sub             v22.8h, v16.8h, v18.8h
+    smlal           v7.4s, v22.4h, v22.4h
+    smlal2          v8.4s, v22.8h, v22.8h
+    sub             v23.8h, v17.8h, v19.8h
+    smlal           v7.4s, v23.4h, v23.4h
+    smlal2          v8.4s, v23.8h, v23.8h
+
+    add             x0, x0, x1
+    add             x2, x2, x3
+    cbnz            w11, .Loop_sse_ss_64x1
+
+    uadalp          v0.2d, v1.4s
+    uadalp          v0.2d, v2.4s
+    uadalp          v0.2d, v3.4s
+    uadalp          v0.2d, v4.4s
+    uadalp          v0.2d, v5.4s
+    uadalp          v0.2d, v6.4s
+    uadalp          v0.2d, v7.4s
+    uadalp          v0.2d, v8.4s
+    cbnz            w12, .Loop_sse_ss_64x32
+
+    addp            d0, v0.2d
+    fmov            x0, d0
+    ret
+endfunc
+
 #endif // !HIGH_BIT_DEPTH
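
The accumulation pattern is the same in every size variant: each row
computes the 16-bit difference with sub, squares and widens it into 32-bit
accumulators with smlal/smlal2, and the 32-bit partial sums are folded
into 64-bit lanes (uadalp, or uaddlv for 16x16) often enough that they do
not overflow for in-range residual data. A rough intrinsics equivalent of
the per-8-lane step, with illustrative helper names:

    #include <arm_neon.h>
    #include <stdint.h>

    /* sub -> smlal (low half) -> smlal2 (high half) for one run of
     * eight int16 samples. */
    static inline void sse_ss_step8(const int16_t *a, const int16_t *b,
                                    int32x4_t *acc_lo, int32x4_t *acc_hi)
    {
        int16x8_t d = vsubq_s16(vld1q_s16(a), vld1q_s16(b));
        *acc_lo = vmlal_s16(*acc_lo, vget_low_s16(d), vget_low_s16(d));
        *acc_hi = vmlal_high_s16(*acc_hi, d, d);
    }

    /* Periodic widening step, matching uadalp: the squares are
     * non-negative, so the unsigned pairwise add-accumulate into
     * 64-bit lanes is safe. */
    static inline uint64x2_t sse_ss_fold64(uint64x2_t acc64, int32x4_t part)
    {
        return vpadalq_u32(acc64, vreinterpretq_u32_s32(part));
    }
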
-- 
2.39.5 (Apple Git-154)
