[x265] [PATCH 03/11] AArch64: Add SVE asm implementation of HBD SSE_PP

Gerda Zsejke More gerdazsejke.more at arm.com
Tue Dec 10 16:01:13 UTC 2024


Add an SVE asm implementation of high bitdepth SSE_PP functions for
all block sizes. This implementation is 14-34% faster on Neoverse
platforms than the existing Neon asm implementation.

Change-Id: Ic924cd51d91a5ab0f85045ea8a9013dc25b08256
---
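Reviewer note (not part of the applied commit): the sketch below is a
minimal scalar reference of what the HBD sse_pp primitive computes,
assuming 16-bit pixels and strides given in pixels; the function name,
template parameters and uint64_t return type are illustrative only. The
SVE kernels in this patch compute the same value by taking the per-row
absolute difference with UABD and accumulating the squares with the
64-bit UDOT form, so each .d lane gathers the squares of four 16-bit
differences per instruction.

    #include <cstdint>

    // Illustrative scalar reference: sum of squared errors between two
    // W x H blocks of 16-bit pixels (strides are in pixels, not bytes).
    template<int W, int H>
    static uint64_t sse_pp_ref(const uint16_t *pix1, intptr_t stride1,
                               const uint16_t *pix2, intptr_t stride2)
    {
        uint64_t sum = 0;
        for (int y = 0; y < H; y++)
        {
            for (int x = 0; x < W; x++)
            {
                int d = pix1[x] - pix2[x]; // signed sample difference
                sum += (int64_t)d * d;     // accumulate squared error
            }
            pix1 += stride1;
            pix2 += stride2;
        }
        return sum;
    }

A call such as sse_pp_ref<16, 16>(src, srcStride, rec, recStride) would
follow the same strides-in-pixels convention that the assembly converts
to byte strides with the initial add x1, x1, x1 / add x3, x3, x3.
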
 source/common/CMakeLists.txt             |   2 +-
 source/common/aarch64/asm-primitives.cpp |  20 ++-
 source/common/aarch64/ssd-a-sve.S        | 216 +++++++++++++++++++++++
 3 files changed, 235 insertions(+), 3 deletions(-)
 create mode 100644 source/common/aarch64/ssd-a-sve.S

diff --git a/source/common/CMakeLists.txt b/source/common/CMakeLists.txt
index aacc0ef62..ca4282c6d 100644
--- a/source/common/CMakeLists.txt
+++ b/source/common/CMakeLists.txt
@@ -114,7 +114,7 @@ if(ENABLE_ASSEMBLY AND (ARM64 OR CROSS_COMPILE_ARM64))
     # Add Arm assembly files here.
     set(A_SRCS asm.S mc-a.S mc-a-common.S sad-a.S pixel-util.S pixel-util-common.S p2s.S p2s-common.S blockcopy8.S blockcopy8-common.S ssd-a.S ssd-a-common.S intrapred.S dct.S)
     set(A_SRCS_NEON_DOTPROD sad-neon-dotprod.S ssd-neon-dotprod.S)
-    set(A_SRCS_SVE asm-sve.S blockcopy8-sve.S p2s-sve.S pixel-util-sve.S)
+    set(A_SRCS_SVE asm-sve.S blockcopy8-sve.S p2s-sve.S pixel-util-sve.S ssd-a-sve.S)
     set(A_SRCS_SVE2 mc-a-sve2.S pixel-util-sve2.S ssd-a-sve2.S)
     set(VEC_PRIMITIVES)
 
diff --git a/source/common/aarch64/asm-primitives.cpp b/source/common/aarch64/asm-primitives.cpp
index 7012991c3..ad61a39ad 100644
--- a/source/common/aarch64/asm-primitives.cpp
+++ b/source/common/aarch64/asm-primitives.cpp
@@ -856,9 +856,25 @@ void setupSvePrimitives(EncoderPrimitives &p)
     // sa8d
     p.cu[BLOCK_4x4].sa8d   = PFX(pixel_satd_4x4_sve);
     p.chroma[X265_CSP_I420].cu[BLOCK_8x8].sa8d = PFX(pixel_satd_4x4_sve);
-#endif
+#else // HIGH_BIT_DEPTH
+    // sse_pp
+    p.cu[BLOCK_4x4].sse_pp   = PFX(pixel_sse_pp_4x4_sve);
+    p.cu[BLOCK_8x8].sse_pp   = PFX(pixel_sse_pp_8x8_sve);
+    p.cu[BLOCK_16x16].sse_pp = PFX(pixel_sse_pp_16x16_sve);
+    p.cu[BLOCK_32x32].sse_pp = PFX(pixel_sse_pp_32x32_sve);
+    p.cu[BLOCK_64x64].sse_pp = PFX(pixel_sse_pp_64x64_sve);
+
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_4x4].sse_pp   = PFX(pixel_sse_pp_4x4_sve);
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].sse_pp   = PFX(pixel_sse_pp_8x8_sve);
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].sse_pp = PFX(pixel_sse_pp_16x16_sve);
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].sse_pp = PFX(pixel_sse_pp_32x32_sve);
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_4x8].sse_pp   = PFX(pixel_sse_pp_4x8_sve);
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].sse_pp  = PFX(pixel_sse_pp_8x16_sve);
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].sse_pp = PFX(pixel_sse_pp_16x32_sve);
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].sse_pp = PFX(pixel_sse_pp_32x64_sve);
+#endif // !HIGH_BIT_DEPTH
 }
-#endif
+#endif // defined(HAVE_SVE2) || defined(HAVE_SVE)
 
 #if defined(HAVE_SVE2)
 #if !HIGH_BIT_DEPTH
diff --git a/source/common/aarch64/ssd-a-sve.S b/source/common/aarch64/ssd-a-sve.S
new file mode 100644
index 000000000..8c0b4ed68
--- /dev/null
+++ b/source/common/aarch64/ssd-a-sve.S
@@ -0,0 +1,216 @@
+/*****************************************************************************
+ * Copyright (C) 2024 MulticoreWare, Inc
+ *
+ * Authors: Gerda Zsejke More <gerdazsejke.more at arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "asm.S"
+#include "ssd-a-common.S"
+
+#ifdef __APPLE__
+.section __RODATA,__rodata
+#else
+.section .rodata
+#endif
+
+.align 4
+
+.text
+
+.arch armv8-a+sve
+
+#if HIGH_BIT_DEPTH
+.macro SSE_PP_4x2
+    ldr             d16, [x0]
+    ldr             d17, [x2]
+    ldr             d18, [x0, x1]
+    ldr             d19, [x2, x3]
+    uabd            v2.4h, v16.4h, v17.4h
+    uabd            v3.4h, v18.4h, v19.4h
+    udot            z0.d, z2.h, z2.h
+    udot            z0.d, z3.h, z3.h
+.endm
+
+.macro SSE_PP_4xN h
+function PFX(pixel_sse_pp_4x\h\()_sve)
+    movi            v0.4s, #0
+    add             x1, x1, x1
+    add             x3, x3, x3
+
+.rept (\h / 2) - 1
+    SSE_PP_4x2
+    add             x0, x0, x1, lsl #1
+    add             x2, x2, x3, lsl #1
+.endr
+    SSE_PP_4x2
+
+    fmov            w0, s0
+    ret
+endfunc
+.endm
+
+SSE_PP_4xN 4
+SSE_PP_4xN 8
+
+.macro SSE_PP_8xN h
+function PFX(pixel_sse_pp_8x\h\()_sve)
+    movi            v0.4s, #0
+    movi            v1.4s, #0
+    add             x1, x1, x1
+    add             x3, x3, x3
+
+.rept \h / 2
+    ld1             {v16.8h}, [x0], x1
+    ld1             {v17.8h}, [x2], x3
+    uabd            v2.8h, v16.8h, v17.8h
+    udot            z0.d, z2.h, z2.h
+    ld1             {v18.8h}, [x0], x1
+    ld1             {v19.8h}, [x2], x3
+    uabd            v3.8h, v18.8h, v19.8h
+    udot            z1.d, z3.h, z3.h
+.endr
+
+    add             v0.2d, v0.2d, v1.2d
+    addp            d0, v0.2d
+    fmov            w0, s0
+    ret
+endfunc
+.endm
+
+SSE_PP_8xN 8
+SSE_PP_8xN 16
+
+.macro SSE_PP_16xN h
+function PFX(pixel_sse_pp_16x\h\()_sve)
+    movi            v0.4s, #0
+    movi            v1.4s, #0
+    add             x1, x1, x1
+    add             x3, x3, x3
+
+    mov             w12, #\h
+.Loop_sse_pp_16x\h:
+    sub             w12, w12, #1
+
+    ld1             {v16.8h-v17.8h}, [x0], x1
+    ld1             {v18.8h-v19.8h}, [x2], x3
+    uabd            v2.8h, v16.8h, v18.8h
+    udot            z0.d, z2.h, z2.h
+    uabd            v3.8h, v17.8h, v19.8h
+    udot            z1.d, z3.h, z3.h
+    cbnz            w12, .Loop_sse_pp_16x\h
+
+    add             v0.2d, v0.2d, v1.2d
+    addp            d0, v0.2d
+    fmov            x0, d0
+    ret
+endfunc
+.endm
+
+SSE_PP_16xN 16
+SSE_PP_16xN 32
+
+.macro SSE_PP_32xN h
+function PFX(pixel_sse_pp_32x\h\()_sve)
+    movi            v0.4s, #0
+    movi            v1.4s, #0
+    add             x1, x1, x1
+    add             x3, x3, x3
+
+    mov             w12, #\h
+.Loop_sse_pp_32x\h:
+    sub             w12, w12, #1
+
+    ld1             {v16.8h-v17.8h}, [x0]
+    ld1             {v20.8h-v21.8h}, [x2]
+    uabd            v2.8h, v16.8h, v20.8h
+    udot            z0.d, z2.h, z2.h
+    uabd            v3.8h, v17.8h, v21.8h
+    udot            z1.d, z3.h, z3.h
+
+    ldp             q18, q19, [x0, #32]
+    ldp             q22, q23, [x2, #32]
+    uabd            v2.8h, v18.8h, v22.8h
+    udot            z0.d, z2.h, z2.h
+    uabd            v3.8h, v19.8h, v23.8h
+    udot            z1.d, z3.h, z3.h
+
+    add             x0, x0, x1
+    add             x2, x2, x3
+    cbnz            w12, .Loop_sse_pp_32x\h
+
+    add             v0.2d, v0.2d, v1.2d
+    addp            d0, v0.2d
+    fmov            x0, d0
+    ret
+endfunc
+.endm
+
+SSE_PP_32xN 32
+SSE_PP_32xN 64
+
+function PFX(pixel_sse_pp_64x64_sve)
+    movi            v0.4s, #0
+    movi            v1.4s, #0
+    add             x1, x1, x1
+    add             x3, x3, x3
+
+    mov             w12, #64
+.Loop_sse_pp_64x1:
+    sub             w12, w12, #1
+
+    ld1             {v16.8h-v17.8h}, [x0]
+    ld1             {v20.8h-v21.8h}, [x2]
+    uabd            v2.8h, v16.8h, v20.8h
+    udot            z0.d, z2.h, z2.h
+    uabd            v3.8h, v17.8h, v21.8h
+    udot            z1.d, z3.h, z3.h
+
+    ldp             q18, q19, [x0, #32]
+    ldp             q22, q23, [x2, #32]
+    uabd            v2.8h, v18.8h, v22.8h
+    udot            z0.d, z2.h, z2.h
+    uabd            v3.8h, v19.8h, v23.8h
+    udot            z1.d, z3.h, z3.h
+
+    ldp             q16, q17, [x0, #64]
+    ldp             q20, q21, [x2, #64]
+    uabd            v2.8h, v16.8h, v20.8h
+    udot            z0.d, z2.h, z2.h
+    uabd            v3.8h, v17.8h, v21.8h
+    udot            z1.d, z3.h, z3.h
+
+    ldp             q18, q19, [x0, #96]
+    ldp             q22, q23, [x2, #96]
+    uabd            v2.8h, v18.8h, v22.8h
+    udot            z0.d, z2.h, z2.h
+    uabd            v3.8h, v19.8h, v23.8h
+    udot            z1.d, z3.h, z3.h
+
+    add             x0, x0, x1
+    add             x2, x2, x3
+    cbnz            w12, .Loop_sse_pp_64x1
+
+    add             v0.2d, v0.2d, v1.2d
+    addp            d0, v0.2d
+    fmov            x0, d0
+    ret
+endfunc
+
+#endif // HIGH_BIT_DEPTH
-- 
2.39.5 (Apple Git-154)
