[x265] [PATCH 299 of 307] X86: AVX512 intra_pred_ang32 mode 8 and 28 high bit depth

mythreyi at multicorewareinc.com
Sat Apr 7 04:34:57 CEST 2018


# HG changeset patch
# User Jayashri Murugan <jayashri at multicorewareinc.com>
# Date 1515789616 28800
#      Fri Jan 12 12:40:16 2018 -0800
# Node ID 624c83571d1df840e1206c46e589044fbf87ff32
# Parent  b0d00ca83af0cb2053d6eda82b6d4081236a0f5f
X86: AVX512 intra_pred_ang32 mode 8 and 28 high bit depth

Mode | AVX2 performance | AVX512 performance
-----|------------------|-------------------
  8  |      9.15x       |       9.60x
 28  |     11.30x       |      12.13x
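
Background, not part of the patch: modes 8 and 28 share HEVC prediction
angle 5, so every output sample is a 1/32-pel blend of two neighbouring
reference samples. The assembly below evaluates the blend with pmaddwd
against ang_table coefficient pairs [32 - f, f], a pd_16 rounding add,
and a 5-bit right shift. A scalar sketch of that computation
(illustrative only; names are hypothetical, this is not x265 code):

    #include <stdint.h>

    /* One sample at fractional position f (0..31) between ref[idx] and
     * ref[idx + 1]; mirrors pmaddwd + paddd pd_16 + psrld 5. */
    static inline uint16_t angSample(const uint16_t *ref, int idx, int f)
    {
        return (uint16_t)((ref[idx] * (32 - f) + ref[idx + 1] * f + 16) >> 5);
    }

    /* 32x32 block at angle 5: row r (1-based) projects to pos = 5 * r,
     * giving idx = pos >> 5 and f = pos & 31; the bracketed [f]
     * annotations in the assembly follow exactly this sequence. Mode 8
     * is the horizontal variant, hence the transposed store selected by
     * r6d / TRANSPOSE_STORE_AVX2 in the patch. */
    static void ang32_angle5_ref(uint16_t *dst, intptr_t stride,
                                 const uint16_t *ref, int bTranspose)
    {
        for (int r = 1; r <= 32; r++)
        {
            int pos = 5 * r, idx = pos >> 5, f = pos & 31;
            for (int x = 0; x < 32; x++)
            {
                uint16_t v = angSample(ref, x + idx + 1, f);
                if (bTranspose)
                    dst[x * stride + r - 1] = v;    /* mode 8  */
                else
                    dst[(r - 1) * stride + x] = v;  /* mode 28 */
            }
        }
    }

The patch splits this work into two 16-row helpers, ang16_mode_8_28 for
rows 1-16 and ang32_mode_8_28 for rows 17-32, each called twice per
32x32 block.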

diff -r b0d00ca83af0 -r 624c83571d1d source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Tue Jan 16 15:38:58 2018 +0530
+++ b/source/common/x86/asm-primitives.cpp	Fri Jan 12 12:40:16 2018 -0800
@@ -3115,6 +3115,9 @@
         p.cu[BLOCK_16x16].intra_pred[11]    = PFX(intra_pred_ang16_11_avx512);
         p.cu[BLOCK_16x16].intra_pred[25]    = PFX(intra_pred_ang16_25_avx512);
         p.cu[BLOCK_16x16].intra_pred[27]    = PFX(intra_pred_ang16_27_avx512);
+        p.cu[BLOCK_32x32].intra_pred[8]     = PFX(intra_pred_ang32_8_avx512);
+        p.cu[BLOCK_32x32].intra_pred[28]    = PFX(intra_pred_ang32_28_avx512);
+
         p.cu[BLOCK_16x16].intra_pred[5] = PFX(intra_pred_ang16_5_avx512);
         p.cu[BLOCK_16x16].intra_pred[31] = PFX(intra_pred_ang16_31_avx512);
         p.cu[BLOCK_16x16].intra_pred[4] = PFX(intra_pred_ang16_4_avx512);
diff -r b0d00ca83af0 -r 624c83571d1d source/common/x86/intrapred16.asm
--- a/source/common/x86/intrapred16.asm	Tue Jan 16 15:38:58 2018 +0530
+++ b/source/common/x86/intrapred16.asm	Fri Jan 12 12:40:16 2018 -0800
@@ -20016,9 +20016,302 @@
     lea         r3,        [ang_table_avx2 + 15 * 32]
    shl         r1d,       1
     lea         r4,        [r1 * 3]
-
     call        ang16_mode_6_30
     RET
+
+;; angle 16, modes 8 and 28
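+;; Both modes share HEVC prediction angle 5: output row r blends two
+;; adjacent reference samples with pmaddwd weights [32 - f, f], where
+;; f = (5 * r) & 31, then rounds via pd_16 (m15) and shifts right by 5.
+;; This helper covers rows 1-16. The test below primes the flags that
+;; TRANSPOSE_STORE_AVX2 uses to pick the transposed store (r6d = 0,
+;; mode 8) or the direct store (r6d != 0, mode 28).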
+cglobal ang16_mode_8_28
+    test            r6d, r6d
+
+    vbroadcasti32x8            m0, [r2 + 2]         ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    vbroadcasti32x8            m1, [r2 + 4]         ; [17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    punpcklwd       m3, m0, m1                      ; [13 12 12 11 11 10 10  9  5  4  4  3  3  2  2  1]
+    punpckhwd       m0, m1                          ; [17 16 16 15 15 14 14 13  9  8  8  7  7  6  6  5]
+
+    vbroadcasti32x8            m2, [r2 + 18]        ; [24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9]
+    vbroadcasti32x8            m4, [r2 + 20]        ; [25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10]
+    punpcklwd       m2, m4                          ; [21 20 20 19 19 18 18 17 13 12 12 11 11 10 10  9]
+
+    movu            ym14, [r3 - 10 * 32]
+    vinserti32x8    m14, [r3 - 5 * 32], 1
+    pmaddwd         m4, m3, m14                    ; [5], [10]
+    paddd           m4, m15
+    psrld           m4, 5
+    pmaddwd         m5, m0, m14
+    paddd           m5, m15
+    psrld           m5, 5
+    packusdw        m4, m5
+    vextracti32x8   ym5, m4, 1
+
+    movu            ym14, [r3]
+    vinserti32x8    m14, [r3 + 5 * 32], 1
+    pmaddwd         m6, m3, m14                    ; [15], [20]
+    paddd           m6, m15
+    psrld           m6, 5
+    pmaddwd         m9, m0, m14
+    paddd           m9, m15
+    psrld           m9, 5
+    packusdw        m6, m9
+    vextracti32x8   ym7, m6, 1
+
+    movu            ym14, [r3 + 10 * 32]
+    vinserti32x8    m14, [r3 + 15 * 32], 1
+    pmaddwd         m8, m3, m14                     ; [25], [30]
+    paddd           m8, m15
+    psrld           m8, 5
+    pmaddwd         m9, m0, m14
+    paddd           m9, m15
+    psrld           m9, 5
+    packusdw        m8, m9
+    vextracti32x8   ym9, m8, 1
+
+    palignr         m11, m0, m3, 4
+    movu            ym14, [r3 - 12 * 32]
+    vinserti32x8    m14, [r3 - 7 * 32], 1
+    pmaddwd         m10, m11, m14                     ; [3], [8]
+    paddd           m10, m15
+    psrld           m10, 5
+    palignr         m1, m2, m0, 4
+    pmaddwd         m12, m1, m14
+    paddd           m12, m15
+    psrld           m12, 5
+    packusdw        m10, m12
+    vextracti32x8   ym11, m10, 1
+
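+    ; store projection rows 1-8 (transposed when r6d = 0, i.e. mode 8)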
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 0
+
+    palignr         m7, m0, m3, 4
+    movu            ym14, [r3 - 2 * 32]
+    vinserti32x8    m14, [r3 + 3 * 32], 1
+    pmaddwd         m4, m7, m14                      ; [13], [18]
+    paddd           m4, m15
+    psrld           m4, 5
+    palignr         m1, m2, m0, 4
+    pmaddwd         m5, m1, m14
+    paddd           m5, m15
+    psrld           m5, 5
+    packusdw        m4, m5
+    vextracti32x8   ym5, m4, 1
+
+    movu            ym14, [r3 + 8 * 32]
+    vinserti32x8    m14, [r3 + 13 * 32], 1
+    pmaddwd         m6, m7, m14                      ; [23], [28]
+    paddd           m6, m15
+    psrld           m6, 5
+    pmaddwd         m8, m1, m14
+    paddd           m8, m15
+    psrld           m8, 5
+    packusdw        m6, m8
+    vextracti32x8   ym7, m6, 1
+
+    movu            ym14, [r3 - 14 * 32]
+    vinserti32x8    m14, [r3 - 9 * 32], 1
+    palignr         m1, m0, m3, 8
+    pmaddwd         m8, m1, m14                      ; [1], [6]
+    paddd           m8, m15
+    psrld           m8, 5
+    palignr         m2, m0, 8
+    pmaddwd         m9, m2, m14
+    paddd           m9, m15
+    psrld           m9, 5
+    packusdw        m8, m9
+    vextracti32x8   ym9, m8, 1
+
+    movu            ym14, [r3 - 4 * 32]
+    vinserti32x8    m14, [r3 + 1 * 32], 1
+    pmaddwd         m3, m1, m14                      ; [11], [16]
+    paddd           m3, m15
+    psrld           m3, 5
+    pmaddwd         m0, m2, m14
+    paddd           m0, m15
+    psrld           m0, 5
+    packusdw        m3, m0
+    vextracti32x8   ym1, m3, 1
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 3, 1, 0, 2, 16
+    ret
+
+;; angle 32, modes 8 and 28
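+;; Rows 17-32 of the same angle-5 projection: f = (5 * r) & 31 runs
+;; 21, 26, 31, 4, ... 27, 0. Callers advance r2 by two samples first,
+;; because the integer reference offset (pos >> 5) reaches 2 at row 17.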
+cglobal ang32_mode_8_28
+    test            r6d, r6d
+
+    vbroadcasti32x8            m0, [r2 + 2]         ; [16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1]
+    vbroadcasti32x8            m1, [r2 + 4]         ; [17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2]
+
+    punpcklwd       m3, m0, m1                      ; [13 12 12 11 11 10 10  9  5  4  4  3  3  2  2  1]
+    punpckhwd       m0, m1                          ; [17 16 16 15 15 14 14 13  9  8  8  7  7  6  6  5]
+
+    vbroadcasti32x8            m2, [r2 + 18]        ; [24 23 22 21 20 19 18 17 16 15 14 13 12 11 10  9]
+    vbroadcasti32x8            m4, [r2 + 20]        ; [25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10]
+    punpcklwd       m2, m4                          ; [21 20 20 19 19 18 18 17 13 12 12 11 11 10 10  9]
+
+    movu            ym14, [r3 + 6 * 32]
+    vinserti32x8    m14, [r3 + 11 * 32], 1
+    pmaddwd         m4, m3, m14                     ; [21], [26]
+    paddd           m4, m15
+    psrld           m4, 5
+    pmaddwd         m5, m0, m14
+    paddd           m5, m15
+    psrld           m5, 5
+    packusdw        m4, m5
+    vextracti32x8   ym5, m4, 1
+
+    pmaddwd         m6, m3, [r3 + 16 * 32]          ; [31]
+    paddd           m6, [pd_16]
+    psrld           m6, 5
+    pmaddwd         m9, m0, [r3 + 16 * 32]
+    paddd           m9, [pd_16]
+    psrld           m9, 5
+    packusdw        m6, m9
+
+    palignr         m11, m0, m3, 4
+    movu            ym14, [r3 - 11 * 32]
+    vinserti32x8    m14, [r3 - 6 * 32], 1
+    pmaddwd         m7, m11, m14                    ; [4], [9]
+    paddd           m7, m15
+    psrld           m7, 5
+    palignr         m1, m2, m0, 4
+    pmaddwd         m8, m1, m14
+    paddd           m8, m15
+    psrld           m8, 5
+    packusdw        m7, m8
+    vextracti32x8   ym8, m7, 1
+
+    movu            ym14, [r3 - 1 * 32]
+    vinserti32x8    m14, [r3 + 4 * 32], 1
+    pmaddwd         m9, m11, m14                     ; [14], [19]
+    paddd           m9, m15
+    psrld           m9, 5
+    pmaddwd         m10, m1, m14
+    paddd           m10, m15
+    psrld           m10, 5
+    packusdw        m9, m10
+    vextracti32x8   ym10, m9, 1
+
+    pmaddwd         m11, [r3 + 9 * 32]              ; [24]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    pmaddwd         m1, [r3 + 9 * 32]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    packusdw        m11, m1
+
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 0
+
+    palignr         m4, m0, m3, 4
+    pmaddwd         m4, [r3 + 14 * 32]              ; [29]
+    paddd           m4, m15
+    psrld           m4, 5
+    palignr         m5, m2, m0, 4
+    pmaddwd         m5, [r3 + 14 * 32]
+    paddd           m5, m15
+    psrld           m5, 5
+    packusdw        m4, m5
+
+    palignr         m1, m0, m3, 8
+    pmaddwd         m5, m1, [r3 - 13 * 32]          ; [2]
+    paddd           m5, m15
+    psrld           m5, 5
+    palignr         m10, m2, m0, 8
+    pmaddwd         m6, m10, [r3 - 13 * 32]
+    paddd           m6, m15
+    psrld           m6, 5
+    packusdw        m5, m6
+
+    movu            ym14, [r3 - 8 * 32]
+    vinserti32x8    m14, [r3 - 3 * 32], 1
+    pmaddwd         m6, m1, m14                     ; [7], [12]
+    paddd           m6, m15
+    psrld           m6, 5
+    pmaddwd         m8, m10, m14
+    paddd           m8, m15
+    psrld           m8, 5
+    packusdw        m6, m8
+    vextracti32x8   ym7, m6, 1
+
+    movu            ym14, [r3 + 2 * 32]
+    vinserti32x8    m14, [r3 + 7 * 32], 1
+    pmaddwd         m8, m1, m14                     ; [17], [22]
+    paddd           m8, m15
+    psrld           m8, 5
+    pmaddwd         m9, m10, m14
+    paddd           m9, m15
+    psrld           m9, 5
+    packusdw        m8, m9
+    vextracti32x8   ym9, m8, 1
+
+    pmaddwd         m1, [r3 + 12 * 32]              ; [27]
+    paddd           m1, [pd_16]
+    psrld           m1, 5
+    pmaddwd         m10, [r3 + 12 * 32]
+    paddd           m10, [pd_16]
+    psrld           m10, 5
+    packusdw        m1, m10
+
+    palignr         m11, m0, m3, 12
+    pmaddwd         m11, [r3 - 15 * 32]             ; [0]
+    paddd           m11, [pd_16]
+    psrld           m11, 5
+    palignr         m2, m0, 12
+    pmaddwd         m2, [r3 - 15 * 32]
+    paddd           m2, [pd_16]
+    psrld           m2, 5
+    packusdw        m11, m2
+    TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 1, 11, 0, 2, 16
+    ret
+
+
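+;; 32x32 mode 8: four 16x16 quadrant calls. ang16_mode_8_28 covers
+;; projection rows 1-16 and ang32_mode_8_28 rows 17-32 (r2 + 4 supplies
+;; their two-sample reference offset); the second pair starts 16
+;; reference samples further on (r2 + 28 nets +16 samples overall).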
+cglobal intra_pred_ang32_8, 3,8,16
+    add         r2,        128
+    xor         r6d,       r6d
+    lea         r3,        [ang_table_avx2 + 15 * 32]
+    add         r1d,       r1d
+    lea         r4,        [r1 * 3]
+    lea         r7,        [r0 + 8 * r1]
+    vbroadcasti32x8        m15, [pd_16]
+
+    call        ang16_mode_8_28
+
+    add         r2,        4
+    lea         r0,        [r0 + 32]
+
+    call        ang32_mode_8_28
+
+    add         r2,        28
+    lea         r0,        [r7 + 8 * r1]
+
+    call        ang16_mode_8_28
+
+    add         r2,        4
+    lea         r0,        [r0 + 32]
+
+    call        ang32_mode_8_28
+    RET
+
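+;; 32x32 mode 28: the same four calls with r6d = 1, so the helpers
+;; store directly instead of transposing; r5 holds dst + 32 bytes, the
+;; right 16-column half, for the second ang16/ang32 pair.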
+cglobal intra_pred_ang32_28, 3,7,16
+    xor         r6d,       r6d
+    inc         r6d
+    lea         r3,        [ang_table_avx2 + 15 * 32]
+    add         r1d,       r1d
+    lea         r4,        [r1 * 3]
+    lea         r5,        [r0 + 32]
+    vbroadcasti32x8        m15, [pd_16]
+    call        ang16_mode_8_28
+
+    add         r2,        4
+
+    call        ang32_mode_8_28
+
+    add         r2,        28
+    mov         r0,        r5
+
+    call        ang16_mode_8_28
+
+    add         r2,        4
+
+    call        ang32_mode_8_28
+    RET
+
 ;-------------------------------------------------------------------------------------------------------
 ; avx512 code for intra_pred_ang32 mode 2 to 34 end
 ;-------------------------------------------------------------------------------------------------------

