[x265] [PATCH] asm: avx2 code for dct8x8

yuvaraj at multicorewareinc.com
Mon Sep 22 14:22:10 CEST 2014


# HG changeset patch
# User Yuvaraj Venkatesh <yuvaraj at multicorewareinc.com>
# Date 1411388499 -19800
#      Mon Sep 22 17:51:39 2014 +0530
# Node ID 5f5e4bd47c56ee011f39ed6052531bf7eefe0f77
# Parent  fd435504f15e0b13dabba9efe0aa94e7047060b5
asm: avx2 code for dct8x8

diff -r fd435504f15e -r 5f5e4bd47c56 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Mon Sep 22 13:14:54 2014 +0530
+++ b/source/common/x86/asm-primitives.cpp	Mon Sep 22 17:51:39 2014 +0530
@@ -1441,6 +1441,7 @@
     if (cpuMask & X265_CPU_AVX2)
     {
         p.dct[DCT_4x4] = x265_dct4_avx2;
+        p.dct[DCT_8x8] = x265_dct8_avx2;
         p.quant = x265_quant_avx2;
         p.nquant = x265_nquant_avx2;
         p.dequant_normal = x265_dequant_normal_avx2;
@@ -1741,6 +1742,7 @@
         p.cvt32to16_shl[BLOCK_32x32] = x265_cvt32to16_shl_32_avx2;
         p.denoiseDct = x265_denoise_dct_avx2;
         p.dct[DCT_4x4] = x265_dct4_avx2;
+        p.dct[DCT_8x8] = x265_dct8_avx2;
         p.quant = x265_quant_avx2;
         p.nquant = x265_nquant_avx2;
         p.dequant_normal = x265_dequant_normal_avx2;
diff -r fd435504f15e -r 5f5e4bd47c56 source/common/x86/dct8.asm
--- a/source/common/x86/dct8.asm	Mon Sep 22 13:14:54 2014 +0530
+++ b/source/common/x86/dct8.asm	Mon Sep 22 17:51:39 2014 +0530
@@ -30,6 +30,17 @@
 %include "x86util.asm"
 
 SECTION_RODATA 32
+tab_dct8:       dw 64, 64, 64, 64
+                dw 89, 75, 50, 18
+                dw 83, 36, -36, -83
+                dw 75, -18, -89, -50
+                dw 64, -64, -64, 64
+                dw 50, -89, 18, 75
+                dw 36, -83, 83, -36
+                dw 18, -50, 75, -89
+
+dct8_shuf:      times 2 db 6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9
+
 tab_dct16_1:    dw 64, 64, 64, 64, 64, 64, 64, 64
                 dw 90, 87, 80, 70, 57, 43, 25,  9
                 dw 89, 75, 50, 18, -18, -50, -75, -89
@@ -1126,6 +1137,123 @@
     jnz .loop
     RET
 
+%if ARCH_X86_64 == 1
+%macro DCT8_PASS_1 4
+    vpbroadcastq    m0,                 [r6 + %1]
+    pmaddwd         m2,                 m%3, m0
+    pmaddwd         m0,                 m%4
+    phaddd          m2,                 m0
+    paddd           m2,                 m5
+    psrad           m2,                 DCT_SHIFT
+    packssdw        m2,                 m2
+    vpermq          m2,                 m2, 0x08
+    mova            [r5 + %2],          xm2
+%endmacro
+
+%macro DCT8_PASS_2 3
+    vpbroadcastq    m0,                 [r6 + %1]
+    pmaddwd         m2,                 m%2, m0
+    pmaddwd         m0,                 m%3
+    phaddd          m2,                 m0
+    paddd           m2,                 m5
+    psrad           m2,                 DCT_SHIFT2
+%endmacro
+
+INIT_YMM avx2
+cglobal dct8, 3, 7, 8, 0-8*16
+%if BIT_DEPTH == 10
+    %define         DCT_SHIFT          4
+    vbroadcasti128  m5,                [pd_8]
+%elif BIT_DEPTH == 8
+    %define         DCT_SHIFT          2
+    vbroadcasti128  m5,                [pd_2]
+%else
+    %error Unsupported BIT_DEPTH!
+%endif
+%define             DCT_SHIFT2         9
+
+    add             r2d,               r2d
+    lea             r3,                [r2 * 3]
+    lea             r4,                [r0 + r2 * 4]
+    mov             r5,                rsp
+    lea             r6,                [tab_dct8]
+    mova            m6,                [dct8_shuf]
+
+    ;pass1
+    mova            xm0,               [r0]
+    vinserti128     m0,                m0, [r4], 1
+    mova            xm1,               [r0 + r2]
+    vinserti128     m1,                m1, [r4 + r2], 1
+    mova            xm2,               [r0 + r2 * 2]
+    vinserti128     m2,                m2, [r4 + r2 * 2], 1
+    mova            xm3,               [r0 + r3]
+    vinserti128     m3,                m3, [r4 + r3], 1
+
+    punpcklqdq      m4,                m0, m1
+    punpckhqdq      m0,                m1
+    punpcklqdq      m1,                m2, m3
+    punpckhqdq      m2,                m3
+
+    pshufb          m0,                m6
+    pshufb          m2,                m6
+
+    paddw           m3,                m4, m0
+    paddw           m7,                m1, m2
+
+    psubw           m4,                m0
+    psubw           m1,                m2
+
+    DCT8_PASS_1     0 * 8,             0 * 16, 3, 7
+    DCT8_PASS_1     1 * 8,             2 * 16, 4, 1
+    DCT8_PASS_1     2 * 8,             4 * 16, 3, 7
+    DCT8_PASS_1     3 * 8,             6 * 16, 4, 1
+    DCT8_PASS_1     4 * 8,             1 * 16, 3, 7
+    DCT8_PASS_1     5 * 8,             3 * 16, 4, 1
+    DCT8_PASS_1     6 * 8,             5 * 16, 3, 7
+    DCT8_PASS_1     7 * 8,             7 * 16, 4, 1
+
+    ;pass2
+    mov             r2d,               32
+    lea             r3,                [r2 * 3]
+    lea             r4,                [r1 + r2 * 4]
+    vbroadcasti128  m5,                [pd_256]
+
+    mova            m0,                [r5]
+    mova            m1,                [r5 + 32]
+    mova            m2,                [r5 + 64]
+    mova            m3,                [r5 + 96]
+
+    punpcklqdq      m4,                m0, m1
+    punpckhqdq      m0,                m1
+    punpcklqdq      m1,                m2, m3
+    punpckhqdq      m2,                m3
+
+    pshufb          m0,                m6
+    pshufb          m2,                m6
+
+    paddw           m3,                m4, m0
+    paddw           m7,                m1, m2
+
+    psubw           m4,                m0
+    psubw           m1,                m2
+
+    DCT8_PASS_2     0 * 8,             3, 7
+    movu            [r1],              m2
+    DCT8_PASS_2     1 * 8,             4, 1
+    movu            [r1 + r2],         m2
+    DCT8_PASS_2     2 * 8,             3, 7
+    movu            [r1 + r2 * 2],     m2
+    DCT8_PASS_2     3 * 8,             4, 1
+    movu            [r1 + r3],         m2
+    DCT8_PASS_2     4 * 8,             3, 7
+    movu            [r4],              m2
+    DCT8_PASS_2     5 * 8,             4, 1
+    movu            [r4 + r2],         m2
+    DCT8_PASS_2     6 * 8,             3, 7
+    movu            [r4 + r2 * 2],     m2
+    DCT8_PASS_2     7 * 8,             4, 1
+    movu            [r4 + r3],         m2
+    RET
 
 %macro DCT16_PASS_1_E 2
     vpbroadcastq    m7,                [r7 + %1]
@@ -1192,7 +1320,6 @@
     psrad           m10,               DCT_SHIFT2
 %endmacro
 
-%if ARCH_X86_64 == 1
 INIT_YMM avx2
 cglobal dct16, 3, 9, 15, 0-16*mmsize
 %if BIT_DEPTH == 10
diff -r fd435504f15e -r 5f5e4bd47c56 source/common/x86/dct8.h
--- a/source/common/x86/dct8.h	Mon Sep 22 13:14:54 2014 +0530
+++ b/source/common/x86/dct8.h	Mon Sep 22 17:51:39 2014 +0530
@@ -31,6 +31,7 @@
 void x265_dst4_ssse3(int16_t *src, int32_t *dst, intptr_t stride);
 void x265_idst4_sse2(int32_t *src, int16_t *dst, intptr_t stride);
 void x265_dct8_sse4(int16_t *src, int32_t *dst, intptr_t stride);
+void x265_dct8_avx2(int16_t *src, int32_t *dst, intptr_t stride);
 void x265_dct16_avx2(int16_t *src, int32_t *dst, intptr_t stride);
 void x265_dct32_avx2(int16_t *src, int32_t *dst, intptr_t stride);
 
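For readers comparing this against the C primitive, below is a plain scalar model of what the new routine computes on the 8-bit path. It is an illustrative sketch reconstructed from the tables and shifts in the patch (tab_dct8, DCT_SHIFT = 2, DCT_SHIFT2 = 9), not the reference code shipped in x265; the names t8, dct8_pass and dct8_ref are made up for the example. The asm additionally packs the pass-1 results to int16_t (packssdw), which agrees with this model for in-range residual input.

#include <cstdint>
#include <cstddef>

// Half-rows of the 8-point transform matrix, same values as tab_dct8 above.
// Even-index rows are symmetric and odd-index rows are antisymmetric, so
// four coefficients per row are enough.
static const int16_t t8[8][4] =
{
    { 64,  64,  64,  64 },
    { 89,  75,  50,  18 },
    { 83,  36, -36, -83 },
    { 75, -18, -89, -50 },
    { 64, -64, -64,  64 },
    { 50, -89,  18,  75 },
    { 36, -83,  83, -36 },
    { 18, -50,  75, -89 },
};

// One butterfly pass: transform every row of 'in' and store the result
// transposed, so running the pass twice gives the 2-D transform.
static void dct8_pass(const int32_t in[8][8], int32_t out[8][8], int shift)
{
    const int32_t add = 1 << (shift - 1);
    for (int j = 0; j < 8; j++)
    {
        int32_t E[4], O[4];
        for (int k = 0; k < 4; k++)
        {
            E[k] = in[j][k] + in[j][7 - k];    // paddw with the dct8_shuf-reversed half
            O[k] = in[j][k] - in[j][7 - k];    // psubw
        }
        for (int k = 0; k < 8; k++)
        {
            const int32_t *half = (k & 1) ? O : E;
            int32_t sum = 0;
            for (int i = 0; i < 4; i++)
                sum += t8[k][i] * half[i];     // pmaddwd + phaddd
            out[k][j] = (sum + add) >> shift;  // paddd + psrad, transposed store
        }
    }
}

// Whole 8x8 forward transform for 8-bit input: pass 1 uses DCT_SHIFT = 2
// (rounding constant pd_2), pass 2 uses DCT_SHIFT2 = 9 (pd_256).
static void dct8_ref(const int16_t *src, int32_t *dst, intptr_t srcStride)
{
    int32_t block[8][8], mid[8][8], res[8][8];

    for (int i = 0; i < 8; i++)
        for (int j = 0; j < 8; j++)
            block[i][j] = src[i * srcStride + j];

    dct8_pass(block, mid, 2);
    dct8_pass(mid, res, 9);

    for (int i = 0; i < 8; i++)
        for (int j = 0; j < 8; j++)
            dst[i * 8 + j] = res[i][j];
}

Two notes on the calling convention of the asm routine itself: the stride argument is counted in int16_t samples (the code doubles r2d on entry), and the pass-1 row loads use mova, so each source row is expected to be 16-byte aligned; the coefficient output is written with unaligned movu stores.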

