[x265] [PATCH] intrinsic: Added dct16 sse3 intrinsic, 55288.92 -> 45139.28

dave dtyx265 at gmail.com
Wed Feb 11 19:17:19 CET 2015


55288.92 is the C code and 45139.28 is the intrinsic, that is, if I am 
reading the testbench output correctly; the 1.22x column is just the 
ratio of the two (55288.92 / 45139.28 ≈ 1.22).

dct16x16        1.22x      45139.28      55288.92

My system is old, so these numbers are probably large compared to what 
testing on a newer system would show.
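
As for the sse3 vs ssse3 question: my understanding is that the setup 
code is applied in ascending SIMD order, so on an SSSE3-capable CPU the 
ssse3 dct16 should overwrite this entry and the sse3 version only 
matters for older CPUs. Roughly like this (illustrative sketch only, 
not the actual x265 setup code; the names below are made up):

    #include <cstdint>
    #include <cstddef>

    typedef void (*dct_t)(const int16_t *src, int16_t *dst, intptr_t stride);

    // stand-ins for the real primitives
    static void dct16_c(const int16_t*, int16_t*, intptr_t)     {}
    static void dct16_sse3(const int16_t*, int16_t*, intptr_t)  {}
    static void dct16_ssse3(const int16_t*, int16_t*, intptr_t) {}

    struct Primitives { dct_t dct16; };

    static void setup(Primitives &p, bool hasSSE3, bool hasSSSE3)
    {
        p.dct16 = dct16_c;
        if (hasSSE3)  p.dct16 = dct16_sse3;   // this patch
        if (hasSSSE3) p.dct16 = dct16_ssse3;  // wins on most modern CPUs
    }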

On 02/11/2015 09:56 AM, Steve Borho wrote:
> On 02/11, dtyx265 at gmail.com wrote:
>> # HG changeset patch
>> # User David T Yuen <dtyx265 at gmail.com>
>> # Date 1423675655 28800
>> # Node ID 5b1611feda7249643af85e595098bc95e9e4eee2
>> # Parent  f52d723d8c46e9bca77bb8f3113004d449456559
>> intrinsic: Added dct16 sse3 intrinsic, 55288.92 -> 45139.28
> which two numbers are being compared there? which is 55288.92 and which
> is 45139.28?  If we have sse3 and ssse3 functions available, most modern
> CPUs will only use the ssse3 function.
>
>> This is backported from the ssse3 dct16 intrinsic
>>
>> diff -r f52d723d8c46 -r 5b1611feda72 source/common/vec/dct-sse3.cpp
>> --- a/source/common/vec/dct-sse3.cpp	Mon Feb 09 17:07:02 2015 +0530
>> +++ b/source/common/vec/dct-sse3.cpp	Wed Feb 11 09:27:35 2015 -0800
>> @@ -36,6 +36,536 @@
>>   using namespace x265;
>>   
>>   namespace {
>> +ALIGN_VAR_32(static const int16_t, tab_dct_8[][8]) =
>> +{
>> +    { 0x0100, 0x0F0E, 0x0706, 0x0908, 0x0302, 0x0D0C, 0x0504, 0x0B0A },
>> +
>> +    { 64, 64, 64, 64, 64, 64, 64, 64 },
>> +    { 64, -64, 64, -64, 64, -64, 64, -64 },
>> +    { 83, 36, 83, 36, 83, 36, 83, 36 },
>> +    { 36, -83, 36, -83, 36, -83, 36, -83 },
>> +    { 89, 18, 75, 50, 89, 18, 75, 50 },
>> +    { 75, -50, -18, -89, 75, -50, -18, -89 },
>> +    { 50, 75, -89, 18, 50, 75, -89, 18 },
>> +    { 18, -89, -50, 75, 18, -89, -50, 75 },
>> +
>> +    { 83, 83, -83, -83, 36, 36, -36, -36 },
>> +    { 36, 36, -36, -36, -83, -83, 83, 83 },
>> +    { 89, -89, 18, -18, 75, -75, 50, -50 },
>> +    { 75, -75, -50, 50, -18, 18, -89, 89 },
>> +    { 50, -50, 75, -75, -89, 89, 18, -18 },
>> +    { 18, -18, -89, 89, -50, 50, 75, -75 },
>> +};
>> +
>> +ALIGN_VAR_32(static const int16_t, tab_dct_16_1[][8]) =
>> +{
>> +    { 90, 87, 80, 70, 57, 43, 25,  9 },  //  0
>> +    { 87, 57,  9, -43, -80, -90, -70, -25 },  //  1
>> +    { 80,  9, -70, -87, -25, 57, 90, 43 },  //  2
>> +    { 70, -43, -87,  9, 90, 25, -80, -57 },  //  3
>> +    { 57, -80, -25, 90, -9, -87, 43, 70 },  //  4
>> +    { 43, -90, 57, 25, -87, 70,  9, -80 },  //  5
>> +    { 25, -70, 90, -80, 43,  9, -57, 87 },  //  6
>> +    {  9, -25, 43, -57, 70, -80, 87, -90 },  //  7
>> +    { 83, 83, -83, -83, 36, 36, -36, -36 },  //  8
>> +    { 36, 36, -36, -36, -83, -83, 83, 83 },  //  9
>> +    { 89, 89, 18, 18, 75, 75, 50, 50 },  // 10
>> +    { 75, 75, -50, -50, -18, -18, -89, -89 },  // 11
>> +    { 50, 50, 75, 75, -89, -89, 18, 18 },  // 12
>> +    { 18, 18, -89, -89, -50, -50, 75, 75 },  // 13
>> +
>> +#define MAKE_COEF(a0, a1, a2, a3, a4, a5, a6, a7) \
>> +    { (a0), -(a0), (a3), -(a3), (a1), -(a1), (a2), -(a2) \
>> +    }, \
>> +    { (a7), -(a7), (a4), -(a4), (a6), -(a6), (a5), -(a5) },
>> +
>> +    MAKE_COEF(90, 87, 80, 70, 57, 43, 25,  9)
>> +    MAKE_COEF(87, 57,  9, -43, -80, -90, -70, -25)
>> +    MAKE_COEF(80,  9, -70, -87, -25, 57, 90, 43)
>> +    MAKE_COEF(70, -43, -87,  9, 90, 25, -80, -57)
>> +    MAKE_COEF(57, -80, -25, 90, -9, -87, 43, 70)
>> +    MAKE_COEF(43, -90, 57, 25, -87, 70,  9, -80)
>> +    MAKE_COEF(25, -70, 90, -80, 43,  9, -57, 87)
>> +    MAKE_COEF(9, -25, 43, -57, 70, -80, 87, -90)
>> +#undef MAKE_COEF
>> +};
>> +
>> +void dct16(const int16_t *src, int16_t *dst, intptr_t stride)
>> +{
>> +#if HIGH_BIT_DEPTH
>> +#define SHIFT1  5
>> +#define ADD1    16
>> +#else
>> +#define SHIFT1  3
>> +#define ADD1    4
>> +#endif
>> +
>> +#define SHIFT2  10
>> +#define ADD2    512
>> +
>> +    // Const
>> +    __m128i c_4     = _mm_set1_epi32(ADD1);
>> +    __m128i c_512   = _mm_set1_epi32(ADD2);
>> +
>> +    ALIGN_VAR_32(int16_t, tmp[16 * 16]);
>> +
>> +    __m128i T00A, T01A, T02A, T03A;
>> +    __m128i T00B, T01B, T02B, T03B, T04B, T05B, T06B, T07B;
>> +    __m128i T10, T11, T12, T13, T14, T15, T16, T17;
>> +    __m128i T20, T21, T22, T23, T24, T25, T26, T27;
>> +    __m128i T30, T31, T32, T33, T34, T35, T36, T37;
>> +    __m128i T40, T41, T42, T43, T44, T45, T46, T47;
>> +    __m128i T50, T51, T52, T53;
>> +    __m128i T60, T61, T62, T63, T64, T65, T66, T67;
>> +    __m128i T70;
>> +
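>> +    // Pass 1 ("DCT1"): transform the 16 input rows, eight at a time, and store
>> +    // the partially transformed coefficients into tmp with rows and columns
>> +    // swapped.  Pass 2 ("DCT2") transforms tmp and writes the final
>> +    // coefficients to dst.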
>> +    // DCT1
>> +    {
>> +#define part 0
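>> +// SHUFFLE1: reverse the eight 16-bit words of the upper half of a source row
>> +// so that element j lines up with element 15 - j for the butterflies below
>> +// (done with SSE2 word/dword shuffles since pshufb is SSSE3).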
>> +#define SHUFFLE1(R1, SRC)  \
>> +    R1 = _mm_shuffle_epi32(SRC, 0x1B); \
>> +    R1 = _mm_shufflehi_epi16(R1, 0xB1); \
>> +    R1 = _mm_shufflelo_epi16(R1, 0xB1);
>> +        SHUFFLE1(T00B, _mm_load_si128((__m128i*)&src[(part + 0) * stride + 8]))
>> +        SHUFFLE1(T01B, _mm_load_si128((__m128i*)&src[(part + 1) * stride + 8]))
>> +        SHUFFLE1(T02B, _mm_load_si128((__m128i*)&src[(part + 2) * stride + 8]))
>> +        SHUFFLE1(T03B, _mm_load_si128((__m128i*)&src[(part + 3) * stride + 8]))
>> +        SHUFFLE1(T04B, _mm_load_si128((__m128i*)&src[(part + 4) * stride + 8]))
>> +        SHUFFLE1(T05B, _mm_load_si128((__m128i*)&src[(part + 5) * stride + 8]))
>> +        SHUFFLE1(T06B, _mm_load_si128((__m128i*)&src[(part + 6) * stride + 8]))
>> +        SHUFFLE1(T07B, _mm_load_si128((__m128i*)&src[(part + 7) * stride + 8]))
>> +
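>> +// READ_ADD_SUB: for eight consecutive rows, form the even part
>> +// E[j] = src[j] + src[15 - j] in T10..T17 and the odd part
>> +// O[j] = src[j] - src[15 - j] in T20..T27.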
>> +#define READ_ADD_SUB(INDEX) \
>> +    T10  = _mm_add_epi16(_mm_load_si128((__m128i*)&src[(INDEX + 0) * stride]), T00B); \
>> +    T11  = _mm_add_epi16(_mm_load_si128((__m128i*)&src[(INDEX + 1) * stride]), T01B); \
>> +    T12  = _mm_add_epi16(_mm_load_si128((__m128i*)&src[(INDEX + 2) * stride]), T02B); \
>> +    T13  = _mm_add_epi16(_mm_load_si128((__m128i*)&src[(INDEX + 3) * stride]), T03B); \
>> +    T14  = _mm_add_epi16(_mm_load_si128((__m128i*)&src[(INDEX + 4) * stride]), T04B); \
>> +    T15  = _mm_add_epi16(_mm_load_si128((__m128i*)&src[(INDEX + 5) * stride]), T05B); \
>> +    T16  = _mm_add_epi16(_mm_load_si128((__m128i*)&src[(INDEX + 6) * stride]), T06B); \
>> +    T17  = _mm_add_epi16(_mm_load_si128((__m128i*)&src[(INDEX + 7) * stride]), T07B); \
>> +    \
>> +    T20  = _mm_sub_epi16(_mm_load_si128((__m128i*)&src[(INDEX + 0) * stride]), T00B); \
>> +    T21  = _mm_sub_epi16(_mm_load_si128((__m128i*)&src[(INDEX + 1) * stride]), T01B); \
>> +    T22  = _mm_sub_epi16(_mm_load_si128((__m128i*)&src[(INDEX + 2) * stride]), T02B); \
>> +    T23  = _mm_sub_epi16(_mm_load_si128((__m128i*)&src[(INDEX + 3) * stride]), T03B); \
>> +    T24  = _mm_sub_epi16(_mm_load_si128((__m128i*)&src[(INDEX + 4) * stride]), T04B); \
>> +    T25  = _mm_sub_epi16(_mm_load_si128((__m128i*)&src[(INDEX + 5) * stride]), T05B); \
>> +    T26  = _mm_sub_epi16(_mm_load_si128((__m128i*)&src[(INDEX + 6) * stride]), T06B); \
>> +    T27  = _mm_sub_epi16(_mm_load_si128((__m128i*)&src[(INDEX + 7) * stride]), T07B);
>> +        READ_ADD_SUB(part)
>> +
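>> +// SHUFFLE2: reorder two rows of E[] and split them with 64-bit unpacks so the
>> +// adds/subs below produce EE[j] = E[j] + E[7 - j] and EO[j] = E[j] - E[7 - j].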
>> +#define SHUFFLE2(R1, R2, R3, R4) \
>> +    R1 = _mm_shufflehi_epi16(R1, 0x63); \
>> +    R2 = _mm_shufflehi_epi16(R2, 0x63); \
>> +    R1 = _mm_shufflelo_epi16(R1, 0x9C); \
>> +    R2 = _mm_shufflelo_epi16(R2, 0x9C); \
>> +    R3 = _mm_unpacklo_epi64(R1, R2); \
>> +    R4 = _mm_unpackhi_epi64(R1, R2);
>> +
>> +        SHUFFLE2(T10, T11, T30, T31)
>> +        SHUFFLE2(T12, T13, T32, T33)
>> +        SHUFFLE2(T14, T15, T34, T35)
>> +        SHUFFLE2(T16, T17, T36, T37)
>> +
>> +        T40 = _mm_add_epi16(T30, T31);
>> +        T41 = _mm_add_epi16(T32, T33);
>> +        T42 = _mm_add_epi16(T34, T35);
>> +        T43 = _mm_add_epi16(T36, T37);
>> +        T44 = _mm_sub_epi16(T30, T31);
>> +        T45 = _mm_sub_epi16(T32, T33);
>> +        T46 = _mm_sub_epi16(T34, T35);
>> +        T47 = _mm_sub_epi16(T36, T37);
>> +
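>> +// PHADDW_PHSUBW: pairwise horizontal add (R3) and subtract (R4) of the 16-bit
>> +// words of R1 and R2, i.e. the phaddw/phsubw of the ssse3 version emulated
>> +// with SSE2 shuffles and unpacks.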
>> +#define PHADDW_PHSUBW(R1, R2, R3, R4) \
>> +    R1 = _mm_shufflehi_epi16(R1, 0xD8); \
>> +    R1 = _mm_shufflelo_epi16(R1, 0xD8); \
>> +    R1 = _mm_shuffle_epi32(R1, 0xD8); \
>> +    R2 = _mm_shufflehi_epi16(R2, 0xD8); \
>> +    R2 = _mm_shufflelo_epi16(R2, 0xD8); \
>> +    R2 = _mm_shuffle_epi32(R2, 0xD8); \
>> +    R4 = _mm_unpacklo_epi64(R1, R2); \
>> +    R1 = _mm_unpackhi_epi64(R1, R2); \
>> +    R3 = _mm_add_epi16(R4, R1); \
>> +    R4 = _mm_sub_epi16(R4, R1);
>> +
>> +        PHADDW_PHSUBW(T40, T41, T50, T52)
>> +        PHADDW_PHSUBW(T42, T43, T51, T53)
>> +
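>> +// MADD_SHIFT: multiply-accumulate against one tab_dct_8 row, round by ADD1,
>> +// shift by SHIFT1, pack to 16 bits and store one first-pass row into tmp.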
>> +#define MADD_SHIFT(R1, R2, tab, index) \
>> +    T60  = _mm_madd_epi16(R1, _mm_load_si128((__m128i*)tab_dct_8[tab])); \
>> +    T61  = _mm_madd_epi16(R2, _mm_load_si128((__m128i*)tab_dct_8[tab])); \
>> +    T60  = _mm_srai_epi32(_mm_add_epi32(T60, c_4), SHIFT1); \
>> +    T61  = _mm_srai_epi32(_mm_add_epi32(T61, c_4), SHIFT1); \
>> +    T70  = _mm_packs_epi32(T60, T61); \
>> +    _mm_store_si128((__m128i*)&tmp[index * 16 + part], T70);
>> +
>> +        MADD_SHIFT(T50, T51, 1, 0)
>> +        MADD_SHIFT(T50, T51, 2, 8)
>> +        MADD_SHIFT(T52, T53, 3, 4)
>> +        MADD_SHIFT(T52, T53, 4, 12)
>> +
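>> +// PHADDD: R1 = pairwise horizontal 32-bit add of R2 and R3 (phaddd emulation);
>> +// T00A/T00B are used as scratch and hold the unpacked even/odd dwords afterwards.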
>> +#define PHADDD(R1, R2, R3) \
>> +    R2 = _mm_shuffle_epi32(R2, 0xD8); \
>> +    R3 = _mm_shuffle_epi32(R3, 0xD8); \
>> +    T00A = _mm_unpacklo_epi64(R2, R3); \
>> +    T00B = _mm_unpackhi_epi64(R2, R3); \
>> +    R1 = _mm_add_epi32(T00A, T00B);
>> +
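>> +// PHADDD_SHIFT: multiply the EO part (T44..T47) by one tab_dct_8 row, reduce
>> +// horizontally, round/shift/pack and store one of tmp rows 2, 6, 10 and 14.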
>> +#define PHADDD_SHIFT(TAB, ROW) \
>> +    T60  = _mm_madd_epi16(T44, _mm_load_si128((__m128i*)tab_dct_8[TAB])); \
>> +    T61  = _mm_madd_epi16(T45, _mm_load_si128((__m128i*)tab_dct_8[TAB])); \
>> +    T62  = _mm_madd_epi16(T46, _mm_load_si128((__m128i*)tab_dct_8[TAB])); \
>> +    T63  = _mm_madd_epi16(T47, _mm_load_si128((__m128i*)tab_dct_8[TAB])); \
>> +    PHADDD(T60, T60, T61) \
>> +    PHADDD(T61, T62, T63) \
>> +    T60  = _mm_srai_epi32(_mm_add_epi32(T60, c_4), SHIFT1); \
>> +    T61  = _mm_srai_epi32(_mm_add_epi32(T61, c_4), SHIFT1); \
>> +    T70  = _mm_packs_epi32(T60, T61); \
>> +    _mm_store_si128((__m128i*)&tmp[ROW * 16 + part], T70);
>> +
>> +        PHADDD_SHIFT(5, 2)
>> +        PHADDD_SHIFT(6, 6)
>> +        PHADDD_SHIFT(7, 10)
>> +        PHADDD_SHIFT(8, 14)
>> +
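>> +// MAKE_ODD1: dot product of the odd part (T20..T27) with one tab_dct_16_1 row,
>> +// reduced with two levels of PHADDD, then rounded, shifted, packed and stored
>> +// as one odd-index row (1, 3, ..., 15) of tmp.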
>> +#define MAKE_ODD1(tab, dstPos) \
>> +    T60  = _mm_madd_epi16(T20, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
>> +    T61  = _mm_madd_epi16(T21, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
>> +    T62  = _mm_madd_epi16(T22, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
>> +    T63  = _mm_madd_epi16(T23, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
>> +    T64  = _mm_madd_epi16(T24, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
>> +    T65  = _mm_madd_epi16(T25, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
>> +    T66  = _mm_madd_epi16(T26, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
>> +    T67  = _mm_madd_epi16(T27, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
>> +    PHADDD(T60, T60, T61) \
>> +    PHADDD(T61, T62, T63) \
>> +    PHADDD(T62, T64, T65) \
>> +    PHADDD(T63, T66, T67) \
>> +    PHADDD(T60, T60, T61) \
>> +    PHADDD(T61, T62, T63) \
>> +    T60  = _mm_srai_epi32(_mm_add_epi32(T60, c_4), SHIFT1); \
>> +    T61  = _mm_srai_epi32(_mm_add_epi32(T61, c_4), SHIFT1); \
>> +    T70  = _mm_packs_epi32(T60, T61); \
>> +    _mm_store_si128((__m128i*)&tmp[(dstPos) * 16 + part], T70);
>> +
>> +        MAKE_ODD1(0, 1);
>> +        MAKE_ODD1(1, 3);
>> +        MAKE_ODD1(2, 5);
>> +        MAKE_ODD1(3, 7);
>> +        MAKE_ODD1(4, 9);
>> +        MAKE_ODD1(5, 11);
>> +        MAKE_ODD1(6, 13);
>> +        MAKE_ODD1(7, 15);
>> +#undef part
>> +    }
>> +    {
>> +#define part 8
>> +        SHUFFLE1(T00B, _mm_load_si128((__m128i*)&src[(part + 0) * stride + 8]))
>> +        SHUFFLE1(T01B, _mm_load_si128((__m128i*)&src[(part + 1) * stride + 8]))
>> +        SHUFFLE1(T02B, _mm_load_si128((__m128i*)&src[(part + 2) * stride + 8]))
>> +        SHUFFLE1(T03B, _mm_load_si128((__m128i*)&src[(part + 3) * stride + 8]))
>> +        SHUFFLE1(T04B, _mm_load_si128((__m128i*)&src[(part + 4) * stride + 8]))
>> +        SHUFFLE1(T05B, _mm_load_si128((__m128i*)&src[(part + 5) * stride + 8]))
>> +        SHUFFLE1(T06B, _mm_load_si128((__m128i*)&src[(part + 6) * stride + 8]))
>> +        SHUFFLE1(T07B, _mm_load_si128((__m128i*)&src[(part + 7) * stride + 8]))
>> +
>> +        READ_ADD_SUB(part)
>> +
>> +        SHUFFLE2(T10, T11, T30, T31)
>> +        SHUFFLE2(T12, T13, T32, T33)
>> +        SHUFFLE2(T14, T15, T34, T35)
>> +        SHUFFLE2(T16, T17, T36, T37)
>> +
>> +        T40 = _mm_add_epi16(T30, T31);
>> +        T41 = _mm_add_epi16(T32, T33);
>> +        T42 = _mm_add_epi16(T34, T35);
>> +        T43 = _mm_add_epi16(T36, T37);
>> +        T44 = _mm_sub_epi16(T30, T31);
>> +        T45 = _mm_sub_epi16(T32, T33);
>> +        T46 = _mm_sub_epi16(T34, T35);
>> +        T47 = _mm_sub_epi16(T36, T37);
>> +
>> +        PHADDW_PHSUBW(T40, T41, T50, T52)
>> +        PHADDW_PHSUBW(T42, T43, T51, T53)
>> +
>> +        MADD_SHIFT(T50, T51, 1, 0)
>> +        MADD_SHIFT(T50, T51, 2, 8)
>> +        MADD_SHIFT(T52, T53, 3, 4)
>> +        MADD_SHIFT(T52, T53, 4, 12)
>> +
>> +        PHADDD_SHIFT(5, 2)
>> +        PHADDD_SHIFT(6, 6)
>> +        PHADDD_SHIFT(7, 10)
>> +        PHADDD_SHIFT(8, 14)
>> +        MAKE_ODD1(0, 1);
>> +        MAKE_ODD1(1, 3);
>> +        MAKE_ODD1(2, 5);
>> +        MAKE_ODD1(3, 7);
>> +        MAKE_ODD1(4, 9);
>> +        MAKE_ODD1(5, 11);
>> +        MAKE_ODD1(6, 13);
>> +        MAKE_ODD1(7, 15);
>> +#undef part
>> +    }
>> +    // DCT2
>> +    {
>> +#define part 0
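>> +// SHUFFLE3/SHUFFLE4 (below): reorder the two halves of a tmp row so that the
>> +// word unpacks in UNPCK_MADD pair element j with element 15 - j for the
>> +// second-pass madds.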
>> +#define SHUFFLE3(R1, SRC)  \
>> +    R1 = _mm_shufflehi_epi16(SRC, 0x63); \
>> +    R1 = _mm_shufflelo_epi16(R1, 0x9C);
>> +        SHUFFLE3(T00A, _mm_load_si128((__m128i*)&tmp[(part + 0) * 16 + 0]))
>> +        SHUFFLE3(T01A, _mm_load_si128((__m128i*)&tmp[(part + 1) * 16 + 0]))
>> +        SHUFFLE3(T02A, _mm_load_si128((__m128i*)&tmp[(part + 2) * 16 + 0]))
>> +        SHUFFLE3(T03A, _mm_load_si128((__m128i*)&tmp[(part + 3) * 16 + 0]))
>> +
>> +#define SHUFFLE4(R1, SRC)  \
>> +    R1 = _mm_shuffle_epi32(SRC, 0x4E); \
>> +    R1 = _mm_shufflehi_epi16(R1, 0x9C); \
>> +    R1 = _mm_shufflelo_epi16(R1, 0x63);
>> +        SHUFFLE4(T00B, _mm_load_si128((__m128i*)&tmp[(part + 0) * 16 + 8]))
>> +        SHUFFLE4(T01B, _mm_load_si128((__m128i*)&tmp[(part + 1) * 16 + 8]))
>> +        SHUFFLE4(T02B, _mm_load_si128((__m128i*)&tmp[(part + 2) * 16 + 8]))
>> +        SHUFFLE4(T03B, _mm_load_si128((__m128i*)&tmp[(part + 3) * 16 + 8]))
>> +
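>> +// UNPCK_MADD: interleave the reordered halves of four tmp rows, multiply by
>> +// the all-64 row of tab_dct_8, reduce, and store output rows 0 and 8 (the
>> +// T00A/T00B scratch left by the last PHADDD gives the matching horizontal
>> +// subtract for row 8).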
>> +#define UNPCK_MADD(INDEX) \
>> +        T10  = _mm_unpacklo_epi16(T00A, T00B); \
>> +        T11  = _mm_unpackhi_epi16(T00A, T00B); \
>> +        T12  = _mm_unpacklo_epi16(T01A, T01B); \
>> +        T13  = _mm_unpackhi_epi16(T01A, T01B); \
>> +        T14  = _mm_unpacklo_epi16(T02A, T02B); \
>> +        T15  = _mm_unpackhi_epi16(T02A, T02B); \
>> +        T16  = _mm_unpacklo_epi16(T03A, T03B); \
>> +        T17  = _mm_unpackhi_epi16(T03A, T03B); \
>> +        \
>> +        T20  = _mm_madd_epi16(T10, _mm_load_si128((__m128i*)tab_dct_8[1])); \
>> +        T21  = _mm_madd_epi16(T11, _mm_load_si128((__m128i*)tab_dct_8[1])); \
>> +        T22  = _mm_madd_epi16(T12, _mm_load_si128((__m128i*)tab_dct_8[1])); \
>> +        T23  = _mm_madd_epi16(T13, _mm_load_si128((__m128i*)tab_dct_8[1])); \
>> +        T24  = _mm_madd_epi16(T14, _mm_load_si128((__m128i*)tab_dct_8[1])); \
>> +        T25  = _mm_madd_epi16(T15, _mm_load_si128((__m128i*)tab_dct_8[1])); \
>> +        T26  = _mm_madd_epi16(T16, _mm_load_si128((__m128i*)tab_dct_8[1])); \
>> +        T27  = _mm_madd_epi16(T17, _mm_load_si128((__m128i*)tab_dct_8[1])); \
>> +        \
>> +        T30  = _mm_add_epi32(T20, T21); \
>> +        T31  = _mm_add_epi32(T22, T23); \
>> +        T32  = _mm_add_epi32(T24, T25); \
>> +        T33  = _mm_add_epi32(T26, T27); \
>> +        \
>> +        PHADDD(T30, T30, T31) \
>> +        PHADDD(T31, T32, T33) \
>> +        \
>> +        PHADDD(T40, T30, T31) \
>> +        T41 = _mm_sub_epi32(T00A, T00B); \
>> +        T40  = _mm_srai_epi32(_mm_add_epi32(T40, c_512), SHIFT2); \
>> +        T41  = _mm_srai_epi32(_mm_add_epi32(T41, c_512), SHIFT2); \
>> +        T40  = _mm_packs_epi32(T40, T40); \
>> +        T41  = _mm_packs_epi32(T41, T41); \
>> +        _mm_storel_epi64((__m128i*)&dst[0 * 16 + INDEX], T40); \
>> +        _mm_storel_epi64((__m128i*)&dst[8 * 16 + INDEX], T41);
>> +
>> +        UNPCK_MADD(part)
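>> +
>> +// MADD_PHADDD: dot product of the interleaved row data (T10..T17) with one
>> +// tab_dct_16_1 row, horizontal reduce, round/shift and store one even output
>> +// row (rows 4 and 12 here).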
>> +#define MADD_PHADDD(TAB, ROW, INDEX) \
>> +    T20  = _mm_madd_epi16(T10, _mm_load_si128((__m128i*)tab_dct_16_1[TAB])); \
>> +    T21  = _mm_madd_epi16(T11, _mm_load_si128((__m128i*)tab_dct_16_1[TAB])); \
>> +    T22  = _mm_madd_epi16(T12, _mm_load_si128((__m128i*)tab_dct_16_1[TAB])); \
>> +    T23  = _mm_madd_epi16(T13, _mm_load_si128((__m128i*)tab_dct_16_1[TAB])); \
>> +    T24  = _mm_madd_epi16(T14, _mm_load_si128((__m128i*)tab_dct_16_1[TAB])); \
>> +    T25  = _mm_madd_epi16(T15, _mm_load_si128((__m128i*)tab_dct_16_1[TAB])); \
>> +    T26  = _mm_madd_epi16(T16, _mm_load_si128((__m128i*)tab_dct_16_1[TAB])); \
>> +    T27  = _mm_madd_epi16(T17, _mm_load_si128((__m128i*)tab_dct_16_1[TAB])); \
>> +    \
>> +    T30  = _mm_add_epi32(T20, T21); \
>> +    T31  = _mm_add_epi32(T22, T23); \
>> +    T32  = _mm_add_epi32(T24, T25); \
>> +    T33  = _mm_add_epi32(T26, T27); \
>> +    \
>> +    PHADDD(T30, T30, T31) \
>> +    PHADDD(T31, T32, T33) \
>> +    \
>> +    PHADDD(T40, T30, T31) \
>> +    T40  = _mm_srai_epi32(_mm_add_epi32(T40, c_512), SHIFT2); \
>> +    T40  = _mm_packs_epi32(T40, T40); \
>> +    _mm_storel_epi64((__m128i*)&dst[ROW * 16 + INDEX], T40);
>> +
>> +        MADD_PHADDD(8, 4, part)
>> +        MADD_PHADDD(9, 12, part)
>> +
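>> +// SUB_PHADDD: same as MADD_PHADDD except the first reduction step subtracts
>> +// instead of adds; produces output rows 2, 6, 10 and 14.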
>> +#define SUB_PHADDD(TAB, ROW, INDEX) \
>> +    T20  = _mm_madd_epi16(T10, _mm_load_si128((__m128i*)tab_dct_16_1[TAB])); \
>> +    T21  = _mm_madd_epi16(T11, _mm_load_si128((__m128i*)tab_dct_16_1[TAB])); \
>> +    T22  = _mm_madd_epi16(T12, _mm_load_si128((__m128i*)tab_dct_16_1[TAB])); \
>> +    T23  = _mm_madd_epi16(T13, _mm_load_si128((__m128i*)tab_dct_16_1[TAB])); \
>> +    T24  = _mm_madd_epi16(T14, _mm_load_si128((__m128i*)tab_dct_16_1[TAB])); \
>> +    T25  = _mm_madd_epi16(T15, _mm_load_si128((__m128i*)tab_dct_16_1[TAB])); \
>> +    T26  = _mm_madd_epi16(T16, _mm_load_si128((__m128i*)tab_dct_16_1[TAB])); \
>> +    T27  = _mm_madd_epi16(T17, _mm_load_si128((__m128i*)tab_dct_16_1[TAB])); \
>> +    \
>> +    T30  = _mm_sub_epi32(T20, T21); \
>> +    T31  = _mm_sub_epi32(T22, T23); \
>> +    T32  = _mm_sub_epi32(T24, T25); \
>> +    T33  = _mm_sub_epi32(T26, T27); \
>> +    \
>> +    PHADDD(T30, T30, T31) \
>> +    PHADDD(T31, T32, T33) \
>> +    \
>> +    PHADDD(T40, T30, T31) \
>> +    T40  = _mm_srai_epi32(_mm_add_epi32(T40, c_512), SHIFT2); \
>> +    T40  = _mm_packs_epi32(T40, T40); \
>> +    _mm_storel_epi64((__m128i*)&dst[ROW * 16 + INDEX], T40);
>> +
>> +        SUB_PHADDD(10, 2, part)
>> +        SUB_PHADDD(11, 6, part)
>> +        SUB_PHADDD(12, 10, part)
>> +        SUB_PHADDD(13, 14, part)
>> +
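>> +// MAKE_ODD2: like MADD_PHADDD but alternating between a pair of tab_dct_16_1
>> +// rows (tab and tab + 1, the MAKE_COEF pairs); produces one odd-index output
>> +// row (1, 3, ..., 15).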
>> +#define MAKE_ODD2(tab, dstPos, INDEX) \
>> +    T20  = _mm_madd_epi16(T10, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)]));       /* [*O2_0 *O1_0 *O3_0 *O0_0] */ \
>> +    T21  = _mm_madd_epi16(T11, _mm_load_si128((__m128i*)tab_dct_16_1[(tab) + 1]));   /* [*O5_0 *O6_0 *O4_0 *O7_0] */ \
>> +    T22  = _mm_madd_epi16(T12, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
>> +    T23  = _mm_madd_epi16(T13, _mm_load_si128((__m128i*)tab_dct_16_1[(tab) + 1])); \
>> +    T24  = _mm_madd_epi16(T14, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
>> +    T25  = _mm_madd_epi16(T15, _mm_load_si128((__m128i*)tab_dct_16_1[(tab) + 1])); \
>> +    T26  = _mm_madd_epi16(T16, _mm_load_si128((__m128i*)tab_dct_16_1[(tab)])); \
>> +    T27  = _mm_madd_epi16(T17, _mm_load_si128((__m128i*)tab_dct_16_1[(tab) + 1])); \
>> +    \
>> +    T30  = _mm_add_epi32(T20, T21); \
>> +    T31  = _mm_add_epi32(T22, T23); \
>> +    T32  = _mm_add_epi32(T24, T25); \
>> +    T33  = _mm_add_epi32(T26, T27); \
>> +    \
>> +    PHADDD(T30, T30, T31) \
>> +    PHADDD(T31, T32, T33) \
>> +    \
>> +    PHADDD(T40, T30, T31); \
>> +    T40  = _mm_srai_epi32(_mm_add_epi32(T40, c_512), SHIFT2); \
>> +    T40  = _mm_packs_epi32(T40, T40); \
>> +    _mm_storel_epi64((__m128i*)&dst[(dstPos) * 16 + INDEX], T40);
>> +
>> +        MAKE_ODD2(14,  1, part);
>> +        MAKE_ODD2(16,  3, part);
>> +        MAKE_ODD2(18,  5, part);
>> +        MAKE_ODD2(20,  7, part);
>> +        MAKE_ODD2(22,  9, part);
>> +        MAKE_ODD2(24, 11, part);
>> +        MAKE_ODD2(26, 13, part);
>> +        MAKE_ODD2(28, 15, part);
>> +#undef part
>> +    }
>> +    {
>> +#define part 4
>> +        SHUFFLE3(T00A, _mm_load_si128((__m128i*)&tmp[(part + 0) * 16 + 0]))
>> +        SHUFFLE3(T01A, _mm_load_si128((__m128i*)&tmp[(part + 1) * 16 + 0]))
>> +        SHUFFLE3(T02A, _mm_load_si128((__m128i*)&tmp[(part + 2) * 16 + 0]))
>> +        SHUFFLE3(T03A, _mm_load_si128((__m128i*)&tmp[(part + 3) * 16 + 0]))
>> +
>> +        SHUFFLE4(T00B, _mm_load_si128((__m128i*)&tmp[(part + 0) * 16 + 8]))
>> +        SHUFFLE4(T01B, _mm_load_si128((__m128i*)&tmp[(part + 1) * 16 + 8]))
>> +        SHUFFLE4(T02B, _mm_load_si128((__m128i*)&tmp[(part + 2) * 16 + 8]))
>> +        SHUFFLE4(T03B, _mm_load_si128((__m128i*)&tmp[(part + 3) * 16 + 8]))
>> +
>> +        UNPCK_MADD(part)
>> +
>> +        MADD_PHADDD(8, 4, part)
>> +        MADD_PHADDD(9, 12, part)
>> +
>> +        SUB_PHADDD(10, 2, part)
>> +        SUB_PHADDD(11, 6, part)
>> +        SUB_PHADDD(12, 10, part)
>> +        SUB_PHADDD(13, 14, part)
>> +
>> +        MAKE_ODD2(14,  1, part);
>> +        MAKE_ODD2(16,  3, part);
>> +        MAKE_ODD2(18,  5, part);
>> +        MAKE_ODD2(20,  7, part);
>> +        MAKE_ODD2(22,  9, part);
>> +        MAKE_ODD2(24, 11, part);
>> +        MAKE_ODD2(26, 13, part);
>> +        MAKE_ODD2(28, 15, part);
>> +#undef part
>> +    }
>> +    {
>> +#define part 8
>> +        SHUFFLE3(T00A, _mm_load_si128((__m128i*)&tmp[(part + 0) * 16 + 0]))
>> +        SHUFFLE3(T01A, _mm_load_si128((__m128i*)&tmp[(part + 1) * 16 + 0]))
>> +        SHUFFLE3(T02A, _mm_load_si128((__m128i*)&tmp[(part + 2) * 16 + 0]))
>> +        SHUFFLE3(T03A, _mm_load_si128((__m128i*)&tmp[(part + 3) * 16 + 0]))
>> +
>> +        SHUFFLE4(T00B, _mm_load_si128((__m128i*)&tmp[(part + 0) * 16 + 8]))
>> +        SHUFFLE4(T01B, _mm_load_si128((__m128i*)&tmp[(part + 1) * 16 + 8]))
>> +        SHUFFLE4(T02B, _mm_load_si128((__m128i*)&tmp[(part + 2) * 16 + 8]))
>> +        SHUFFLE4(T03B, _mm_load_si128((__m128i*)&tmp[(part + 3) * 16 + 8]))
>> +
>> +        UNPCK_MADD(part)
>> +
>> +        MADD_PHADDD(8, 4, part)
>> +        MADD_PHADDD(9, 12, part)
>> +
>> +        SUB_PHADDD(10, 2, part)
>> +        SUB_PHADDD(11, 6, part)
>> +        SUB_PHADDD(12, 10, part)
>> +        SUB_PHADDD(13, 14, part)
>> +
>> +        MAKE_ODD2(14,  1, part);
>> +        MAKE_ODD2(16,  3, part);
>> +        MAKE_ODD2(18,  5, part);
>> +        MAKE_ODD2(20,  7, part);
>> +        MAKE_ODD2(22,  9, part);
>> +        MAKE_ODD2(24, 11, part);
>> +        MAKE_ODD2(26, 13, part);
>> +        MAKE_ODD2(28, 15, part);
>> +#undef part
>> +    }
>> +    {
>> +#define part 12
>> +        SHUFFLE3(T00A, _mm_load_si128((__m128i*)&tmp[(part + 0) * 16 + 0]))
>> +        SHUFFLE3(T01A, _mm_load_si128((__m128i*)&tmp[(part + 1) * 16 + 0]))
>> +        SHUFFLE3(T02A, _mm_load_si128((__m128i*)&tmp[(part + 2) * 16 + 0]))
>> +        SHUFFLE3(T03A, _mm_load_si128((__m128i*)&tmp[(part + 3) * 16 + 0]))
>> +
>> +        SHUFFLE4(T00B, _mm_load_si128((__m128i*)&tmp[(part + 0) * 16 + 8]))
>> +        SHUFFLE4(T01B, _mm_load_si128((__m128i*)&tmp[(part + 1) * 16 + 8]))
>> +        SHUFFLE4(T02B, _mm_load_si128((__m128i*)&tmp[(part + 2) * 16 + 8]))
>> +        SHUFFLE4(T03B, _mm_load_si128((__m128i*)&tmp[(part + 3) * 16 + 8]))
>> +
>> +        UNPCK_MADD(part)
>> +
>> +        MADD_PHADDD(8, 4, part)
>> +        MADD_PHADDD(9, 12, part)
>> +
>> +        SUB_PHADDD(10, 2, part)
>> +        SUB_PHADDD(11, 6, part)
>> +        SUB_PHADDD(12, 10, part)
>> +        SUB_PHADDD(13, 14, part)
>> +
>> +        MAKE_ODD2(14,  1, part);
>> +        MAKE_ODD2(16,  3, part);
>> +        MAKE_ODD2(18,  5, part);
>> +        MAKE_ODD2(20,  7, part);
>> +        MAKE_ODD2(22,  9, part);
>> +        MAKE_ODD2(24, 11, part);
>> +        MAKE_ODD2(26, 13, part);
>> +        MAKE_ODD2(28, 15, part);
>> +#undef part
>> +    }
>> +#undef SHIFT1
>> +#undef ADD1
>> +#undef SHIFT2
>> +#undef ADD2
>> +#undef SHUFFLE1
>> +#undef SHUFFLE2
>> +#undef PHADDW_PHSUBW
>> +#undef MAKE_ODD1
>> +#undef MAKE_ODD2
>> +#undef PHADDD
>> +#undef SHUFFLE3
>> +#undef SHUFFLE4
>> +#undef PHADDD_SHIFT
>> +#undef MADD_SHIFT
>> +#undef MADD_PHADDD
>> +#undef SUB_PHADDD
>> +}
>> +
>>   #define SHIFT1  7
>>   #define ADD1    64
>>   
>> @@ -1426,6 +1956,7 @@
>>       /* Note: We have AVX2 assembly for these functions, but since AVX2 is still
>>        * somewhat rare on end-user PCs we still compile and link these SSE3
>>        * intrinsic SIMD functions */
>> +    p.cu[BLOCK_16x16].dct   = dct16;
>>       p.cu[BLOCK_8x8].idct   = idct8;
>>       p.cu[BLOCK_16x16].idct = idct16;
>>       p.cu[BLOCK_32x32].idct = idct32;


