[x264-devel] [PATCH 2/9] MIPS MSA Common Macros Slide and Transpose

Rishikesh More rishikesh.more at imgtec.com
Thu Jun 18 14:18:39 CEST 2015


This patch includes restructuring of existing macros and the addition of new generic macros, which were necessary to avoid repeated review comments.
It reduces the number of code lines through maximal use of generic macros, and allows better code alignment and readability.
Overall, this patch set is just upgrading the code with styling changes and will bring it in sync with MIPS-SIMD optimized latest codebase at our end.

This patch contains macros for slide, shift and transpose operations. 

Signed-off-by: Rishikesh More <rishikesh.more at imgtec.com>
---
 common/mips/macros.h | 556 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 556 insertions(+)

diff --git a/common/mips/macros.h b/common/mips/macros.h
index 8de9028..991cfb1 100644
--- a/common/mips/macros.h
+++ b/common/mips/macros.h
@@ -1393,4 +1393,560 @@
 }
 #define ADDS_SH4_UH( ... ) ADDS_SH4( v8u16, __VA_ARGS__ )
 
+/* Description : Shift left all elements of vector (generic for all data types)
+   Arguments   : Inputs  - in0, in1, in2, in3, shift
+                 Outputs - in place operation
+                 Return Type - as per input vector RTYPE
+   Details     : Every element of the four input vectors is left shifted by
+                 the GP variable 'shift' and the result overwrites the input
+                 in-place.
+*/
+#define SLLI_4V( in0, in1, in2, in3, shift )  \
+{                                             \
+    in0 <<= shift;                            \
+    in1 <<= shift;                            \
+    in2 <<= shift;                            \
+    in3 <<= shift;                            \
+}
+
+/* Description : Arithmetic shift right all elements of vector
+                 (generic for all data types)
+   Arguments   : Inputs  - in0, in1, in2, in3, shift
+                 Outputs - in place operation
+                 Return Type - as per input vector RTYPE
+   Details     : Every element of the four input vectors is arithmetically
+                 right shifted by the GP variable 'shift' and the result
+                 overwrites the input in-place.
+*/
+#define SRA_4V( in0, in1, in2, in3, shift )  \
+{                                            \
+    in0 >>= shift;                           \
+    in1 >>= shift;                           \
+    in2 >>= shift;                           \
+    in3 >>= shift;                           \
+}
+
+/* Description : Shift right arithmetic rounded halfwords
+   Arguments   : Inputs  - in0, in1, shift
+                 Outputs - in place operation
+                 Return Type - as per RTYPE
+   Details     : Each element of vector 'in0' is shifted right arithmetic by
+                 number of bits respective element holds in vector 'shift'.
+                 The last discarded bit is added to shifted value for rounding
+                 and the result is written in-place.
+                 'shift' is a vector.
+*/
+#define SRAR_H2( RTYPE, in0, in1, shift )                            \
+{                                                                    \
+    in0 = ( RTYPE ) __msa_srar_h( ( v8i16 ) in0, ( v8i16 ) shift );  \
+    in1 = ( RTYPE ) __msa_srar_h( ( v8i16 ) in1, ( v8i16 ) shift );  \
+}
+#define SRAR_H2_SH( ... ) SRAR_H2( v8i16, __VA_ARGS__ )
+
+/* 4-vector variant: SRAR_H2 applied twice.
+   Invocations are terminated with ';' for consistency with SRARI_H4. */
+#define SRAR_H4( RTYPE, in0, in1, in2, in3, shift )  \
+{                                                    \
+    SRAR_H2( RTYPE, in0, in1, shift );               \
+    SRAR_H2( RTYPE, in2, in3, shift );               \
+}
+#define SRAR_H4_SH( ... ) SRAR_H4( v8i16, __VA_ARGS__ )
+
+/* Description : Shift right logical all halfword elements of vector
+   Arguments   : Inputs  - in0, in1, in2, in3, shift
+                 Outputs - in place operation
+                 Return Type - as per RTYPE
+   Details     : Each element of vector 'in0' is shifted right logical by
+                 number of bits respective element holds in vector 'shift' and
+                 the result is stored in-place. 'shift' is a vector.
+*/
+#define SRL_H4( RTYPE, in0, in1, in2, in3, shift )                  \
+{                                                                   \
+    in0 = ( RTYPE ) __msa_srl_h( ( v8i16 ) in0, ( v8i16 ) shift );  \
+    in1 = ( RTYPE ) __msa_srl_h( ( v8i16 ) in1, ( v8i16 ) shift );  \
+    in2 = ( RTYPE ) __msa_srl_h( ( v8i16 ) in2, ( v8i16 ) shift );  \
+    in3 = ( RTYPE ) __msa_srl_h( ( v8i16 ) in3, ( v8i16 ) shift );  \
+}
+#define SRL_H4_UH( ... ) SRL_H4( v8u16, __VA_ARGS__ )
+
+/* Description : Shift right arithmetic rounded (immediate)
+   Arguments   : Inputs  - in0, in1, shift
+                 Outputs - in place operation
+                 Return Type - as per RTYPE
+   Details     : Each element of vector 'in0' is shifted right arithmetic by
+                 value in 'shift'. The last discarded bit is added to shifted
+                 value for rounding and the result is written in-place.
+                 'shift' is an immediate value.
+*/
+#define SRARI_H2( RTYPE, in0, in1, shift )                  \
+{                                                           \
+    in0 = ( RTYPE ) __msa_srari_h( ( v8i16 ) in0, shift );  \
+    in1 = ( RTYPE ) __msa_srari_h( ( v8i16 ) in1, shift );  \
+}
+#define SRARI_H2_UH( ... ) SRARI_H2( v8u16, __VA_ARGS__ )
+#define SRARI_H2_SH( ... ) SRARI_H2( v8i16, __VA_ARGS__ )
+
+#define SRARI_H4( RTYPE, in0, in1, in2, in3, shift )    \
+{                                                       \
+    SRARI_H2( RTYPE, in0, in1, shift );                 \
+    SRARI_H2( RTYPE, in2, in3, shift );                 \
+}
+#define SRARI_H4_UH( ... ) SRARI_H4( v8u16, __VA_ARGS__ )
+#define SRARI_H4_SH( ... ) SRARI_H4( v8i16, __VA_ARGS__ )
+
+/* Description : Shift right arithmetic rounded word elements (immediate)
+   Details     : Word (32-bit) variant of SRARI_H2 / SRARI_H4 above;
+                 'shift' is an immediate value.
+*/
+#define SRARI_W2( RTYPE, in0, in1, shift )                  \
+{                                                           \
+    in0 = ( RTYPE ) __msa_srari_w( ( v4i32 ) in0, shift );  \
+    in1 = ( RTYPE ) __msa_srari_w( ( v4i32 ) in1, shift );  \
+}
+#define SRARI_W2_SW( ... ) SRARI_W2( v4i32, __VA_ARGS__ )
+
+#define SRARI_W4( RTYPE, in0, in1, in2, in3, shift )  \
+{                                                     \
+    SRARI_W2( RTYPE, in0, in1, shift );               \
+    SRARI_W2( RTYPE, in2, in3, shift );               \
+}
+#define SRARI_W4_SW( ... ) SRARI_W4( v4i32, __VA_ARGS__ )
+
+/* Description : Multiplication of pairs of vectors
+   Arguments   : Inputs  - in0, in1, in2, in3
+                 Outputs - out0, out1
+   Details     : Each element from 'in0' is multiplied with elements from 'in1'
+                 and the result is written to 'out0'
+*/
+#define MUL2( in0, in1, in2, in3, out0, out1 )  \
+{                                               \
+    out0 = in0 * in1;                           \
+    out1 = in2 * in3;                           \
+}
+/* Multiplication of four pairs of vectors: MUL2 applied twice */
+#define MUL4( in0, in1, in2, in3, in4, in5, in6, in7,  \
+              out0, out1, out2, out3 )                 \
+{                                                      \
+    MUL2( in0, in1, in2, in3, out0, out1 );            \
+    MUL2( in4, in5, in6, in7, out2, out3 );            \
+}
+
+/* Description : Addition of 2 pairs of vectors
+   Arguments   : Inputs  - in0, in1, in2, in3
+                 Outputs - out0, out1
+   Details     : Each element in 'in0' is added to 'in1' and result is written
+                 to 'out0'.
+*/
+#define ADD2( in0, in1, in2, in3, out0, out1 )  \
+{                                               \
+    out0 = in0 + in1;                           \
+    out1 = in2 + in3;                           \
+}
+/* Addition of four pairs of vectors: ADD2 applied twice */
+#define ADD4( in0, in1, in2, in3, in4, in5, in6, in7,  \
+              out0, out1, out2, out3 )                 \
+{                                                      \
+    ADD2( in0, in1, in2, in3, out0, out1 );            \
+    ADD2( in4, in5, in6, in7, out2, out3 );            \
+}
+
+/* Description : Subtraction of 4 pairs of vectors
+   Arguments   : Inputs  - in0 ... in7
+                 Outputs - out0, out1, out2, out3
+   Details     : Each odd-indexed input is subtracted from the preceding
+                 even-indexed input: out0 = in0 - in1, ..., out3 = in6 - in7.
+*/
+#define SUB4( in0, in1, in2, in3, in4, in5, in6, in7,  \
+              out0, out1, out2, out3 )                 \
+{                                                      \
+    out0 = in0 - in1;                                  \
+    out1 = in2 - in3;                                  \
+    out2 = in4 - in5;                                  \
+    out3 = in6 - in7;                                  \
+}
+
+/* Description : Sign extend halfword elements from right half of the vector
+   Arguments   : Input  - in    (halfword vector)
+                 Output - out   (sign extended word vector)
+                 Return Type - signed word
+   Details     : Sign bit of halfword elements from input vector 'in' is
+                 extracted and interleaved with same vector 'in' to generate
+                 4 word elements keeping sign intact
+*/
+#define UNPCK_R_SH_SW( in, out )                           \
+{                                                          \
+    v8i16 sign_m;                                          \
+                                                           \
+    sign_m = __msa_clti_s_h( ( v8i16 ) in, 0 );            \
+    out = ( v4i32 ) __msa_ilvr_h( sign_m, ( v8i16 ) in );  \
+}
+
+/* Description : Zero extend unsigned byte elements to halfword elements
+   Arguments   : Input  - in           (unsigned byte vector)
+                 Outputs - out0, out1  (unsigned halfword vectors)
+                 Return Type - signed halfword
+   Details     : Zero extended right half of vector is returned in 'out0'
+                 Zero extended left half of vector is returned in 'out1'
+*/
+#define UNPCK_UB_SH( in, out0, out1 )       \
+{                                           \
+    v16i8 zero_m = { 0 };                   \
+                                            \
+    ILVRL_B2_SH( zero_m, in, out0, out1 );  \
+}
+
+/* Description : Sign extend halfword elements from input vector and return
+                 the result in pair of vectors
+   Arguments   : Input  - in            (halfword vector)
+                 Outputs - out0, out1   (sign extended word vectors)
+                 Return Type - signed word
+   Details     : Sign bit of halfword elements from input vector 'in' is
+                 extracted and interleaved right with same vector 'in' to
+                 generate 4 signed word elements in 'out0'
+                 Then interleaved left with same vector 'in' to
+                 generate 4 signed word elements in 'out1'
+*/
+#define UNPCK_SH_SW( in, out0, out1 )           \
+{                                               \
+    v8i16 tmp_m;                                \
+                                                \
+    tmp_m = __msa_clti_s_h( ( v8i16 ) in, 0 );  \
+    ILVRL_H2_SW( tmp_m, in, out0, out1 );       \
+}
+
+/* Description : Butterfly of 4 input vectors
+   Arguments   : Inputs  - in0, in1, in2, in3
+                 Outputs - out0, out1, out2, out3
+   Details     : Butterfly operation:
+                 out0 = in0 + in3;  out1 = in1 + in2;
+                 out2 = in1 - in2;  out3 = in0 - in3
+                 NOTE(review): assumes outputs do not alias inputs that are
+                 read after being written - confirm at call sites.
+*/
+#define BUTTERFLY_4( in0, in1, in2, in3, out0, out1, out2, out3 )  \
+{                                                                  \
+    out0 = in0 + in3;                                              \
+    out1 = in1 + in2;                                              \
+                                                                   \
+    out2 = in1 - in2;                                              \
+    out3 = in0 - in3;                                              \
+}
+
+/* Description : Butterfly of 8 input vectors
+   Arguments   : Inputs  - in0 ...  in7
+                 Outputs - out0 .. out7
+   Details     : Butterfly operation:
+                 outN = inN + in(7-N) for N = 0..3
+                 outN = in(7-N) - inN for N = 4..7
+                 NOTE(review): same aliasing assumption as BUTTERFLY_4.
+*/
+#define BUTTERFLY_8( in0, in1, in2, in3, in4, in5, in6, in7,           \
+                     out0, out1, out2, out3, out4, out5, out6, out7 )  \
+{                                                                      \
+    out0 = in0 + in7;                                                  \
+    out1 = in1 + in6;                                                  \
+    out2 = in2 + in5;                                                  \
+    out3 = in3 + in4;                                                  \
+                                                                       \
+    out4 = in3 - in4;                                                  \
+    out5 = in2 - in5;                                                  \
+    out6 = in1 - in6;                                                  \
+    out7 = in0 - in7;                                                  \
+}
+
+/* Description : Transpose input 8x8 byte block
+   Arguments   : Inputs  - in0, in1, in2, in3, in4, in5, in6, in7
+                 Outputs - out0, out1, out2, out3, out4, out5, out6, out7
+                 Return Type - as per RTYPE
+   Details     : Rows are combined with byte and word interleaves; the odd
+                 output rows are then produced by sliding the even rows down
+                 by 8 bytes (SLDI_B2_0).
+*/
+#define TRANSPOSE8x8_UB( RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,    \
+                         out0, out1, out2, out3, out4, out5, out6, out7 )  \
+{                                                                          \
+    v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                  \
+    v16i8 tmp4_m, tmp5_m, tmp6_m, tmp7_m;                                  \
+                                                                           \
+    ILVR_B4_SB( in2, in0, in3, in1, in6, in4, in7, in5,                    \
+                tmp0_m, tmp1_m, tmp2_m, tmp3_m );                          \
+    ILVRL_B2_SB( tmp1_m, tmp0_m, tmp4_m, tmp5_m );                         \
+    ILVRL_B2_SB( tmp3_m, tmp2_m, tmp6_m, tmp7_m );                         \
+    ILVRL_W2( RTYPE, tmp6_m, tmp4_m, out0, out2 );                         \
+    ILVRL_W2( RTYPE, tmp7_m, tmp5_m, out4, out6 );                         \
+    SLDI_B2_0( RTYPE, out0, out2, out1, out3, 8 );                         \
+    SLDI_B2_0( RTYPE, out4, out6, out5, out7, 8 );                         \
+}
+#define TRANSPOSE8x8_UB_UB( ... ) TRANSPOSE8x8_UB( v16u8, __VA_ARGS__ )
+
+/* Description : Transpose 16x8 block into 8x16 with byte elements in vectors
+   Arguments   : Inputs  - in0, in1, in2, in3, in4, in5, in6, in7,
+                           in8, in9, in10, in11, in12, in13, in14, in15
+                 Outputs - out0, out1, out2, out3, out4, out5, out6, out7
+                 Return Type - unsigned byte
+   Details     : Successive doubleword, byte, halfword and word even/odd
+                 interleaves rearrange the 16 input rows into 8 output rows.
+*/
+#define TRANSPOSE16x8_UB_UB( in0, in1, in2, in3, in4, in5, in6, in7,           \
+                             in8, in9, in10, in11, in12, in13, in14, in15,     \
+                             out0, out1, out2, out3, out4, out5, out6, out7 )  \
+{                                                                              \
+    v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                      \
+    v16u8 tmp4_m, tmp5_m, tmp6_m, tmp7_m;                                      \
+                                                                               \
+    ILVEV_D2_UB( in0, in8, in1, in9, out7, out6 );                             \
+    ILVEV_D2_UB( in2, in10, in3, in11, out5, out4 );                           \
+    ILVEV_D2_UB( in4, in12, in5, in13, out3, out2 );                           \
+    ILVEV_D2_UB( in6, in14, in7, in15, out1, out0 );                           \
+                                                                               \
+    tmp0_m = ( v16u8 ) __msa_ilvev_b( ( v16i8 ) out6, ( v16i8 ) out7 );        \
+    tmp4_m = ( v16u8 ) __msa_ilvod_b( ( v16i8 ) out6, ( v16i8 ) out7 );        \
+    tmp1_m = ( v16u8 ) __msa_ilvev_b( ( v16i8 ) out4, ( v16i8 ) out5 );        \
+    tmp5_m = ( v16u8 ) __msa_ilvod_b( ( v16i8 ) out4, ( v16i8 ) out5 );        \
+    out5 = ( v16u8 ) __msa_ilvev_b( ( v16i8 ) out2, ( v16i8 ) out3 );          \
+    tmp6_m = ( v16u8 ) __msa_ilvod_b( ( v16i8 ) out2, ( v16i8 ) out3 );        \
+    out7 = ( v16u8 ) __msa_ilvev_b( ( v16i8 ) out0, ( v16i8 ) out1 );          \
+    tmp7_m = ( v16u8 ) __msa_ilvod_b( ( v16i8 ) out0, ( v16i8 ) out1 );        \
+                                                                               \
+    ILVEV_H2_UB( tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m );                 \
+    out0 = ( v16u8 ) __msa_ilvev_w( ( v4i32 ) tmp3_m, ( v4i32 ) tmp2_m );      \
+    out4 = ( v16u8 ) __msa_ilvod_w( ( v4i32 ) tmp3_m, ( v4i32 ) tmp2_m );      \
+                                                                               \
+    tmp2_m = ( v16u8 ) __msa_ilvod_h( ( v8i16 ) tmp1_m, ( v8i16 ) tmp0_m );    \
+    tmp3_m = ( v16u8 ) __msa_ilvod_h( ( v8i16 ) out7, ( v8i16 ) out5 );        \
+    out2 = ( v16u8 ) __msa_ilvev_w( ( v4i32 ) tmp3_m, ( v4i32 ) tmp2_m );      \
+    out6 = ( v16u8 ) __msa_ilvod_w( ( v4i32 ) tmp3_m, ( v4i32 ) tmp2_m );      \
+                                                                               \
+    ILVEV_H2_UB( tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m );             \
+    out1 = ( v16u8 ) __msa_ilvev_w( ( v4i32 ) tmp3_m, ( v4i32 ) tmp2_m );      \
+    out5 = ( v16u8 ) __msa_ilvod_w( ( v4i32 ) tmp3_m, ( v4i32 ) tmp2_m );      \
+                                                                               \
+    tmp2_m = ( v16u8 ) __msa_ilvod_h( ( v8i16 ) tmp5_m, ( v8i16 ) tmp4_m );    \
+    tmp3_m = ( v16u8 ) __msa_ilvod_h( ( v8i16 ) tmp7_m, ( v8i16 ) tmp6_m );    \
+    out3 = ( v16u8 ) __msa_ilvev_w( ( v4i32 ) tmp3_m, ( v4i32 ) tmp2_m );      \
+    out7 = ( v16u8 ) __msa_ilvod_w( ( v4i32 ) tmp3_m, ( v4i32 ) tmp2_m );      \
+}
+
+/* Description : Transpose 4x4 block with half word elements in vectors
+   Arguments   : Inputs  - in0, in1, in2, in3
+                 Outputs - out0, out1, out2, out3
+                 Return Type - signed halfword
+   Details     : NOTE(review): the upper 64 bits of out1/out3 carry duplicate
+                 or unrelated data from the interleaves - only the low four
+                 halfwords of each output row appear significant; confirm at
+                 call sites.
+*/
+#define TRANSPOSE4x4_SH_SH( in0, in1, in2, in3, out0, out1, out2, out3 )  \
+{                                                                         \
+    v8i16 s0_m, s1_m;                                                     \
+                                                                          \
+    ILVR_H2_SH( in1, in0, in3, in2, s0_m, s1_m );                         \
+    ILVRL_W2_SH( s1_m, s0_m, out0, out2 );                                \
+    out1 = ( v8i16 ) __msa_ilvl_d( ( v2i64 ) out0, ( v2i64 ) out0 );      \
+    out3 = ( v8i16 ) __msa_ilvl_d( ( v2i64 ) out0, ( v2i64 ) out2 );      \
+}
+
+/* Description : Transpose 4x8 block with half word elements in vectors
+   Arguments   : Inputs  - in0, in1, in2, in3, in4, in5, in6, in7
+                 Outputs - out0, out1, out2, out3, out4, out5, out6, out7
+                 Return Type - signed halfword
+   Details     : out0..out3 receive the transposed rows; out4..out7 are
+                 explicitly cleared to zero.
+*/
+#define TRANSPOSE4X8_SH_SH( in0, in1, in2, in3, in4, in5, in6, in7,           \
+                            out0, out1, out2, out3, out4, out5, out6, out7 )  \
+{                                                                             \
+    v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                     \
+    v8i16 tmp0_n, tmp1_n, tmp2_n, tmp3_n;                                     \
+    v8i16 zero_m = { 0 };                                                     \
+                                                                              \
+    ILVR_H4_SH( in1, in0, in3, in2, in5, in4, in7, in6,                       \
+                tmp0_n, tmp1_n, tmp2_n, tmp3_n );                             \
+    ILVRL_W2_SH( tmp1_n, tmp0_n, tmp0_m, tmp2_m );                            \
+    ILVRL_W2_SH( tmp3_n, tmp2_n, tmp1_m, tmp3_m );                            \
+                                                                              \
+    out0 = ( v8i16 ) __msa_ilvr_d( ( v2i64 ) tmp1_m, ( v2i64 ) tmp0_m );      \
+    out1 = ( v8i16 ) __msa_ilvl_d( ( v2i64 ) tmp1_m, ( v2i64 ) tmp0_m );      \
+    out2 = ( v8i16 ) __msa_ilvr_d( ( v2i64 ) tmp3_m, ( v2i64 ) tmp2_m );      \
+    out3 = ( v8i16 ) __msa_ilvl_d( ( v2i64 ) tmp3_m, ( v2i64 ) tmp2_m );      \
+                                                                              \
+    out4 = zero_m;                                                            \
+    out5 = zero_m;                                                            \
+    out6 = zero_m;                                                            \
+    out7 = zero_m;                                                            \
+}
+
+/* Description : Transpose 8x4 block with half word elements in vectors
+   Arguments   : Inputs  - in0, in1, in2, in3
+                 Outputs - out0, out1, out2, out3
+                 Return Type - signed halfword
+*/
+#define TRANSPOSE8X4_SH_SH( in0, in1, in2, in3, out0, out1, out2, out3 )  \
+{                                                                         \
+    v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                 \
+                                                                          \
+    ILVR_H2_SH( in1, in0, in3, in2, tmp0_m, tmp1_m );                     \
+    ILVL_H2_SH( in1, in0, in3, in2, tmp2_m, tmp3_m );                     \
+    ILVR_W2_SH( tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out2 );             \
+    ILVL_W2_SH( tmp1_m, tmp0_m, tmp3_m, tmp2_m, out1, out3 );             \
+}
+
+/* Description : Transpose 8x8 block with half word elements in vectors
+   Arguments   : Inputs  - in0, in1, in2, in3, in4, in5, in6, in7
+                 Outputs - out0, out1, out2, out3, out4, out5, out6, out7
+                 Return Type - as per RTYPE
+   Details     : Implemented with halfword interleaves followed by doubleword
+                 pack-even (even output rows) and pack-odd (odd output rows).
+*/
+#define TRANSPOSE8x8_H( RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,     \
+                        out0, out1, out2, out3, out4, out5, out6, out7 )   \
+{                                                                          \
+    v8i16 s0_m, s1_m;                                                      \
+    v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                  \
+    v8i16 tmp4_m, tmp5_m, tmp6_m, tmp7_m;                                  \
+                                                                           \
+    ILVR_H2_SH( in6, in4, in7, in5, s0_m, s1_m );                          \
+    ILVRL_H2_SH( s1_m, s0_m, tmp0_m, tmp1_m );                             \
+    ILVL_H2_SH( in6, in4, in7, in5, s0_m, s1_m );                          \
+    ILVRL_H2_SH( s1_m, s0_m, tmp2_m, tmp3_m );                             \
+    ILVR_H2_SH( in2, in0, in3, in1, s0_m, s1_m );                          \
+    ILVRL_H2_SH( s1_m, s0_m, tmp4_m, tmp5_m );                             \
+    ILVL_H2_SH( in2, in0, in3, in1, s0_m, s1_m );                          \
+    ILVRL_H2_SH( s1_m, s0_m, tmp6_m, tmp7_m );                             \
+    PCKEV_D4( RTYPE, tmp0_m, tmp4_m, tmp1_m, tmp5_m, tmp2_m, tmp6_m,       \
+              tmp3_m, tmp7_m, out0, out2, out4, out6 );                    \
+    out1 = ( RTYPE ) __msa_pckod_d( ( v2i64 ) tmp0_m, ( v2i64 ) tmp4_m );  \
+    out3 = ( RTYPE ) __msa_pckod_d( ( v2i64 ) tmp1_m, ( v2i64 ) tmp5_m );  \
+    out5 = ( RTYPE ) __msa_pckod_d( ( v2i64 ) tmp2_m, ( v2i64 ) tmp6_m );  \
+    out7 = ( RTYPE ) __msa_pckod_d( ( v2i64 ) tmp3_m, ( v2i64 ) tmp7_m );  \
+}
+#define TRANSPOSE8x8_SH_SH( ... ) TRANSPOSE8x8_H( v8i16, __VA_ARGS__ )
+
+/* Description : Transpose 4x4 block with word elements in vectors
+   Arguments   : Inputs  - in0, in1, in2, in3
+                 Outputs - out0, out1, out2, out3
+                 Return Type - signed word
+   Details     : Word elements are interleaved pairwise, then doubleword
+                 interleaves assemble the transposed rows.
+*/
+#define TRANSPOSE4x4_SW_SW( in0, in1, in2, in3, out0, out1, out2, out3 )  \
+{                                                                         \
+    v4i32 s0_m, s1_m, s2_m, s3_m;                                         \
+                                                                          \
+    ILVRL_W2_SW( in1, in0, s0_m, s1_m );                                  \
+    ILVRL_W2_SW( in3, in2, s2_m, s3_m );                                  \
+                                                                          \
+    out0 = ( v4i32 ) __msa_ilvr_d( ( v2i64 ) s2_m, ( v2i64 ) s0_m );      \
+    out1 = ( v4i32 ) __msa_ilvl_d( ( v2i64 ) s2_m, ( v2i64 ) s0_m );      \
+    out2 = ( v4i32 ) __msa_ilvr_d( ( v2i64 ) s3_m, ( v2i64 ) s1_m );      \
+    out3 = ( v4i32 ) __msa_ilvl_d( ( v2i64 ) s3_m, ( v2i64 ) s1_m );      \
+}
+
+/* Description : Add block 4x4
+   Arguments   : Inputs  - in0, in1, in2, in3, pdst, stride
+   Details     : Least significant 4 bytes from each input vector are added to
+                 the destination bytes, clipped between 0-255 and stored.
+*/
+#define ADDBLK_ST4x4_UB( in0, in1, in2, in3, p_dst, stride )        \
+{                                                                   \
+    uint32_t src0_m, src1_m, src2_m, src3_m;                        \
+    uint32_t out0_m, out1_m, out2_m, out3_m;                        \
+    v8i16 inp0_m, inp1_m, res0_m, res1_m;                           \
+    v16i8 dst0_m = { 0 };                                           \
+    v16i8 dst1_m = { 0 };                                           \
+    v16i8 zero_m = { 0 };                                           \
+                                                                    \
+    ILVR_D2_SH( in1, in0, in3, in2, inp0_m, inp1_m );               \
+    LW4( p_dst, stride, src0_m, src1_m, src2_m, src3_m );           \
+    INSERT_W2_SB( src0_m, src1_m, dst0_m );                         \
+    INSERT_W2_SB( src2_m, src3_m, dst1_m );                         \
+    ILVR_B2_SH( zero_m, dst0_m, zero_m, dst1_m, res0_m, res1_m );   \
+    ADD2( res0_m, inp0_m, res1_m, inp1_m, res0_m, res1_m );         \
+    CLIP_SH2_0_255( res0_m, res1_m );                               \
+    PCKEV_B2_SB( res0_m, res0_m, res1_m, res1_m, dst0_m, dst1_m );  \
+                                                                    \
+    out0_m = __msa_copy_u_w( ( v4i32 ) dst0_m, 0 );                 \
+    out1_m = __msa_copy_u_w( ( v4i32 ) dst0_m, 1 );                 \
+    out2_m = __msa_copy_u_w( ( v4i32 ) dst1_m, 0 );                 \
+    out3_m = __msa_copy_u_w( ( v4i32 ) dst1_m, 1 );                 \
+    SW4( out0_m, out1_m, out2_m, out3_m, p_dst, stride );           \
+}
+
+/* Description : Dot product and addition of 3 signed halfword input vectors
+   Arguments   : Inputs  - in0, in1, in2, coeff0, coeff1, coeff2
+                 Output - out0_m
+                 Return Type - signed halfword
+   Details     : Dot product of 'in0' with 'coeff0'
+                 Dot product of 'in1' with 'coeff1'
+                 Dot product of 'in2' with 'coeff2'
+                 Addition of all the 3 vector results
+                 out0_m = (in0 * coeff0) + (in1 * coeff1) + (in2 * coeff2)
+                 NOTE(review): the final accumulation uses the saturating
+                 __msa_adds_s_h, so overflow clamps rather than wrapping.
+*/
+#define DPADD_SH3_SH( in0, in1, in2, coeff0, coeff1, coeff2 )             \
+( {                                                                       \
+    v8i16 tmp1_m;                                                         \
+    v8i16 out0_m;                                                         \
+                                                                          \
+    out0_m = __msa_dotp_s_h( ( v16i8 ) in0, ( v16i8 ) coeff0 );           \
+    out0_m = __msa_dpadd_s_h( out0_m, ( v16i8 ) in1, ( v16i8 ) coeff1 );  \
+    tmp1_m = __msa_dotp_s_h( ( v16i8 ) in2, ( v16i8 ) coeff2 );           \
+    out0_m = __msa_adds_s_h( out0_m, tmp1_m );                            \
+                                                                          \
+    out0_m;                                                               \
+} )
+
+/* Description : Pack even elements of input vectors & xor with 128
+   Arguments   : Inputs  - in0, in1
+                 Output - out_m
+                 Return Type - unsigned byte
+   Details     : Signed byte even elements from 'in0' and 'in1' are packed
+                 together in one vector and the resulting vector is xor'ed with
+                 128 to shift the range from signed to unsigned byte
+*/
+#define PCKEV_XORI128_UB( in0, in1 )                                  \
+( {                                                                   \
+    v16u8 out_m;                                                      \
+    out_m = ( v16u8 ) __msa_pckev_b( ( v16i8 ) in1, ( v16i8 ) in0 );  \
+    out_m = ( v16u8 ) __msa_xori_b( ( v16u8 ) out_m, 128 );           \
+    out_m;                                                            \
+} )
+
+/* Description : Pack even byte elements, extract 0 & 2 index words from pair
+                 of results and store 4 words in destination memory as per
+                 stride
+   Arguments   : Inputs  - in0, in1, in2, in3, pdst, stride
+   Details     : After PCKEV_B2_SB, word 0 of each packed vector holds the
+                 first four even bytes of 'in0' / 'in2' and word 2 holds those
+                 of 'in1' / 'in3'; the four words are stored with 'stride'
+                 spacing via SW4.
+*/
+#define PCKEV_ST4x4_UB( in0, in1, in2, in3, p_dst, stride )  \
+{                                                            \
+    uint32_t out0_m, out1_m, out2_m, out3_m;                 \
+    v16i8 tmp0_m, tmp1_m;                                    \
+                                                             \
+    PCKEV_B2_SB( in1, in0, in3, in2, tmp0_m, tmp1_m );       \
+                                                             \
+    out0_m = __msa_copy_u_w( ( v4i32 ) tmp0_m, 0 );          \
+    out1_m = __msa_copy_u_w( ( v4i32 ) tmp0_m, 2 );          \
+    out2_m = __msa_copy_u_w( ( v4i32 ) tmp1_m, 0 );          \
+    out3_m = __msa_copy_u_w( ( v4i32 ) tmp1_m, 2 );          \
+                                                             \
+    SW4( out0_m, out1_m, out2_m, out3_m, p_dst, stride );    \
+}
+
+/* Description : Pack even byte elements and store byte vector in destination
+                 memory
+   Arguments   : Inputs  - in0, in1, pdst
+*/
+#define PCKEV_ST_SB( in0, in1, p_dst )                      \
+{                                                           \
+    v16i8 tmp_m;                                            \
+    tmp_m = __msa_pckev_b( ( v16i8 ) in1, ( v16i8 ) in0 );  \
+    ST_SB( tmp_m, ( p_dst ) );                              \
+}
+
+/* Description : 6-tap filter across six halfword input vectors
+   Arguments   : Inputs  - in0, in1, in2, in3, in4, in5
+                 Return Type - signed halfword
+   Details     : Applies the tap weights ( 1, -5, 20, 20, -5, 1 ) to the six
+                 inputs via horizontal adds and dot-product accumulates,
+                 rounds with a right shift of 10 (SRARI_W2_SW), saturates
+                 (SAT_SW2_SW with 7) and packs the two word vectors back to
+                 halfwords.
+                 NOTE(review): presumably the H.264 luma half-pel
+                 interpolation filter - confirm against callers.
+*/
+#define AVC_CALC_DPADD_H_6PIX_2COEFF_SH( in0, in1, in2, in3, in4, in5 )    \
+( {                                                                        \
+    v4i32 tmp0_m, tmp1_m;                                                  \
+    v8i16 out0_m, out1_m, out2_m, out3_m;                                  \
+    v8i16 minus5h_m = __msa_ldi_h( -5 );                                   \
+    v8i16 plus20h_m = __msa_ldi_h( 20 );                                   \
+                                                                           \
+    ILVRL_H2_SW( in5, in0, tmp0_m, tmp1_m );                               \
+                                                                           \
+    tmp0_m = __msa_hadd_s_w( ( v8i16 ) tmp0_m, ( v8i16 ) tmp0_m );         \
+    tmp1_m = __msa_hadd_s_w( ( v8i16 ) tmp1_m, ( v8i16 ) tmp1_m );         \
+                                                                           \
+    ILVRL_H2_SH( in1, in4, out0_m, out1_m );                               \
+    DPADD_SH2_SW( out0_m, out1_m, minus5h_m, minus5h_m, tmp0_m, tmp1_m );  \
+    ILVRL_H2_SH( in2, in3, out2_m, out3_m );                               \
+    DPADD_SH2_SW( out2_m, out3_m, plus20h_m, plus20h_m, tmp0_m, tmp1_m );  \
+                                                                           \
+    SRARI_W2_SW( tmp0_m, tmp1_m, 10 );                                     \
+    SAT_SW2_SW( tmp0_m, tmp1_m, 7 );                                       \
+    out0_m = __msa_pckev_h( ( v8i16 ) tmp1_m, ( v8i16 ) tmp0_m );          \
+                                                                           \
+    out0_m;                                                                \
+} )
+
+/* Description : Horizontal 6-tap filter on one vector of byte pixel data
+   Arguments   : Inputs  - in, mask0, mask1, mask2
+                 Return Type - signed halfword
+   Details     : 'mask0'..'mask2' shuffle the pixel pairs for the tap weights
+                 ( 1, -5, 20 ); a horizontal add and two dot-product
+                 accumulates build the filtered sum, which is returned
+                 without rounding.
+                 NOTE(review): presumably the H.264 luma half-pel horizontal
+                 filter - confirm against callers.
+*/
+#define AVC_HORZ_FILTER_SH( in, mask0, mask1, mask2 )        \
+( {                                                          \
+    v8i16 out0_m, out1_m;                                    \
+    v16i8 tmp0_m, tmp1_m;                                    \
+    v16i8 minus5b_m = __msa_ldi_b( -5 );                     \
+    v16i8 plus20b_m = __msa_ldi_b( 20 );                     \
+                                                             \
+    tmp0_m = __msa_vshf_b( ( v16i8 ) mask0, in, in );        \
+    out0_m = __msa_hadd_s_h( tmp0_m, tmp0_m );               \
+                                                             \
+    tmp0_m = __msa_vshf_b( ( v16i8 ) mask1, in, in );        \
+    out0_m = __msa_dpadd_s_h( out0_m, minus5b_m, tmp0_m );   \
+                                                             \
+    tmp1_m = __msa_vshf_b( ( v16i8 ) mask2, in, in );        \
+    out1_m = __msa_dpadd_s_h( out0_m, plus20b_m, tmp1_m );   \
+                                                             \
+    out1_m;                                                  \
+} )
+
 #endif  /* X264_MIPS_MACROS_H */
-- 
2.3.7



More information about the x264-devel mailing list