[x265] [PATCH] fix warnings in Power8

mahesh at multicorewareinc.com mahesh at multicorewareinc.com
Tue Nov 15 06:03:25 CET 2016


# HG changeset patch
# User Mahesh Pittala <mahesh at multicorewareinc.com>
# Date 1478159451 -19800
#      Thu Nov 03 13:20:51 2016 +0530
# Node ID b8c19c95ec69bd8d483a58551ef151ab73beebd7
# Parent  a378efc939e37f13ec1673fda055b1c3d0632e68
fix warnings in Power8

diff -r a378efc939e3 -r b8c19c95ec69 source/common/CMakeLists.txt
--- a/source/common/CMakeLists.txt	Sun Nov 06 23:24:16 2016 +0100
+++ b/source/common/CMakeLists.txt	Thu Nov 03 13:20:51 2016 +0530
@@ -107,7 +107,7 @@
             set(ALTIVEC_PRIMITIVES ${ALTIVEC_PRIMITIVES} ppc/${SRC})
         endforeach()
         source_group(Intrinsics_altivec FILES ${ALTIVEC_PRIMITIVES})
-        set_source_files_properties(${ALTIVEC_PRIMITIVES} PROPERTIES COMPILE_FLAGS -Wno-unused)
+        set_source_files_properties(${ALTIVEC_PRIMITIVES} PROPERTIES COMPILE_FLAGS "-Wno-unused -Wno-unknown-pragmas -Wno-maybe-uninitialized")
     endif()
 endif()
 
diff -r a378efc939e3 -r b8c19c95ec69 source/common/ppc/intrapred_altivec.cpp
--- a/source/common/ppc/intrapred_altivec.cpp	Sun Nov 06 23:24:16 2016 +0100
+++ b/source/common/ppc/intrapred_altivec.cpp	Thu Nov 03 13:20:51 2016 +0530
@@ -206,7 +206,7 @@
 vmlo1 = vec_mulo(s1, vf);\
 vsume = vec_add(vec_add(vmle0, vmle1), u16_16);\
 ve = vec_sra(vsume, u16_5);\
-vsumo = vec_add(vec_add(vmlo0, vmlo1), u16_16);\ 
+vsumo = vec_add(vec_add(vmlo0, vmlo1), u16_16);\
 vo = vec_sra(vsumo, u16_5);\
 vout = vec_pack(vec_mergeh(ve, vo), vec_mergel(ve, vo));\
 }
@@ -15755,13 +15755,13 @@
 
 #if 0
     #define one_line(s0, s1, vf32, vf, vout) {\
-        vmle0 = vec_mule(s0, vf32);\ 
-        vmlo0 = vec_mulo(s0, vf32);\ 
+        vmle0 = vec_mule(s0, vf32);\
+        vmlo0 = vec_mulo(s0, vf32);\
         vmle1 = vec_mule(s1, vf);\
-        vmlo1 = vec_mulo(s1, vf);\ 
+        vmlo1 = vec_mulo(s1, vf);\
         vsume = vec_add(vec_add(vmle0, vmle1), u16_16);\
         ve = vec_sra(vsume, u16_5);\
-        vsumo = vec_add(vec_add(vmlo0, vmlo1), u16_16);\ 
+        vsumo = vec_add(vec_add(vmlo0, vmlo1), u16_16);\
         vo = vec_sra(vsumo, u16_5);\
         vout = vec_pack(vec_mergeh(ve, vo), vec_mergel(ve, vo));\
     }
@@ -16896,13 +16896,13 @@
 
 #if 0
     #define one_line(s0, s1, vf32, vf, vout) {\
-        vmle0 = vec_mule(s0, vf32);\ 
-        vmlo0 = vec_mulo(s0, vf32);\ 
+        vmle0 = vec_mule(s0, vf32);\
+        vmlo0 = vec_mulo(s0, vf32);\
         vmle1 = vec_mule(s1, vf);\
-        vmlo1 = vec_mulo(s1, vf);\ 
+        vmlo1 = vec_mulo(s1, vf);\
         vsume = vec_add(vec_add(vmle0, vmle1), u16_16);\
         ve = vec_sra(vsume, u16_5);\
-        vsumo = vec_add(vec_add(vmlo0, vmlo1), u16_16);\ 
+        vsumo = vec_add(vec_add(vmlo0, vmlo1), u16_16);\
         vo = vec_sra(vsumo, u16_5);\
         vout = vec_pack(vec_mergeh(ve, vo), vec_mergel(ve, vo));\
     }
@@ -30741,9 +30741,9 @@
 
 #define ONE_ANG(log2Size, mode, dest, refPix, filtPix, bLuma)\
 {\
-    const int width = 1<< log2Size;\  
+    const int width = 1<< log2Size;\
     pixel *srcPix0  = (g_intraFilterFlags[mode] & width ? filtPix  : refPix);\
-    pixel *dst = dest + ((mode - 2) << (log2Size * 2));\ 
+    pixel *dst = dest + ((mode - 2) << (log2Size * 2));\
     srcPix0  = refPix;\
     dst = dest;\
     one_ang_pred_altivec<width, mode>(dst, srcPix0, bLuma);\
diff -r a378efc939e3 -r b8c19c95ec69 source/common/ppc/ipfilter_altivec.cpp
--- a/source/common/ppc/ipfilter_altivec.cpp	Sun Nov 06 23:24:16 2016 +0100
+++ b/source/common/ppc/ipfilter_altivec.cpp	Thu Nov 03 13:20:51 2016 +0530
@@ -319,11 +319,11 @@
             vsumH += vsrcH * vcoeff7;
             vsumL += vsrcL * vcoeff7;
 
-            vector short vvalH = vsumH + voffset >> vshift;
+            vector short vvalH = (vsumH + voffset) >> vshift;
             vvalH = vec_max( vvalH, vzero_s16 );
             vvalH = vec_min( vvalH, vmaxVal   );
 
-            vector short vvalL = vsumL + voffset >> vshift;
+            vector short vvalL = (vsumL + voffset) >> vshift;
             vvalL = vec_max( vvalL, vzero_s16 );
             vvalL = vec_min( vvalL, vmaxVal   );
 
@@ -378,11 +378,11 @@
             vsum2H += vsrc2H * vcoeff7;
             vsum2L += vsrc2L * vcoeff7;
 
-            vector short vval2H = vsum2H + voffset >> vshift;
+            vector short vval2H = (vsum2H + voffset) >> vshift;
             vval2H = vec_max( vval2H, vzero_s16 );
             vval2H = vec_min( vval2H, vmaxVal   );
 
-            vector short vval2L = vsum2L + voffset >> vshift;
+            vector short vval2L = (vsum2L + voffset) >> vshift;
             vval2L = vec_max( vval2L, vzero_s16 );
             vval2L = vec_min( vval2L, vmaxVal   );
 
@@ -452,9 +452,9 @@
 void filterVertical_sp_altivec(const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx)
 {
     int headRoom = IF_INTERNAL_PREC - X265_DEPTH;
-    int shift = IF_FILTER_PREC + headRoom;
+    unsigned int shift = IF_FILTER_PREC + headRoom;
     int offset = (1 << (shift - 1)) + (IF_INTERNAL_OFFS << IF_FILTER_PREC);
-    uint16_t maxVal = (1 << X265_DEPTH) - 1;
+    const uint16_t maxVal = (1 << X265_DEPTH) - 1;
     const int16_t* coeff = (N == 8 ? g_lumaFilter[coeffIdx] : g_chromaFilter[coeffIdx]);
 
     src -= (N / 2 - 1) * srcStride;
@@ -594,7 +594,7 @@
 
     const int16_t* coeff = (N == 4) ? g_chromaFilter[coeffIdx] : g_lumaFilter[coeffIdx];
     int headRoom = IF_INTERNAL_PREC - X265_DEPTH;
-    int shift = IF_FILTER_PREC - headRoom;
+    unsigned int shift = IF_FILTER_PREC - headRoom;
     int offset = -IF_INTERNAL_OFFS << shift;
     int blkheight = height;
 
@@ -1148,11 +1148,11 @@
             vsumH  += vsrcH * vcoeff7;
             vsumL  += vsrcL * vcoeff7;
 
-            vector short vvalH = vsumH + voffset >> vheadRoom;
+            vector short vvalH = (vsumH + voffset) >> vheadRoom;
             vvalH = vec_max( vvalH, vzero_s16 );
             vvalH = vec_min( vvalH, vmaxVal   );
 
-            vector short vvalL = vsumL + voffset >> vheadRoom;
+            vector short vvalL = (vsumL + voffset) >> vheadRoom;
             vvalL = vec_max( vvalL, vzero_s16 );
             vvalL = vec_min( vvalL, vmaxVal   );
 
@@ -1210,11 +1210,11 @@
             vsum2H  += vsrc2H * vcoeff7;
             vsum2L  += vsrc2L * vcoeff7;
 
-            vector short vval2H = vsum2H + voffset >> vheadRoom;
+            vector short vval2H = (vsum2H + voffset) >> vheadRoom;
             vval2H = vec_max( vval2H, vzero_s16 );
             vval2H = vec_min( vval2H, vmaxVal   );
 
-            vector short vval2L = vsum2L + voffset >> vheadRoom;
+            vector short vval2L = (vsum2L + voffset) >> vheadRoom;
             vval2L = vec_max( vval2L, vzero_s16 );
             vval2L = vec_min( vval2L, vmaxVal   );
 
@@ -1255,7 +1255,7 @@
 //
 //    vector unsigned char v_pixel_char_0, v_pixel_char_1, v_pixel_char_2 ;
 //    vector signed short v_pixel_short_0, v_pixel_short_1, v_pixel_short_2, v_pixel_short_3, v_pixel_short_4 ;
-//    const vector signed short v_mask_unisgned_char_to_short = {0x00FF, 0x00FF, 0x00FF, 0x00FF, 0x00FF, 0x00FF, 0x00FF, 0x00FF} ; \
+//    const vector signed short v_mask_unsigned_char_to_short = {0x00FF, 0x00FF, 0x00FF, 0x00FF, 0x00FF, 0x00FF, 0x00FF, 0x00FF} ;
 //    const vector signed int v_zeros_int = {0, 0, 0, 0} ;
 //    const vector signed short v_zeros_short = {0, 0, 0, 0, 0, 0, 0, 0} ;
 //
diff -r a378efc939e3 -r b8c19c95ec69 source/common/ppc/pixel_altivec.cpp
--- a/source/common/ppc/pixel_altivec.cpp	Sun Nov 06 23:24:16 2016 +0100
+++ b/source/common/ppc/pixel_altivec.cpp	Thu Nov 03 13:20:51 2016 +0530
@@ -303,7 +303,7 @@
 }
 
 template<int lx, int ly>
-int inline sad_altivec(const pixel* pix1, intptr_t stride_pix1, const pixel* pix2, intptr_t stride_pix2){}
+int inline sad_altivec(const pixel* pix1, intptr_t stride_pix1, const pixel* pix2, intptr_t stride_pix2){ return 0; }
 
 template<>
 int inline sad_altivec<24, 32>(const pixel* pix1, intptr_t stride_pix1, const pixel* pix2, intptr_t stride_pix2)
@@ -3335,6 +3335,7 @@
               +satd_16x16_altivec(pix3, stride_pix1, pix4, stride_pix2)
               + satd_16x16_altivec(pix3+16, stride_pix1, pix4+16, stride_pix2)
               + satd_16x16_altivec(pix3+32, stride_pix1, pix4+32, stride_pix2);
+    return satd;
 }
 
 template<>
@@ -3349,6 +3350,7 @@
               +satd_16x16_altivec(pix3, stride_pix1, pix4, stride_pix2)
               + satd_16x16_altivec(pix3+16, stride_pix1, pix4+16, stride_pix2)
               + satd_16x16_altivec(pix3+32, stride_pix1, pix4+32, stride_pix2);
+    return satd;
 }
 
 template<>
@@ -3373,6 +3375,7 @@
               +satd_16x16_altivec(pix7, stride_pix1, pix8, stride_pix2)
               + satd_16x16_altivec(pix7+16, stride_pix1,pix8+16, stride_pix2)
               + satd_16x16_altivec(pix7+32, stride_pix1, pix8+32, stride_pix2);
+    return satd;
 }
 
 template<>
@@ -3657,7 +3660,7 @@
 
 int sa8d_8x8_altivec(const int16_t* pix1, intptr_t i_pix1)
 {
-    int sum;
+    int sum = 0;
     return ((sum+2)>>2);
 }
 


More information about the x265-devel mailing list