[x265] [PATCH] multilib: static functions to avoid link conflict (closes #184)

Steve Borho steve at borho.org
Thu Jul 2 18:32:21 CEST 2015


# HG changeset patch
# User Min Chen <chenm003 at 163.com>
# Date 1435619821 25200
#      Mon Jun 29 16:17:01 2015 -0700
# Node ID a632a7b1b90e8d8a6eccfc72317c4e34ea1935a2
# Parent  c2ee1d9a69d7b1ed401d52c1695a89869965cac6
multilib: static functions to avoid link conflict (closes #184)

MSVC's implementation of anonymous namespaces prevents linking two builds of the
same library together: the symbols are not made properly file-local, they are only
placed in a file-unique namespace, and that namespace name is identical in both builds.

Because of this, we must use static declarations instead.

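To illustrate the pattern applied throughout the diff below, here is a minimal
sketch (not part of the patch; only the copy256 name and signature come from
cudata.cpp, everything else is hypothetical): file-local helpers move out of the
anonymous namespace and become static, so each single-bit-depth build keeps
truly internal symbols and the multilib link no longer needs /FORCE:MULTIPLE.

  /* illustrative only -- not part of the patch */
  #include <cstdint>
  #include <cstring>

  /* Before: the helper lived in an anonymous namespace.  Per the commit
   * message, MSVC derives the "unique" namespace name per file, so the
   * main10 and main12 objects built from the same source file decorate the
   * symbol identically, and linking both static libs needed /FORCE:MULTIPLE.
   *
   * namespace {
   * void copy256(uint8_t* dst, uint8_t* src) { memcpy(dst, src, 256); }
   * }
   */

  /* After: 'static' gives the helper internal linkage in each object file,
   * so the two single-bit-depth libraries link into one binary cleanly. */
  static void copy256(uint8_t* dst, uint8_t* src) { std::memcpy(dst, src, 256); }

  int main()
  {
      uint8_t src[256] = { 42 }, dst[256] = { 0 };
      copy256(dst, src);               /* keep the static helper referenced */
      return dst[0] == 42 ? 0 : 1;
  }
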
diff -r c2ee1d9a69d7 -r a632a7b1b90e build/vc10-x86_64/multilib.bat
--- a/build/vc10-x86_64/multilib.bat	Wed Jul 01 13:58:25 2015 -0500
+++ b/build/vc10-x86_64/multilib.bat	Mon Jun 29 16:17:01 2015 -0700
@@ -36,7 +36,7 @@
   exit 1
 )
 if not exist x265.sln (
-  cmake  -G "Visual Studio 10 Win64" ../../../source -DHIGH_BIT_DEPTH=OFF -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=ON -DEXTRA_LIB="x265-static-main10.lib;x265-static-main12.lib" -DEXTRA_LINK_FLAGS="/FORCE:MULTIPLE"
+  cmake  -G "Visual Studio 10 Win64" ../../../source -DHIGH_BIT_DEPTH=OFF -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=ON -DEXTRA_LIB="x265-static-main10.lib;x265-static-main12.lib"
 )
 if exist x265.sln (
   call "%VS100COMNTOOLS%\..\..\VC\vcvarsall.bat"
diff -r c2ee1d9a69d7 -r a632a7b1b90e build/vc11-x86_64/multilib.bat
--- a/build/vc11-x86_64/multilib.bat	Wed Jul 01 13:58:25 2015 -0500
+++ b/build/vc11-x86_64/multilib.bat	Mon Jun 29 16:17:01 2015 -0700
@@ -36,7 +36,7 @@
   exit 1
 )
 if not exist x265.sln (
-  cmake  -G "Visual Studio 11 Win64" ../../../source -DHIGH_BIT_DEPTH=OFF -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=ON -DEXTRA_LIB="x265-static-main10.lib;x265-static-main12.lib" -DEXTRA_LINK_FLAGS="/FORCE:MULTIPLE"
+  cmake  -G "Visual Studio 11 Win64" ../../../source -DHIGH_BIT_DEPTH=OFF -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=ON -DEXTRA_LIB="x265-static-main10.lib;x265-static-main12.lib"
 )
 if exist x265.sln (
   call "%VS110COMNTOOLS%\..\..\VC\vcvarsall.bat"
diff -r c2ee1d9a69d7 -r a632a7b1b90e build/vc12-x86_64/multilib.bat
--- a/build/vc12-x86_64/multilib.bat	Wed Jul 01 13:58:25 2015 -0500
+++ b/build/vc12-x86_64/multilib.bat	Mon Jun 29 16:17:01 2015 -0700
@@ -36,7 +36,7 @@
   exit 1
 )
 if not exist x265.sln (
-  cmake  -G "Visual Studio 12 Win64" ../../../source -DHIGH_BIT_DEPTH=OFF -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=ON -DEXTRA_LIB="x265-static-main10.lib;x265-static-main12.lib" -DEXTRA_LINK_FLAGS="/FORCE:MULTIPLE"
+  cmake  -G "Visual Studio 12 Win64" ../../../source -DHIGH_BIT_DEPTH=OFF -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=ON -DEXTRA_LIB="x265-static-main10.lib;x265-static-main12.lib"
 )
 if exist x265.sln (
   call "%VS120COMNTOOLS%\..\..\VC\vcvarsall.bat"
diff -r c2ee1d9a69d7 -r a632a7b1b90e build/vc9-x86_64/multilib.bat
--- a/build/vc9-x86_64/multilib.bat	Wed Jul 01 13:58:25 2015 -0500
+++ b/build/vc9-x86_64/multilib.bat	Mon Jun 29 16:17:01 2015 -0700
@@ -36,7 +36,7 @@
   exit 1
 )
 if not exist x265.sln (
-  cmake  -G "Visual Studio 9 2008 Win64" ../../../source -DHIGH_BIT_DEPTH=OFF -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=ON -DEXTRA_LIB="x265-static-main10.lib;x265-static-main12.lib" -DEXTRA_LINK_FLAGS="/FORCE:MULTIPLE"
+  cmake  -G "Visual Studio 9 2008 Win64" ../../../source -DHIGH_BIT_DEPTH=OFF -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=ON -DEXTRA_LIB="x265-static-main10.lib;x265-static-main12.lib"
 )
 if exist x265.sln (
   call "%VS90COMNTOOLS%\..\..\VC\vcvarsall.bat"
diff -r c2ee1d9a69d7 -r a632a7b1b90e source/common/cudata.cpp
--- a/source/common/cudata.cpp	Wed Jul 01 13:58:25 2015 -0500
+++ b/source/common/cudata.cpp	Mon Jun 29 16:17:01 2015 -0700
@@ -30,31 +30,31 @@
 
 using namespace X265_NS;
 
-namespace {
-// file private namespace
-
 /* for all bcast* and copy* functions, dst and src are aligned to MIN(size, 32) */
 
-void bcast1(uint8_t* dst, uint8_t val)  { dst[0] = val; }
+static void bcast1(uint8_t* dst, uint8_t val)  { dst[0] = val; }
 
-void copy4(uint8_t* dst, uint8_t* src)  { ((uint32_t*)dst)[0] = ((uint32_t*)src)[0]; }
-void bcast4(uint8_t* dst, uint8_t val)  { ((uint32_t*)dst)[0] = 0x01010101u * val; }
+static void copy4(uint8_t* dst, uint8_t* src)  { ((uint32_t*)dst)[0] = ((uint32_t*)src)[0]; }
+static void bcast4(uint8_t* dst, uint8_t val)  { ((uint32_t*)dst)[0] = 0x01010101u * val; }
 
-void copy16(uint8_t* dst, uint8_t* src) { ((uint64_t*)dst)[0] = ((uint64_t*)src)[0]; ((uint64_t*)dst)[1] = ((uint64_t*)src)[1]; }
-void bcast16(uint8_t* dst, uint8_t val) { uint64_t bval = 0x0101010101010101ULL * val; ((uint64_t*)dst)[0] = bval; ((uint64_t*)dst)[1] = bval; }
+static void copy16(uint8_t* dst, uint8_t* src) { ((uint64_t*)dst)[0] = ((uint64_t*)src)[0]; ((uint64_t*)dst)[1] = ((uint64_t*)src)[1]; }
+static void bcast16(uint8_t* dst, uint8_t val) { uint64_t bval = 0x0101010101010101ULL * val; ((uint64_t*)dst)[0] = bval; ((uint64_t*)dst)[1] = bval; }
 
-void copy64(uint8_t* dst, uint8_t* src) { ((uint64_t*)dst)[0] = ((uint64_t*)src)[0]; ((uint64_t*)dst)[1] = ((uint64_t*)src)[1]; 
-                                          ((uint64_t*)dst)[2] = ((uint64_t*)src)[2]; ((uint64_t*)dst)[3] = ((uint64_t*)src)[3];
-                                          ((uint64_t*)dst)[4] = ((uint64_t*)src)[4]; ((uint64_t*)dst)[5] = ((uint64_t*)src)[5];
-                                          ((uint64_t*)dst)[6] = ((uint64_t*)src)[6]; ((uint64_t*)dst)[7] = ((uint64_t*)src)[7]; }
-void bcast64(uint8_t* dst, uint8_t val) { uint64_t bval = 0x0101010101010101ULL * val;
-                                          ((uint64_t*)dst)[0] = bval; ((uint64_t*)dst)[1] = bval; ((uint64_t*)dst)[2] = bval; ((uint64_t*)dst)[3] = bval;
-                                          ((uint64_t*)dst)[4] = bval; ((uint64_t*)dst)[5] = bval; ((uint64_t*)dst)[6] = bval; ((uint64_t*)dst)[7] = bval; }
+static void copy64(uint8_t* dst, uint8_t* src) { ((uint64_t*)dst)[0] = ((uint64_t*)src)[0]; ((uint64_t*)dst)[1] = ((uint64_t*)src)[1]; 
+                                                 ((uint64_t*)dst)[2] = ((uint64_t*)src)[2]; ((uint64_t*)dst)[3] = ((uint64_t*)src)[3];
+                                                 ((uint64_t*)dst)[4] = ((uint64_t*)src)[4]; ((uint64_t*)dst)[5] = ((uint64_t*)src)[5];
+                                                 ((uint64_t*)dst)[6] = ((uint64_t*)src)[6]; ((uint64_t*)dst)[7] = ((uint64_t*)src)[7]; }
+static void bcast64(uint8_t* dst, uint8_t val) { uint64_t bval = 0x0101010101010101ULL * val;
+                                                 ((uint64_t*)dst)[0] = bval; ((uint64_t*)dst)[1] = bval; ((uint64_t*)dst)[2] = bval; ((uint64_t*)dst)[3] = bval;
+                                                 ((uint64_t*)dst)[4] = bval; ((uint64_t*)dst)[5] = bval; ((uint64_t*)dst)[6] = bval; ((uint64_t*)dst)[7] = bval; }
 
 /* at 256 bytes, memset/memcpy will probably use SIMD more effectively than our uint64_t hack,
  * but hand-written assembly would beat it. */
-void copy256(uint8_t* dst, uint8_t* src) { memcpy(dst, src, 256); }
-void bcast256(uint8_t* dst, uint8_t val) { memset(dst, val, 256); }
+static void copy256(uint8_t* dst, uint8_t* src) { memcpy(dst, src, 256); }
+static void bcast256(uint8_t* dst, uint8_t val) { memset(dst, val, 256); }
+
+namespace {
+// file private namespace
 
 /* Check whether 2 addresses point to the same column */
 inline bool isEqualCol(int addrA, int addrB, int numUnits)
diff -r c2ee1d9a69d7 -r a632a7b1b90e source/common/dct.cpp
--- a/source/common/dct.cpp	Wed Jul 01 13:58:25 2015 -0500
+++ b/source/common/dct.cpp	Mon Jun 29 16:17:01 2015 -0700
@@ -38,12 +38,9 @@
 #pragma warning(disable: 4127) // conditional expression is constant, typical for templated functions
 #endif
 
-namespace {
-// anonymous file-static namespace
-
 // Fast DST Algorithm. Full matrix multiplication for DST and Fast DST algorithm
 // give identical results
-void fastForwardDst(const int16_t* block, int16_t* coeff, int shift)  // input block, output coeff
+static void fastForwardDst(const int16_t* block, int16_t* coeff, int shift)  // input block, output coeff
 {
     int c[4];
     int rnd_factor = 1 << (shift - 1);
@@ -63,7 +60,7 @@
     }
 }
 
-void inversedst(const int16_t* tmp, int16_t* block, int shift)  // input tmp, output block
+static void inversedst(const int16_t* tmp, int16_t* block, int shift)  // input tmp, output block
 {
     int i, c[4];
     int rnd_factor = 1 << (shift - 1);
@@ -83,7 +80,7 @@
     }
 }
 
-void partialButterfly16(const int16_t* src, int16_t* dst, int shift, int line)
+static void partialButterfly16(const int16_t* src, int16_t* dst, int shift, int line)
 {
     int j, k;
     int E[8], O[8];
@@ -136,7 +133,7 @@
     }
 }
 
-void partialButterfly32(const int16_t* src, int16_t* dst, int shift, int line)
+static void partialButterfly32(const int16_t* src, int16_t* dst, int shift, int line)
 {
     int j, k;
     int E[16], O[16];
@@ -205,7 +202,7 @@
     }
 }
 
-void partialButterfly8(const int16_t* src, int16_t* dst, int shift, int line)
+static void partialButterfly8(const int16_t* src, int16_t* dst, int shift, int line)
 {
     int j, k;
     int E[4], O[4];
@@ -242,7 +239,7 @@
     }
 }
 
-void partialButterflyInverse4(const int16_t* src, int16_t* dst, int shift, int line)
+static void partialButterflyInverse4(const int16_t* src, int16_t* dst, int shift, int line)
 {
     int j;
     int E[2], O[2];
@@ -267,7 +264,7 @@
     }
 }
 
-void partialButterflyInverse8(const int16_t* src, int16_t* dst, int shift, int line)
+static void partialButterflyInverse8(const int16_t* src, int16_t* dst, int shift, int line)
 {
     int j, k;
     int E[4], O[4];
@@ -303,7 +300,7 @@
     }
 }
 
-void partialButterflyInverse16(const int16_t* src, int16_t* dst, int shift, int line)
+static void partialButterflyInverse16(const int16_t* src, int16_t* dst, int shift, int line)
 {
     int j, k;
     int E[8], O[8];
@@ -354,7 +351,7 @@
     }
 }
 
-void partialButterflyInverse32(const int16_t* src, int16_t* dst, int shift, int line)
+static void partialButterflyInverse32(const int16_t* src, int16_t* dst, int shift, int line)
 {
     int j, k;
     int E[16], O[16];
@@ -418,7 +415,7 @@
     }
 }
 
-void partialButterfly4(const int16_t* src, int16_t* dst, int shift, int line)
+static void partialButterfly4(const int16_t* src, int16_t* dst, int shift, int line)
 {
     int j;
     int E[2], O[2];
@@ -442,7 +439,7 @@
     }
 }
 
-void dst4_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
+static void dst4_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
 {
     const int shift_1st = 1 + X265_DEPTH - 8;
     const int shift_2nd = 8;
@@ -459,7 +456,7 @@
     fastForwardDst(coef, dst, shift_2nd);
 }
 
-void dct4_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
+static void dct4_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
 {
     const int shift_1st = 1 + X265_DEPTH - 8;
     const int shift_2nd = 8;
@@ -476,7 +473,7 @@
     partialButterfly4(coef, dst, shift_2nd, 4);
 }
 
-void dct8_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
+static void dct8_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
 {
     const int shift_1st = 2 + X265_DEPTH - 8;
     const int shift_2nd = 9;
@@ -493,7 +490,7 @@
     partialButterfly8(coef, dst, shift_2nd, 8);
 }
 
-void dct16_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
+static void dct16_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
 {
     const int shift_1st = 3 + X265_DEPTH - 8;
     const int shift_2nd = 10;
@@ -510,7 +507,7 @@
     partialButterfly16(coef, dst, shift_2nd, 16);
 }
 
-void dct32_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
+static void dct32_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
 {
     const int shift_1st = 4 + X265_DEPTH - 8;
     const int shift_2nd = 11;
@@ -527,7 +524,7 @@
     partialButterfly32(coef, dst, shift_2nd, 32);
 }
 
-void idst4_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
+static void idst4_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
 {
     const int shift_1st = 7;
     const int shift_2nd = 12 - (X265_DEPTH - 8);
@@ -544,7 +541,7 @@
     }
 }
 
-void idct4_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
+static void idct4_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
 {
     const int shift_1st = 7;
     const int shift_2nd = 12 - (X265_DEPTH - 8);
@@ -561,7 +558,7 @@
     }
 }
 
-void idct8_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
+static void idct8_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
 {
     const int shift_1st = 7;
     const int shift_2nd = 12 - (X265_DEPTH - 8);
@@ -578,7 +575,7 @@
     }
 }
 
-void idct16_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
+static void idct16_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
 {
     const int shift_1st = 7;
     const int shift_2nd = 12 - (X265_DEPTH - 8);
@@ -595,7 +592,7 @@
     }
 }
 
-void idct32_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
+static void idct32_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
 {
     const int shift_1st = 7;
     const int shift_2nd = 12 - (X265_DEPTH - 8);
@@ -612,7 +609,7 @@
     }
 }
 
-void dequant_normal_c(const int16_t* quantCoef, int16_t* coef, int num, int scale, int shift)
+static void dequant_normal_c(const int16_t* quantCoef, int16_t* coef, int num, int scale, int shift)
 {
 #if HIGH_BIT_DEPTH
     X265_CHECK(scale < 32768 || ((scale & 3) == 0 && shift > 2), "dequant invalid scale %d\n", scale);
@@ -636,7 +633,7 @@
     }
 }
 
-void dequant_scaling_c(const int16_t* quantCoef, const int32_t* deQuantCoef, int16_t* coef, int num, int per, int shift)
+static void dequant_scaling_c(const int16_t* quantCoef, const int32_t* deQuantCoef, int16_t* coef, int num, int per, int shift)
 {
     X265_CHECK(num <= 32 * 32, "dequant num %d too large\n", num);
 
@@ -664,7 +661,7 @@
     }
 }
 
-uint32_t quant_c(const int16_t* coef, const int32_t* quantCoeff, int32_t* deltaU, int16_t* qCoef, int qBits, int add, int numCoeff)
+static uint32_t quant_c(const int16_t* coef, const int32_t* quantCoeff, int32_t* deltaU, int16_t* qCoef, int qBits, int add, int numCoeff)
 {
     X265_CHECK(qBits >= 8, "qBits less than 8\n");
     X265_CHECK((numCoeff % 16) == 0, "numCoeff must be multiple of 16\n");
@@ -688,7 +685,7 @@
     return numSig;
 }
 
-uint32_t nquant_c(const int16_t* coef, const int32_t* quantCoeff, int16_t* qCoef, int qBits, int add, int numCoeff)
+static uint32_t nquant_c(const int16_t* coef, const int32_t* quantCoeff, int16_t* qCoef, int qBits, int add, int numCoeff)
 {
     X265_CHECK((numCoeff % 16) == 0, "number of quant coeff is not multiple of 4x4\n");
     X265_CHECK((uint32_t)add < ((uint32_t)1 << qBits), "2 ^ qBits less than add\n");
@@ -741,7 +738,7 @@
     return numSig;
 }
 
-void denoiseDct_c(int16_t* dctCoef, uint32_t* resSum, const uint16_t* offset, int numCoeff)
+static void denoiseDct_c(int16_t* dctCoef, uint32_t* resSum, const uint16_t* offset, int numCoeff)
 {
     for (int i = 0; i < numCoeff; i++)
     {
@@ -754,7 +751,7 @@
     }
 }
 
-int scanPosLast_c(const uint16_t *scan, const coeff_t *coeff, uint16_t *coeffSign, uint16_t *coeffFlag, uint8_t *coeffNum, int numSig, const uint16_t* /*scanCG4x4*/, const int /*trSize*/)
+static int scanPosLast_c(const uint16_t *scan, const coeff_t *coeff, uint16_t *coeffSign, uint16_t *coeffFlag, uint8_t *coeffNum, int numSig, const uint16_t* /*scanCG4x4*/, const int /*trSize*/)
 {
     memset(coeffNum, 0, MLS_GRP_NUM * sizeof(*coeffNum));
     memset(coeffFlag, 0, MLS_GRP_NUM * sizeof(*coeffFlag));
@@ -787,7 +784,7 @@
     return scanPosLast - 1;
 }
 
-uint32_t findPosFirstLast_c(const int16_t *dstCoeff, const intptr_t trSize, const uint16_t scanTbl[16])
+static uint32_t findPosFirstLast_c(const int16_t *dstCoeff, const intptr_t trSize, const uint16_t scanTbl[16])
 {
     int n;
 
@@ -820,7 +817,7 @@
 }
 
 
-uint32_t costCoeffNxN_c(const uint16_t *scan, const coeff_t *coeff, intptr_t trSize, uint16_t *absCoeff, const uint8_t *tabSigCtx, uint32_t scanFlagMask, uint8_t *baseCtx, int offset, int scanPosSigOff, int subPosBase)
+static uint32_t costCoeffNxN_c(const uint16_t *scan, const coeff_t *coeff, intptr_t trSize, uint16_t *absCoeff, const uint8_t *tabSigCtx, uint32_t scanFlagMask, uint8_t *baseCtx, int offset, int scanPosSigOff, int subPosBase)
 {
     ALIGN_VAR_32(uint16_t, tmpCoeff[SCAN_SET_SIZE]);
     uint32_t numNonZero = (scanPosSigOff < (SCAN_SET_SIZE - 1) ? 1 : 0);
@@ -874,7 +871,7 @@
     return (sum & 0xFFFFFF);
 }
 
-uint32_t costCoeffRemain_c(uint16_t *absCoeff, int numNonZero, int idx)
+static uint32_t costCoeffRemain_c(uint16_t *absCoeff, int numNonZero, int idx)
 {
     uint32_t goRiceParam = 0;
 
@@ -921,7 +918,7 @@
 }
 
 
-uint32_t costC1C2Flag_c(uint16_t *absCoeff, intptr_t numC1Flag, uint8_t *baseCtxMod, intptr_t ctxOffset)
+static uint32_t costC1C2Flag_c(uint16_t *absCoeff, intptr_t numC1Flag, uint8_t *baseCtxMod, intptr_t ctxOffset)
 {
     uint32_t sum = 0;
     uint32_t c1 = 1;
@@ -974,8 +971,6 @@
     return (sum & 0x00FFFFFF) + (c1 << 26) + (firstC2Idx << 28);
 }
 
-}  // closing - anonymous file-static namespace
-
 namespace X265_NS {
 // x265 private namespace
 
diff -r c2ee1d9a69d7 -r a632a7b1b90e source/common/pixel.cpp
--- a/source/common/pixel.cpp	Wed Jul 01 13:58:25 2015 -0500
+++ b/source/common/pixel.cpp	Mon Jun 29 16:17:01 2015 -0700
@@ -159,7 +159,7 @@
     return (a + s) ^ s;
 }
 
-int satd_4x4(const pixel* pix1, intptr_t stride_pix1, const pixel* pix2, intptr_t stride_pix2)
+static int satd_4x4(const pixel* pix1, intptr_t stride_pix1, const pixel* pix2, intptr_t stride_pix2)
 {
     sum2_t tmp[4][2];
     sum2_t a0, a1, a2, a3, b0, b1;
@@ -308,7 +308,7 @@
     return (int)sum;
 }
 
-int sa8d_8x8(const pixel* pix1, intptr_t i_pix1, const pixel* pix2, intptr_t i_pix2)
+inline int sa8d_8x8(const pixel* pix1, intptr_t i_pix1, const pixel* pix2, intptr_t i_pix2)
 {
     return (int)((_sa8d_8x8(pix1, i_pix1, pix2, i_pix2) + 2) >> 2);
 }
@@ -359,12 +359,12 @@
     return (int)sum;
 }
 
-int sa8d_8x8(const int16_t* pix1, intptr_t i_pix1)
+static int sa8d_8x8(const int16_t* pix1, intptr_t i_pix1)
 {
     return (int)((_sa8d_8x8(pix1, i_pix1) + 2) >> 2);
 }
 
-int sa8d_16x16(const pixel* pix1, intptr_t i_pix1, const pixel* pix2, intptr_t i_pix2)
+static int sa8d_16x16(const pixel* pix1, intptr_t i_pix1, const pixel* pix2, intptr_t i_pix2)
 {
     int sum = _sa8d_8x8(pix1, i_pix1, pix2, i_pix2)
         + _sa8d_8x8(pix1 + 8, i_pix1, pix2 + 8, i_pix2)
@@ -516,7 +516,7 @@
             dst[k * blockSize + l] = src[l * stride + k];
 }
 
-void weight_sp_c(const int16_t* src, pixel* dst, intptr_t srcStride, intptr_t dstStride, int width, int height, int w0, int round, int shift, int offset)
+static void weight_sp_c(const int16_t* src, pixel* dst, intptr_t srcStride, intptr_t dstStride, int width, int height, int w0, int round, int shift, int offset)
 {
     int x, y;
 
@@ -541,7 +541,7 @@
     }
 }
 
-void weight_pp_c(const pixel* src, pixel* dst, intptr_t stride, int width, int height, int w0, int round, int shift, int offset)
+static void weight_pp_c(const pixel* src, pixel* dst, intptr_t stride, int width, int height, int w0, int round, int shift, int offset)
 {
     int x, y;
 
@@ -582,7 +582,7 @@
     }
 }
 
-void scale1D_128to64(pixel *dst, const pixel *src)
+static void scale1D_128to64(pixel *dst, const pixel *src)
 {
     int x;
     const pixel* src1 = src;
@@ -608,7 +608,7 @@
     }
 }
 
-void scale2D_64to32(pixel* dst, const pixel* src, intptr_t stride)
+static void scale2D_64to32(pixel* dst, const pixel* src, intptr_t stride)
 {
     uint32_t x, y;
 
@@ -627,6 +627,7 @@
     }
 }
 
+static
 void frame_init_lowres_core(const pixel* src0, pixel* dst0, pixel* dsth, pixel* dstv, pixel* dstc,
                             intptr_t src_stride, intptr_t dst_stride, int width, int height)
 {
@@ -653,7 +654,7 @@
 }
 
 /* structural similarity metric */
-void ssim_4x4x2_core(const pixel* pix1, intptr_t stride1, const pixel* pix2, intptr_t stride2, int sums[2][4])
+static void ssim_4x4x2_core(const pixel* pix1, intptr_t stride1, const pixel* pix2, intptr_t stride2, int sums[2][4])
 {
     for (int z = 0; z < 2; z++)
     {
@@ -681,7 +682,7 @@
     }
 }
 
-float ssim_end_1(int s1, int s2, int ss, int s12)
+static float ssim_end_1(int s1, int s2, int ss, int s12)
 {
 /* Maximum value for 10-bit is: ss*64 = (2^10-1)^2*16*4*64 = 4286582784, which will overflow in some cases.
  * s1*s1, s2*s2, and s1*s2 also obtain this value for edge cases: ((2^10-1)*16*4)^2 = 4286582784.
@@ -711,7 +712,7 @@
 #undef PIXEL_MAX
 }
 
-float ssim_end_4(int sum0[5][4], int sum1[5][4], int width)
+static float ssim_end_4(int sum0[5][4], int sum1[5][4], int width)
 {
     float ssim = 0.0;
 
@@ -920,7 +921,7 @@
     }
 }
 
-void planecopy_cp_c(const uint8_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift)
+static void planecopy_cp_c(const uint8_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift)
 {
     for (int r = 0; r < height; r++)
     {
@@ -932,7 +933,7 @@
     }
 }
 
-void planecopy_sp_c(const uint16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift, uint16_t mask)
+static void planecopy_sp_c(const uint16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift, uint16_t mask)
 {
     for (int r = 0; r < height; r++)
     {
@@ -946,7 +947,7 @@
 
 /* Estimate the total amount of influence on future quality that could be had if we
  * were to improve the reference samples used to inter predict any given CU. */
-void estimateCUPropagateCost(int* dst, const uint16_t* propagateIn, const int32_t* intraCosts, const uint16_t* interCosts,
+static void estimateCUPropagateCost(int* dst, const uint16_t* propagateIn, const int32_t* intraCosts, const uint16_t* interCosts,
                              const int32_t* invQscales, const double* fpsFactor, int len)
 {
     double fps = *fpsFactor / 256;
diff -r c2ee1d9a69d7 -r a632a7b1b90e source/common/scalinglist.cpp
--- a/source/common/scalinglist.cpp	Wed Jul 01 13:58:25 2015 -0500
+++ b/source/common/scalinglist.cpp	Mon Jun 29 16:17:01 2015 -0700
@@ -80,7 +80,7 @@
     },
 };
 
-int quantTSDefault4x4[16] =
+static int quantTSDefault4x4[16] =
 {
     16, 16, 16, 16,
     16, 16, 16, 16,
@@ -88,7 +88,7 @@
     16, 16, 16, 16
 };
 
-int quantIntraDefault8x8[64] =
+static int quantIntraDefault8x8[64] =
 {
     16, 16, 16, 16, 17, 18, 21, 24,
     16, 16, 16, 16, 17, 19, 22, 25,
@@ -100,7 +100,7 @@
     24, 25, 29, 36, 47, 65, 88, 115
 };
 
-int quantInterDefault8x8[64] =
+static int quantInterDefault8x8[64] =
 {
     16, 16, 16, 16, 17, 18, 20, 24,
     16, 16, 16, 17, 18, 20, 24, 25,
diff -r c2ee1d9a69d7 -r a632a7b1b90e source/common/vec/dct-sse3.cpp
--- a/source/common/vec/dct-sse3.cpp	Wed Jul 01 13:58:25 2015 -0500
+++ b/source/common/vec/dct-sse3.cpp	Mon Jun 29 16:17:01 2015 -0700
@@ -35,7 +35,6 @@
 
 using namespace X265_NS;
 
-namespace {
 #define SHIFT1  7
 #define ADD1    64
 
@@ -62,7 +61,8 @@
     {  83,  36,  83,  36, 83,  36, 83,  36 },
     {  36, -83,  36, -83, 36, -83, 36, -83 }
 };
-void idct8(const int16_t* src, int16_t* dst, intptr_t stride)
+
+static void idct8(const int16_t* src, int16_t* dst, intptr_t stride)
 {
     __m128i m128iS0, m128iS1, m128iS2, m128iS3, m128iS4, m128iS5, m128iS6, m128iS7, m128iAdd, m128Tmp0, m128Tmp1, m128Tmp2, m128Tmp3, E0h, E1h, E2h, E3h, E0l, E1l, E2l, E3l, O0h, O1h, O2h, O3h, O0l, O1l, O2l, O3l, EE0l, EE1l, E00l, E01l, EE0h, EE1h, E00h, E01h;
     __m128i T00, T01, T02, T03, T04, T05, T06, T07;
@@ -299,7 +299,7 @@
     _mm_storeh_pi((__m64*)&dst[7 * stride +  4], _mm_castsi128_ps(T11));
 }
 
-void idct16(const int16_t *src, int16_t *dst, intptr_t stride)
+static void idct16(const int16_t *src, int16_t *dst, intptr_t stride)
 {
 #define READ_UNPACKHILO(offset)\
     const __m128i T_00_00A = _mm_unpacklo_epi16(*(__m128i*)&src[1 * 16 + offset], *(__m128i*)&src[3 * 16 + offset]);\
@@ -677,7 +677,7 @@
 #undef UNPACKHILO
 #undef READ_UNPACKHILO
 
-void idct32(const int16_t *src, int16_t *dst, intptr_t stride)
+static void idct32(const int16_t *src, int16_t *dst, intptr_t stride)
 {
     //Odd
     const __m128i c16_p90_p90   = _mm_set1_epi32(0x005A005A); //column 0
@@ -1418,8 +1418,6 @@
     }
 }
 
-}
-
 namespace X265_NS {
 void setupIntrinsicDCT_sse3(EncoderPrimitives &p)
 {
diff -r c2ee1d9a69d7 -r a632a7b1b90e source/common/vec/dct-sse41.cpp
--- a/source/common/vec/dct-sse41.cpp	Wed Jul 01 13:58:25 2015 -0500
+++ b/source/common/vec/dct-sse41.cpp	Mon Jun 29 16:17:01 2015 -0700
@@ -35,8 +35,7 @@
 
 using namespace X265_NS;
 
-namespace {
-void dequant_scaling(const int16_t* quantCoef, const int32_t *deQuantCoef, int16_t* coef, int num, int per, int shift)
+static void dequant_scaling(const int16_t* quantCoef, const int32_t *deQuantCoef, int16_t* coef, int num, int per, int shift)
 {
     X265_CHECK(num <= 32 * 32, "dequant num too large\n");
 
@@ -100,7 +99,6 @@
         }
     }
 }
-}
 
 namespace X265_NS {
 void setupIntrinsicDCT_sse41(EncoderPrimitives &p)
diff -r c2ee1d9a69d7 -r a632a7b1b90e source/common/vec/dct-ssse3.cpp
--- a/source/common/vec/dct-ssse3.cpp	Wed Jul 01 13:58:25 2015 -0500
+++ b/source/common/vec/dct-ssse3.cpp	Mon Jun 29 16:17:01 2015 -0700
@@ -36,7 +36,6 @@
 
 using namespace X265_NS;
 
-namespace {
 ALIGN_VAR_32(static const int16_t, tab_dct_8[][8]) =
 {
     { 0x0100, 0x0F0E, 0x0706, 0x0908, 0x0302, 0x0D0C, 0x0504, 0x0B0A },
@@ -99,7 +98,7 @@
 #undef MAKE_COEF
 };
 
-void dct16(const int16_t *src, int16_t *dst, intptr_t stride)
+static void dct16(const int16_t *src, int16_t *dst, intptr_t stride)
 {
 #if HIGH_BIT_DEPTH
 #define SHIFT1  5
@@ -680,7 +679,7 @@
 #undef MAKE_COEF16
 };
 
-void dct32(const int16_t *src, int16_t *dst, intptr_t stride)
+static void dct32(const int16_t *src, int16_t *dst, intptr_t stride)
 {
 #if HIGH_BIT_DEPTH
 #define SHIFT1  6
@@ -1130,7 +1129,6 @@
 #undef SHIFT2
 #undef ADD2
 }
-}
 
 namespace X265_NS {
 void setupIntrinsicDCT_ssse3(EncoderPrimitives &p)
diff -r c2ee1d9a69d7 -r a632a7b1b90e source/encoder/motion.cpp
--- a/source/encoder/motion.cpp	Wed Jul 01 13:58:25 2015 -0500
+++ b/source/encoder/motion.cpp	Mon Jun 29 16:17:01 2015 -0700
@@ -56,7 +56,7 @@
     { 2, 8, 2, 8, true },  // 2x8 SATD HPEL + 2x8 SATD QPEL
 };
 
-int sizeScale[NUM_PU_SIZES];
+static int sizeScale[NUM_PU_SIZES];
 #define SAD_THRESH(v) (bcost < (((v >> 4) * sizeScale[partEnum])))
 
 /* radius 2 hexagon. repeated entries are to avoid having to compute mod6 every time. */

