[x265] [PATCH] multilib: Static functions to avoid link conflict (VC anonymous namespace link bug)

Min Chen chenm003 at 163.com
Tue Jun 30 01:17:08 CEST 2015


# HG changeset patch
# User Min Chen <chenm003 at 163.com>
# Date 1435619821 25200
# Node ID 8f7b35226a03ea21131cb93c60bbaddf62411e50
# Parent  e332f1657e95a8f3ef9dcc66a6e0e22ce00a23f6
multilib: Static functions to avoid link conflict (VC anonymous namespace link bug)
---
 build/vc10-x86_64/multilib.bat  |    2 +-
 build/vc11-x86_64/multilib.bat  |    2 +-
 build/vc12-x86_64/multilib.bat  |    2 +-
 build/vc9-x86_64/multilib.bat   |    2 +-
 source/common/cudata.cpp        |   28 +++++++++---------
 source/common/dct.cpp           |   60 +++++++++++++++++++-------------------
 source/common/pixel.cpp         |   27 +++++++++--------
 source/common/scalinglist.cpp   |    6 ++--
 source/common/vec/dct-sse3.cpp  |    7 ++--
 source/common/vec/dct-sse41.cpp |    2 +-
 source/common/vec/dct-ssse3.cpp |    4 +-
 source/encoder/motion.cpp       |    2 +-
 12 files changed, 73 insertions(+), 71 deletions(-)
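
Background for reviewers (commentary, not part of the changeset): in the
multilib configuration the 8-bit and 10-bit encoders are built as separate
static libraries and then linked into one binary (note
-DEXTRA_LIB=x265-static-main10.lib in the build scripts below). Any
file-local helper that still ends up with external linkage is therefore
defined in both libraries, and this patch attributes the resulting
duplicate-symbol failures to MSVC not giving anonymous-namespace functions
truly internal linkage. A minimal sketch of the pattern being fixed, using
illustrative names that do not appear in the x265 sources:

    // helper.cpp -- compiled once into the 8-bit library and once into
    // the 10-bit library (hypothetical file, for illustration only).
    namespace {
    // If the compiler still emits an external symbol for this function
    // (the VC behaviour the commit message describes), both libraries
    // define it and the multilib link reports a multiply defined symbol.
    void fill(int* dst, int val) { dst[0] = val; }
    }

    // Internal linkage keeps the symbol private to each object file, so
    // the 8-bit and 10-bit copies can no longer collide:
    static void fillFixed(int* dst, int val) { dst[0] = val; }

    void useHelpers(int* dst) { fill(dst, 1); fillFixed(dst, 2); }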

diff -r e332f1657e95 -r 8f7b35226a03 build/vc10-x86_64/multilib.bat
--- a/build/vc10-x86_64/multilib.bat	Mon Jun 29 12:18:38 2015 -0700
+++ b/build/vc10-x86_64/multilib.bat	Mon Jun 29 16:17:01 2015 -0700
@@ -24,7 +24,7 @@
  exit 1
 )
 if not exist x265.sln (
-  cmake  -G "Visual Studio 10 Win64" ../../../source -DHIGH_BIT_DEPTH=OFF -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=ON -DEXTRA_LIB=x265-static-main10.lib -DEXTRA_LINK_FLAGS="/FORCE:MULTIPLE"
+  cmake  -G "Visual Studio 10 Win64" ../../../source -DHIGH_BIT_DEPTH=OFF -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=ON -DEXTRA_LIB=x265-static-main10.lib -DEXTRA_LINK_FLAGS=""
 )
 if exist x265.sln (
   call "%VS100COMNTOOLS%\..\..\VC\vcvarsall.bat"
diff -r e332f1657e95 -r 8f7b35226a03 build/vc11-x86_64/multilib.bat
--- a/build/vc11-x86_64/multilib.bat	Mon Jun 29 12:18:38 2015 -0700
+++ b/build/vc11-x86_64/multilib.bat	Mon Jun 29 16:17:01 2015 -0700
@@ -24,7 +24,7 @@
  exit 1
 )
 if not exist x265.sln (
-  cmake  -G "Visual Studio 11 Win64" ../../../source -DHIGH_BIT_DEPTH=OFF -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=ON -DEXTRA_LIB=x265-static-main10.lib -DEXTRA_LINK_FLAGS="/FORCE:MULTIPLE"
+  cmake  -G "Visual Studio 11 Win64" ../../../source -DHIGH_BIT_DEPTH=OFF -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=ON -DEXTRA_LIB=x265-static-main10.lib -DEXTRA_LINK_FLAGS=""
 )
 if exist x265.sln (
   call "%VS110COMNTOOLS%\..\..\VC\vcvarsall.bat"
diff -r e332f1657e95 -r 8f7b35226a03 build/vc12-x86_64/multilib.bat
--- a/build/vc12-x86_64/multilib.bat	Mon Jun 29 12:18:38 2015 -0700
+++ b/build/vc12-x86_64/multilib.bat	Mon Jun 29 16:17:01 2015 -0700
@@ -24,7 +24,7 @@
  exit 1
 )
 if not exist x265.sln (
-  cmake  -G "Visual Studio 12 Win64" ../../../source -DHIGH_BIT_DEPTH=OFF -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=ON -DEXTRA_LIB=x265-static-main10.lib -DEXTRA_LINK_FLAGS="/FORCE:MULTIPLE"
+  cmake  -G "Visual Studio 12 Win64" ../../../source -DHIGH_BIT_DEPTH=OFF -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=ON -DEXTRA_LIB=x265-static-main10.lib -DEXTRA_LINK_FLAGS=""
 )
 if exist x265.sln (
   call "%VS120COMNTOOLS%\..\..\VC\vcvarsall.bat"
diff -r e332f1657e95 -r 8f7b35226a03 build/vc9-x86_64/multilib.bat
--- a/build/vc9-x86_64/multilib.bat	Mon Jun 29 12:18:38 2015 -0700
+++ b/build/vc9-x86_64/multilib.bat	Mon Jun 29 16:17:01 2015 -0700
@@ -24,7 +24,7 @@
  exit 1
 )
 if not exist x265.sln (
-  cmake  -G "Visual Studio 9 2008 Win64" ../../../source -DHIGH_BIT_DEPTH=OFF -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=ON -DEXTRA_LIB=x265-static-main10.lib -DEXTRA_LINK_FLAGS="/FORCE:MULTIPLE"
+  cmake  -G "Visual Studio 9 2008 Win64" ../../../source -DHIGH_BIT_DEPTH=OFF -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=ON -DEXTRA_LIB=x265-static-main10.lib -DEXTRA_LINK_FLAGS=""
 )
 if exist x265.sln (
   call "%VS90COMNTOOLS%\..\..\VC\vcvarsall.bat"
diff -r e332f1657e95 -r 8f7b35226a03 source/common/cudata.cpp
--- a/source/common/cudata.cpp	Mon Jun 29 12:18:38 2015 -0700
+++ b/source/common/cudata.cpp	Mon Jun 29 16:17:01 2015 -0700
@@ -35,26 +35,26 @@
 
 /* for all bcast* and copy* functions, dst and src are aligned to MIN(size, 32) */
 
-void bcast1(uint8_t* dst, uint8_t val)  { dst[0] = val; }
+inline void bcast1(uint8_t* dst, uint8_t val)  { dst[0] = val; }
 
-void copy4(uint8_t* dst, uint8_t* src)  { ((uint32_t*)dst)[0] = ((uint32_t*)src)[0]; }
-void bcast4(uint8_t* dst, uint8_t val)  { ((uint32_t*)dst)[0] = 0x01010101u * val; }
+inline void copy4(uint8_t* dst, uint8_t* src)  { ((uint32_t*)dst)[0] = ((uint32_t*)src)[0]; }
+inline void bcast4(uint8_t* dst, uint8_t val)  { ((uint32_t*)dst)[0] = 0x01010101u * val; }
 
-void copy16(uint8_t* dst, uint8_t* src) { ((uint64_t*)dst)[0] = ((uint64_t*)src)[0]; ((uint64_t*)dst)[1] = ((uint64_t*)src)[1]; }
-void bcast16(uint8_t* dst, uint8_t val) { uint64_t bval = 0x0101010101010101ULL * val; ((uint64_t*)dst)[0] = bval; ((uint64_t*)dst)[1] = bval; }
+inline void copy16(uint8_t* dst, uint8_t* src) { ((uint64_t*)dst)[0] = ((uint64_t*)src)[0]; ((uint64_t*)dst)[1] = ((uint64_t*)src)[1]; }
+inline void bcast16(uint8_t* dst, uint8_t val) { uint64_t bval = 0x0101010101010101ULL * val; ((uint64_t*)dst)[0] = bval; ((uint64_t*)dst)[1] = bval; }
 
-void copy64(uint8_t* dst, uint8_t* src) { ((uint64_t*)dst)[0] = ((uint64_t*)src)[0]; ((uint64_t*)dst)[1] = ((uint64_t*)src)[1]; 
-                                          ((uint64_t*)dst)[2] = ((uint64_t*)src)[2]; ((uint64_t*)dst)[3] = ((uint64_t*)src)[3];
-                                          ((uint64_t*)dst)[4] = ((uint64_t*)src)[4]; ((uint64_t*)dst)[5] = ((uint64_t*)src)[5];
-                                          ((uint64_t*)dst)[6] = ((uint64_t*)src)[6]; ((uint64_t*)dst)[7] = ((uint64_t*)src)[7]; }
-void bcast64(uint8_t* dst, uint8_t val) { uint64_t bval = 0x0101010101010101ULL * val;
-                                          ((uint64_t*)dst)[0] = bval; ((uint64_t*)dst)[1] = bval; ((uint64_t*)dst)[2] = bval; ((uint64_t*)dst)[3] = bval;
-                                          ((uint64_t*)dst)[4] = bval; ((uint64_t*)dst)[5] = bval; ((uint64_t*)dst)[6] = bval; ((uint64_t*)dst)[7] = bval; }
+inline void copy64(uint8_t* dst, uint8_t* src) { ((uint64_t*)dst)[0] = ((uint64_t*)src)[0]; ((uint64_t*)dst)[1] = ((uint64_t*)src)[1]; 
+                                                 ((uint64_t*)dst)[2] = ((uint64_t*)src)[2]; ((uint64_t*)dst)[3] = ((uint64_t*)src)[3];
+                                                 ((uint64_t*)dst)[4] = ((uint64_t*)src)[4]; ((uint64_t*)dst)[5] = ((uint64_t*)src)[5];
+                                                 ((uint64_t*)dst)[6] = ((uint64_t*)src)[6]; ((uint64_t*)dst)[7] = ((uint64_t*)src)[7]; }
+inline void bcast64(uint8_t* dst, uint8_t val) { uint64_t bval = 0x0101010101010101ULL * val;
+                                                 ((uint64_t*)dst)[0] = bval; ((uint64_t*)dst)[1] = bval; ((uint64_t*)dst)[2] = bval; ((uint64_t*)dst)[3] = bval;
+                                                 ((uint64_t*)dst)[4] = bval; ((uint64_t*)dst)[5] = bval; ((uint64_t*)dst)[6] = bval; ((uint64_t*)dst)[7] = bval; }
 
 /* at 256 bytes, memset/memcpy will probably use SIMD more effectively than our uint64_t hack,
  * but hand-written assembly would beat it. */
-void copy256(uint8_t* dst, uint8_t* src) { memcpy(dst, src, 256); }
-void bcast256(uint8_t* dst, uint8_t val) { memset(dst, val, 256); }
+inline void copy256(uint8_t* dst, uint8_t* src) { memcpy(dst, src, 256); }
+inline void bcast256(uint8_t* dst, uint8_t val) { memset(dst, val, 256); }
 
 /* Check whether 2 addresses point to the same column */
 inline bool isEqualCol(int addrA, int addrB, int numUnits)
diff -r e332f1657e95 -r 8f7b35226a03 source/common/dct.cpp
--- a/source/common/dct.cpp	Mon Jun 29 12:18:38 2015 -0700
+++ b/source/common/dct.cpp	Mon Jun 29 16:17:01 2015 -0700
@@ -43,7 +43,7 @@
 
 // Fast DST Algorithm. Full matrix multiplication for DST and Fast DST algorithm
 // give identical results
-void fastForwardDst(const int16_t* block, int16_t* coeff, int shift)  // input block, output coeff
+static void fastForwardDst(const int16_t* block, int16_t* coeff, int shift)  // input block, output coeff
 {
     int c[4];
     int rnd_factor = 1 << (shift - 1);
@@ -63,7 +63,7 @@
     }
 }
 
-void inversedst(const int16_t* tmp, int16_t* block, int shift)  // input tmp, output block
+static void inversedst(const int16_t* tmp, int16_t* block, int shift)  // input tmp, output block
 {
     int i, c[4];
     int rnd_factor = 1 << (shift - 1);
@@ -83,7 +83,7 @@
     }
 }
 
-void partialButterfly16(const int16_t* src, int16_t* dst, int shift, int line)
+static void partialButterfly16(const int16_t* src, int16_t* dst, int shift, int line)
 {
     int j, k;
     int E[8], O[8];
@@ -136,7 +136,7 @@
     }
 }
 
-void partialButterfly32(const int16_t* src, int16_t* dst, int shift, int line)
+static void partialButterfly32(const int16_t* src, int16_t* dst, int shift, int line)
 {
     int j, k;
     int E[16], O[16];
@@ -205,7 +205,7 @@
     }
 }
 
-void partialButterfly8(const int16_t* src, int16_t* dst, int shift, int line)
+static void partialButterfly8(const int16_t* src, int16_t* dst, int shift, int line)
 {
     int j, k;
     int E[4], O[4];
@@ -242,7 +242,7 @@
     }
 }
 
-void partialButterflyInverse4(const int16_t* src, int16_t* dst, int shift, int line)
+static void partialButterflyInverse4(const int16_t* src, int16_t* dst, int shift, int line)
 {
     int j;
     int E[2], O[2];
@@ -267,7 +267,7 @@
     }
 }
 
-void partialButterflyInverse8(const int16_t* src, int16_t* dst, int shift, int line)
+static void partialButterflyInverse8(const int16_t* src, int16_t* dst, int shift, int line)
 {
     int j, k;
     int E[4], O[4];
@@ -303,7 +303,7 @@
     }
 }
 
-void partialButterflyInverse16(const int16_t* src, int16_t* dst, int shift, int line)
+static void partialButterflyInverse16(const int16_t* src, int16_t* dst, int shift, int line)
 {
     int j, k;
     int E[8], O[8];
@@ -354,7 +354,7 @@
     }
 }
 
-void partialButterflyInverse32(const int16_t* src, int16_t* dst, int shift, int line)
+static void partialButterflyInverse32(const int16_t* src, int16_t* dst, int shift, int line)
 {
     int j, k;
     int E[16], O[16];
@@ -418,7 +418,7 @@
     }
 }
 
-void partialButterfly4(const int16_t* src, int16_t* dst, int shift, int line)
+static void partialButterfly4(const int16_t* src, int16_t* dst, int shift, int line)
 {
     int j;
     int E[2], O[2];
@@ -442,7 +442,7 @@
     }
 }
 
-void dst4_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
+static void dst4_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
 {
     const int shift_1st = 1 + X265_DEPTH - 8;
     const int shift_2nd = 8;
@@ -459,7 +459,7 @@
     fastForwardDst(coef, dst, shift_2nd);
 }
 
-void dct4_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
+static void dct4_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
 {
     const int shift_1st = 1 + X265_DEPTH - 8;
     const int shift_2nd = 8;
@@ -476,7 +476,7 @@
     partialButterfly4(coef, dst, shift_2nd, 4);
 }
 
-void dct8_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
+static void dct8_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
 {
     const int shift_1st = 2 + X265_DEPTH - 8;
     const int shift_2nd = 9;
@@ -493,7 +493,7 @@
     partialButterfly8(coef, dst, shift_2nd, 8);
 }
 
-void dct16_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
+static void dct16_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
 {
     const int shift_1st = 3 + X265_DEPTH - 8;
     const int shift_2nd = 10;
@@ -510,7 +510,7 @@
     partialButterfly16(coef, dst, shift_2nd, 16);
 }
 
-void dct32_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
+static void dct32_c(const int16_t* src, int16_t* dst, intptr_t srcStride)
 {
     const int shift_1st = 4 + X265_DEPTH - 8;
     const int shift_2nd = 11;
@@ -527,7 +527,7 @@
     partialButterfly32(coef, dst, shift_2nd, 32);
 }
 
-void idst4_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
+static void idst4_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
 {
     const int shift_1st = 7;
     const int shift_2nd = 12 - (X265_DEPTH - 8);
@@ -544,7 +544,7 @@
     }
 }
 
-void idct4_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
+static void idct4_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
 {
     const int shift_1st = 7;
     const int shift_2nd = 12 - (X265_DEPTH - 8);
@@ -561,7 +561,7 @@
     }
 }
 
-void idct8_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
+static void idct8_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
 {
     const int shift_1st = 7;
     const int shift_2nd = 12 - (X265_DEPTH - 8);
@@ -578,7 +578,7 @@
     }
 }
 
-void idct16_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
+static void idct16_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
 {
     const int shift_1st = 7;
     const int shift_2nd = 12 - (X265_DEPTH - 8);
@@ -595,7 +595,7 @@
     }
 }
 
-void idct32_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
+static void idct32_c(const int16_t* src, int16_t* dst, intptr_t dstStride)
 {
     const int shift_1st = 7;
     const int shift_2nd = 12 - (X265_DEPTH - 8);
@@ -612,7 +612,7 @@
     }
 }
 
-void dequant_normal_c(const int16_t* quantCoef, int16_t* coef, int num, int scale, int shift)
+static void dequant_normal_c(const int16_t* quantCoef, int16_t* coef, int num, int scale, int shift)
 {
 #if HIGH_BIT_DEPTH
     X265_CHECK(scale < 32768 || ((scale & 3) == 0 && shift > 2), "dequant invalid scale %d\n", scale);
@@ -636,7 +636,7 @@
     }
 }
 
-void dequant_scaling_c(const int16_t* quantCoef, const int32_t* deQuantCoef, int16_t* coef, int num, int per, int shift)
+static void dequant_scaling_c(const int16_t* quantCoef, const int32_t* deQuantCoef, int16_t* coef, int num, int per, int shift)
 {
     X265_CHECK(num <= 32 * 32, "dequant num %d too large\n", num);
 
@@ -664,7 +664,7 @@
     }
 }
 
-uint32_t quant_c(const int16_t* coef, const int32_t* quantCoeff, int32_t* deltaU, int16_t* qCoef, int qBits, int add, int numCoeff)
+static uint32_t quant_c(const int16_t* coef, const int32_t* quantCoeff, int32_t* deltaU, int16_t* qCoef, int qBits, int add, int numCoeff)
 {
     X265_CHECK(qBits >= 8, "qBits less than 8\n");
     X265_CHECK((numCoeff % 16) == 0, "numCoeff must be multiple of 16\n");
@@ -688,7 +688,7 @@
     return numSig;
 }
 
-uint32_t nquant_c(const int16_t* coef, const int32_t* quantCoeff, int16_t* qCoef, int qBits, int add, int numCoeff)
+static uint32_t nquant_c(const int16_t* coef, const int32_t* quantCoeff, int16_t* qCoef, int qBits, int add, int numCoeff)
 {
     X265_CHECK((numCoeff % 16) == 0, "number of quant coeff is not multiple of 4x4\n");
     X265_CHECK((uint32_t)add < ((uint32_t)1 << qBits), "2 ^ qBits less than add\n");
@@ -741,7 +741,7 @@
     return numSig;
 }
 
-void denoiseDct_c(int16_t* dctCoef, uint32_t* resSum, const uint16_t* offset, int numCoeff)
+static void denoiseDct_c(int16_t* dctCoef, uint32_t* resSum, const uint16_t* offset, int numCoeff)
 {
     for (int i = 0; i < numCoeff; i++)
     {
@@ -754,7 +754,7 @@
     }
 }
 
-int scanPosLast_c(const uint16_t *scan, const coeff_t *coeff, uint16_t *coeffSign, uint16_t *coeffFlag, uint8_t *coeffNum, int numSig, const uint16_t* /*scanCG4x4*/, const int /*trSize*/)
+static int scanPosLast_c(const uint16_t *scan, const coeff_t *coeff, uint16_t *coeffSign, uint16_t *coeffFlag, uint8_t *coeffNum, int numSig, const uint16_t* /*scanCG4x4*/, const int /*trSize*/)
 {
     memset(coeffNum, 0, MLS_GRP_NUM * sizeof(*coeffNum));
     memset(coeffFlag, 0, MLS_GRP_NUM * sizeof(*coeffFlag));
@@ -787,7 +787,7 @@
     return scanPosLast - 1;
 }
 
-uint32_t findPosFirstLast_c(const int16_t *dstCoeff, const intptr_t trSize, const uint16_t scanTbl[16])
+static uint32_t findPosFirstLast_c(const int16_t *dstCoeff, const intptr_t trSize, const uint16_t scanTbl[16])
 {
     int n;
 
@@ -820,7 +820,7 @@
 }
 
 
-uint32_t costCoeffNxN_c(const uint16_t *scan, const coeff_t *coeff, intptr_t trSize, uint16_t *absCoeff, const uint8_t *tabSigCtx, uint32_t scanFlagMask, uint8_t *baseCtx, int offset, int scanPosSigOff, int subPosBase)
+static uint32_t costCoeffNxN_c(const uint16_t *scan, const coeff_t *coeff, intptr_t trSize, uint16_t *absCoeff, const uint8_t *tabSigCtx, uint32_t scanFlagMask, uint8_t *baseCtx, int offset, int scanPosSigOff, int subPosBase)
 {
     ALIGN_VAR_32(uint16_t, tmpCoeff[SCAN_SET_SIZE]);
     uint32_t numNonZero = (scanPosSigOff < (SCAN_SET_SIZE - 1) ? 1 : 0);
@@ -874,7 +874,7 @@
     return (sum & 0xFFFFFF);
 }
 
-uint32_t costCoeffRemain_c(uint16_t *absCoeff, int numNonZero, int idx)
+static uint32_t costCoeffRemain_c(uint16_t *absCoeff, int numNonZero, int idx)
 {
     uint32_t goRiceParam = 0;
 
@@ -921,7 +921,7 @@
 }
 
 
-uint32_t costC1C2Flag_c(uint16_t *absCoeff, intptr_t numC1Flag, uint8_t *baseCtxMod, intptr_t ctxOffset)
+static uint32_t costC1C2Flag_c(uint16_t *absCoeff, intptr_t numC1Flag, uint8_t *baseCtxMod, intptr_t ctxOffset)
 {
     uint32_t sum = 0;
     uint32_t c1 = 1;
diff -r e332f1657e95 -r 8f7b35226a03 source/common/pixel.cpp
--- a/source/common/pixel.cpp	Mon Jun 29 12:18:38 2015 -0700
+++ b/source/common/pixel.cpp	Mon Jun 29 16:17:01 2015 -0700
@@ -308,7 +308,7 @@
     return (int)sum;
 }
 
-int sa8d_8x8(const pixel* pix1, intptr_t i_pix1, const pixel* pix2, intptr_t i_pix2)
+inline int sa8d_8x8(const pixel* pix1, intptr_t i_pix1, const pixel* pix2, intptr_t i_pix2)
 {
     return (int)((_sa8d_8x8(pix1, i_pix1, pix2, i_pix2) + 2) >> 2);
 }
@@ -359,12 +359,12 @@
     return (int)sum;
 }
 
-int sa8d_8x8(const int16_t* pix1, intptr_t i_pix1)
+static int sa8d_8x8(const int16_t* pix1, intptr_t i_pix1)
 {
     return (int)((_sa8d_8x8(pix1, i_pix1) + 2) >> 2);
 }
 
-int sa8d_16x16(const pixel* pix1, intptr_t i_pix1, const pixel* pix2, intptr_t i_pix2)
+static int sa8d_16x16(const pixel* pix1, intptr_t i_pix1, const pixel* pix2, intptr_t i_pix2)
 {
     int sum = _sa8d_8x8(pix1, i_pix1, pix2, i_pix2)
         + _sa8d_8x8(pix1 + 8, i_pix1, pix2 + 8, i_pix2)
@@ -516,7 +516,7 @@
             dst[k * blockSize + l] = src[l * stride + k];
 }
 
-void weight_sp_c(const int16_t* src, pixel* dst, intptr_t srcStride, intptr_t dstStride, int width, int height, int w0, int round, int shift, int offset)
+static void weight_sp_c(const int16_t* src, pixel* dst, intptr_t srcStride, intptr_t dstStride, int width, int height, int w0, int round, int shift, int offset)
 {
     int x, y;
 
@@ -541,7 +541,7 @@
     }
 }
 
-void weight_pp_c(const pixel* src, pixel* dst, intptr_t stride, int width, int height, int w0, int round, int shift, int offset)
+static void weight_pp_c(const pixel* src, pixel* dst, intptr_t stride, int width, int height, int w0, int round, int shift, int offset)
 {
     int x, y;
 
@@ -582,7 +582,7 @@
     }
 }
 
-void scale1D_128to64(pixel *dst, const pixel *src)
+static void scale1D_128to64(pixel *dst, const pixel *src)
 {
     int x;
     const pixel* src1 = src;
@@ -608,7 +608,7 @@
     }
 }
 
-void scale2D_64to32(pixel* dst, const pixel* src, intptr_t stride)
+static void scale2D_64to32(pixel* dst, const pixel* src, intptr_t stride)
 {
     uint32_t x, y;
 
@@ -627,6 +627,7 @@
     }
 }
 
+static
 void frame_init_lowres_core(const pixel* src0, pixel* dst0, pixel* dsth, pixel* dstv, pixel* dstc,
                             intptr_t src_stride, intptr_t dst_stride, int width, int height)
 {
@@ -653,7 +654,7 @@
 }
 
 /* structural similarity metric */
-void ssim_4x4x2_core(const pixel* pix1, intptr_t stride1, const pixel* pix2, intptr_t stride2, int sums[2][4])
+static void ssim_4x4x2_core(const pixel* pix1, intptr_t stride1, const pixel* pix2, intptr_t stride2, int sums[2][4])
 {
     for (int z = 0; z < 2; z++)
     {
@@ -681,7 +682,7 @@
     }
 }
 
-float ssim_end_1(int s1, int s2, int ss, int s12)
+static float ssim_end_1(int s1, int s2, int ss, int s12)
 {
 /* Maximum value for 10-bit is: ss*64 = (2^10-1)^2*16*4*64 = 4286582784, which will overflow in some cases.
  * s1*s1, s2*s2, and s1*s2 also obtain this value for edge cases: ((2^10-1)*16*4)^2 = 4286582784.
@@ -711,7 +712,7 @@
 #undef PIXEL_MAX
 }
 
-float ssim_end_4(int sum0[5][4], int sum1[5][4], int width)
+static float ssim_end_4(int sum0[5][4], int sum1[5][4], int width)
 {
     float ssim = 0.0;
 
@@ -920,7 +921,7 @@
     }
 }
 
-void planecopy_cp_c(const uint8_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift)
+static void planecopy_cp_c(const uint8_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift)
 {
     for (int r = 0; r < height; r++)
     {
@@ -932,7 +933,7 @@
     }
 }
 
-void planecopy_sp_c(const uint16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift, uint16_t mask)
+static void planecopy_sp_c(const uint16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift, uint16_t mask)
 {
     for (int r = 0; r < height; r++)
     {
@@ -946,7 +947,7 @@
 
 /* Estimate the total amount of influence on future quality that could be had if we
  * were to improve the reference samples used to inter predict any given CU. */
-void estimateCUPropagateCost(int* dst, const uint16_t* propagateIn, const int32_t* intraCosts, const uint16_t* interCosts,
+static void estimateCUPropagateCost(int* dst, const uint16_t* propagateIn, const int32_t* intraCosts, const uint16_t* interCosts,
                              const int32_t* invQscales, const double* fpsFactor, int len)
 {
     double fps = *fpsFactor / 256;
diff -r e332f1657e95 -r 8f7b35226a03 source/common/scalinglist.cpp
--- a/source/common/scalinglist.cpp	Mon Jun 29 12:18:38 2015 -0700
+++ b/source/common/scalinglist.cpp	Mon Jun 29 16:17:01 2015 -0700
@@ -80,7 +80,7 @@
     },
 };
 
-int quantTSDefault4x4[16] =
+static int quantTSDefault4x4[16] =
 {
     16, 16, 16, 16,
     16, 16, 16, 16,
@@ -88,7 +88,7 @@
     16, 16, 16, 16
 };
 
-int quantIntraDefault8x8[64] =
+static int quantIntraDefault8x8[64] =
 {
     16, 16, 16, 16, 17, 18, 21, 24,
     16, 16, 16, 16, 17, 19, 22, 25,
@@ -100,7 +100,7 @@
     24, 25, 29, 36, 47, 65, 88, 115
 };
 
-int quantInterDefault8x8[64] =
+static int quantInterDefault8x8[64] =
 {
     16, 16, 16, 16, 17, 18, 20, 24,
     16, 16, 16, 17, 18, 20, 24, 25,
diff -r e332f1657e95 -r 8f7b35226a03 source/common/vec/dct-sse3.cpp
--- a/source/common/vec/dct-sse3.cpp	Mon Jun 29 12:18:38 2015 -0700
+++ b/source/common/vec/dct-sse3.cpp	Mon Jun 29 16:17:01 2015 -0700
@@ -62,7 +62,8 @@
     {  83,  36,  83,  36, 83,  36, 83,  36 },
     {  36, -83,  36, -83, 36, -83, 36, -83 }
 };
-void idct8(const int16_t* src, int16_t* dst, intptr_t stride)
+
+static void idct8(const int16_t* src, int16_t* dst, intptr_t stride)
 {
     __m128i m128iS0, m128iS1, m128iS2, m128iS3, m128iS4, m128iS5, m128iS6, m128iS7, m128iAdd, m128Tmp0, m128Tmp1, m128Tmp2, m128Tmp3, E0h, E1h, E2h, E3h, E0l, E1l, E2l, E3l, O0h, O1h, O2h, O3h, O0l, O1l, O2l, O3l, EE0l, EE1l, E00l, E01l, EE0h, EE1h, E00h, E01h;
     __m128i T00, T01, T02, T03, T04, T05, T06, T07;
@@ -299,7 +300,7 @@
     _mm_storeh_pi((__m64*)&dst[7 * stride +  4], _mm_castsi128_ps(T11));
 }
 
-void idct16(const int16_t *src, int16_t *dst, intptr_t stride)
+static void idct16(const int16_t *src, int16_t *dst, intptr_t stride)
 {
 #define READ_UNPACKHILO(offset)\
     const __m128i T_00_00A = _mm_unpacklo_epi16(*(__m128i*)&src[1 * 16 + offset], *(__m128i*)&src[3 * 16 + offset]);\
@@ -677,7 +678,7 @@
 #undef UNPACKHILO
 #undef READ_UNPACKHILO
 
-void idct32(const int16_t *src, int16_t *dst, intptr_t stride)
+static void idct32(const int16_t *src, int16_t *dst, intptr_t stride)
 {
     //Odd
     const __m128i c16_p90_p90   = _mm_set1_epi32(0x005A005A); //column 0
diff -r e332f1657e95 -r 8f7b35226a03 source/common/vec/dct-sse41.cpp
--- a/source/common/vec/dct-sse41.cpp	Mon Jun 29 12:18:38 2015 -0700
+++ b/source/common/vec/dct-sse41.cpp	Mon Jun 29 16:17:01 2015 -0700
@@ -36,7 +36,7 @@
 using namespace X265_NS;
 
 namespace {
-void dequant_scaling(const int16_t* quantCoef, const int32_t *deQuantCoef, int16_t* coef, int num, int per, int shift)
+static void dequant_scaling(const int16_t* quantCoef, const int32_t *deQuantCoef, int16_t* coef, int num, int per, int shift)
 {
     X265_CHECK(num <= 32 * 32, "dequant num too large\n");
 
diff -r e332f1657e95 -r 8f7b35226a03 source/common/vec/dct-ssse3.cpp
--- a/source/common/vec/dct-ssse3.cpp	Mon Jun 29 12:18:38 2015 -0700
+++ b/source/common/vec/dct-ssse3.cpp	Mon Jun 29 16:17:01 2015 -0700
@@ -99,7 +99,7 @@
 #undef MAKE_COEF
 };
 
-void dct16(const int16_t *src, int16_t *dst, intptr_t stride)
+static void dct16(const int16_t *src, int16_t *dst, intptr_t stride)
 {
 #if HIGH_BIT_DEPTH
 #define SHIFT1  5
@@ -680,7 +680,7 @@
 #undef MAKE_COEF16
 };
 
-void dct32(const int16_t *src, int16_t *dst, intptr_t stride)
+static void dct32(const int16_t *src, int16_t *dst, intptr_t stride)
 {
 #if HIGH_BIT_DEPTH
 #define SHIFT1  6
diff -r e332f1657e95 -r 8f7b35226a03 source/encoder/motion.cpp
--- a/source/encoder/motion.cpp	Mon Jun 29 12:18:38 2015 -0700
+++ b/source/encoder/motion.cpp	Mon Jun 29 16:17:01 2015 -0700
@@ -56,7 +56,7 @@
     { 2, 8, 2, 8, true },  // 2x8 SATD HPEL + 2x8 SATD QPEL
 };
 
-int sizeScale[NUM_PU_SIZES];
+static int sizeScale[NUM_PU_SIZES];
 #define SAD_THRESH(v) (bcost < (((v >> 4) * sizeScale[partEnum])))
 
 /* radius 2 hexagon. repeated entries are to avoid having to compute mod6 every time. */
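
A note on the two linkage choices above (commentary, not part of the
changeset): most helpers are marked static, giving them internal linkage so
each object file keeps its own private copy, while a few one-line helpers
(the bcast*/copy* functions in cudata.cpp and one sa8d_8x8 overload in
pixel.cpp) are marked inline, which keeps external linkage but lets
identical definitions appear in several translation units; MSVC typically
emits such bodies as COMDATs and the linker folds them rather than
reporting duplicates. Either way the 8-bit and 10-bit static libraries stop
defining the same symbols, which is what allows /FORCE:MULTIPLE to be
dropped from EXTRA_LINK_FLAGS in the multilib.bat scripts. A small sketch
of the difference, with hypothetical names:

    // linkage_sketch.cpp -- illustrative only, not from the patch.
    // Internal linkage: every translation unit gets its own private copy,
    // invisible to the linker's duplicate-symbol check.
    static int scaleStatic(int v) { return v * 2; }

    // External linkage, but the ODR allows identical inline definitions
    // in multiple translation units, so the linker keeps a single copy.
    inline int scaleInline(int v) { return v * 2; }

    int scaleBoth(int v) { return scaleStatic(v) + scaleInline(v); }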


