[x265-commits] [x265] fix : square chroma transform expected error message

Ashok Kumar Mishra ashok at multicorewareinc.com
Tue May 20 17:06:48 CEST 2014


details:   http://hg.videolan.org/x265/rev/d050fe5f8f28
branches:  
changeset: 6880:d050fe5f8f28
user:      Ashok Kumar Mishra<ashok at multicorewareinc.com>
date:      Mon May 19 19:19:07 2014 +0530
description:
fix: square chroma transform expected error message
Subject: [x265] help: removing the default QP value in the help message

details:   http://hg.videolan.org/x265/rev/2c722169215c
branches:  
changeset: 6881:2c722169215c
user:      Deepthi Nandakumar <deepthi at multicorewareinc.com>
date:      Tue May 20 15:05:42 2014 +0530
description:
help: removing the default QP value in the help message

Since CQP is not the default rate control mode, a default QP confuses readers.
Subject: [x265] param: initialise crf-max and crf-min values in the param structure.

details:   http://hg.videolan.org/x265/rev/279f72586069
branches:  
changeset: 6882:279f72586069
user:      Deepthi Nandakumar <deepthi at multicorewareinc.com>
date:      Tue May 20 15:13:09 2014 +0530
description:
param: initialise crf-max and crf-min values in the param structure.
Subject: [x265] param: tabs to spaces

details:   http://hg.videolan.org/x265/rev/a815df2b313f
branches:  
changeset: 6883:a815df2b313f
user:      Steve Borho <steve at borho.org>
date:      Tue May 20 08:25:45 2014 -0500
description:
param: tabs to spaces
Subject: [x265] asm: fix bug for invalid read in sa8d

details:   http://hg.videolan.org/x265/rev/0d2ec86fa28b
branches:  
changeset: 6884:0d2ec86fa28b
user:      Yuvaraj Venkatesh <yuvaraj at multicorewareinc.com>
date:      Tue May 20 19:56:17 2014 +0530
description:
asm: fix bug for invalid read in sa8d

fix an invalid read that occurred when different stride values were given for the
two input buffers and the 16x16 and 8x8 sa8d primitives were enabled.

diffstat:

 source/Lib/TLibEncoder/TEncSearch.cpp |    8 +-
 source/common/param.cpp               |    2 +
 source/common/x86/asm-primitives.cpp  |   10 +-
 source/common/x86/pixel-a.asm         |  180 +++++++++++++++++-----------------
 source/x265.cpp                       |    6 +-
 5 files changed, 105 insertions(+), 101 deletions(-)

diffs (truncated from 592 to 300 lines):

diff -r b35a5d8f012b -r 0d2ec86fa28b source/Lib/TLibEncoder/TEncSearch.cpp
--- a/source/Lib/TLibEncoder/TEncSearch.cpp	Sun May 18 15:02:27 2014 +0900
+++ b/source/Lib/TLibEncoder/TEncSearch.cpp	Tue May 20 19:56:17 2014 +0530
@@ -2969,7 +2969,7 @@ void TEncSearch::residualTransformQuantI
                 else
                 {
                     int16_t *ptr = resiYuv->getCbAddr(absTUPartIdxC);
-                    X265_CHECK(trWidthC == trHeightC, "square chroma transform expected\n");
+                    X265_CHECK(widthC == heightC, "square chroma transform expected\n");
                     primitives.blockfill_s[(int)g_convertToBit[trWidthC]](ptr, resiYuv->m_cwidth, 0);
                 }
                 if (absSumV)
@@ -2985,7 +2985,7 @@ void TEncSearch::residualTransformQuantI
                 else
                 {
                     int16_t *ptr =  resiYuv->getCrAddr(absTUPartIdxC);
-                    X265_CHECK(trWidthC == trHeightC, "square chroma transform expected\n");
+                    X265_CHECK(widthC == heightC, "square chroma transform expected\n");
                     primitives.blockfill_s[(int)g_convertToBit[trWidthC]](ptr, resiYuv->m_cwidth, 0);
                 }
                 cu->setCbfPartRange(absSumU ? setCbf : 0, TEXT_CHROMA_U, absTUPartIdxC, tuIterator.m_absPartIdxStep);
@@ -3342,7 +3342,7 @@ void TEncSearch::xEstimateResidualQT(TCo
                 {
                     int16_t *ptr = m_qtTempShortYuv[qtlayer].getCbAddr(tuIterator.m_absPartIdxTURelCU);
                     const uint32_t stride = m_qtTempShortYuv[qtlayer].m_cwidth;
-                    X265_CHECK(trWidthC == trHeightC, "square chroma transform expected\n");
+                    X265_CHECK(widthC == heightC, "square chroma transform expected\n");
                     primitives.blockfill_s[(int)g_convertToBit[widthC]](ptr, stride, 0);
                 }
 
@@ -3410,7 +3410,7 @@ void TEncSearch::xEstimateResidualQT(TCo
                 {
                     int16_t *ptr =  m_qtTempShortYuv[qtlayer].getCrAddr(tuIterator.m_absPartIdxTURelCU);
                     const uint32_t stride = m_qtTempShortYuv[qtlayer].m_cwidth;
-                    X265_CHECK(trWidthC == trHeightC, "square chroma transform expected\n");
+                    X265_CHECK(widthC == heightC, "square chroma transform expected\n");
                     primitives.blockfill_s[(int)g_convertToBit[widthC]](ptr, stride, 0);
                 }
 
diff -r b35a5d8f012b -r 0d2ec86fa28b source/common/param.cpp
--- a/source/common/param.cpp	Sun May 18 15:02:27 2014 +0900
+++ b/source/common/param.cpp	Tue May 20 19:56:17 2014 +0530
@@ -175,6 +175,8 @@ void x265_param_default(x265_param *para
     param->rc.aqMode = X265_AQ_AUTO_VARIANCE;
     param->rc.aqStrength = 1.0;
     param->rc.cuTree = 1;
+    param->rc.rfConstantMax = 0;
+    param->rc.rfConstantMin = 0;
 
     /* Quality Measurement Metrics */
     param->bEnablePsnr = 0;
diff -r b35a5d8f012b -r 0d2ec86fa28b source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Sun May 18 15:02:27 2014 +0900
+++ b/source/common/x86/asm-primitives.cpp	Tue May 20 19:56:17 2014 +0530
@@ -190,12 +190,14 @@ extern "C" {
     p.sse_ss[LUMA_64x64]   = x265_pixel_ssd_ss_64x64_ ## cpu;
 
 #define SA8D_INTER_FROM_BLOCK(cpu) \
-    p.sa8d_inter[LUMA_4x8]  = x265_pixel_satd_4x8_ ## cpu; \
-    p.sa8d_inter[LUMA_8x4]  = x265_pixel_satd_8x4_ ## cpu; \
+    p.sa8d_inter[LUMA_4x8]   = x265_pixel_satd_4x8_ ## cpu; \
+    p.sa8d_inter[LUMA_8x4]   = x265_pixel_satd_8x4_ ## cpu; \
     p.sa8d_inter[LUMA_4x16]  = x265_pixel_satd_4x16_ ## cpu; \
     p.sa8d_inter[LUMA_16x4]  = x265_pixel_satd_16x4_ ## cpu; \
-    p.sa8d_inter[LUMA_12x16]  = x265_pixel_satd_12x16_ ## cpu; \
-    p.sa8d_inter[LUMA_16x12]  = x265_pixel_satd_16x12_ ## cpu; \
+    p.sa8d_inter[LUMA_12x16] = x265_pixel_satd_12x16_ ## cpu; \
+    p.sa8d_inter[LUMA_8x8]   = x265_pixel_sa8d_8x8_ ## cpu; \
+    p.sa8d_inter[LUMA_16x16] = x265_pixel_sa8d_16x16_ ## cpu; \
+    p.sa8d_inter[LUMA_16x12] = x265_pixel_satd_16x12_ ## cpu; \
     p.sa8d_inter[LUMA_16x8]  = x265_pixel_sa8d_16x8_ ## cpu; \
     p.sa8d_inter[LUMA_8x16]  = x265_pixel_sa8d_8x16_ ## cpu; \
     p.sa8d_inter[LUMA_32x24] = x265_pixel_sa8d_32x24_ ## cpu; \
diff -r b35a5d8f012b -r 0d2ec86fa28b source/common/x86/pixel-a.asm
--- a/source/common/x86/pixel-a.asm	Sun May 18 15:02:27 2014 +0900
+++ b/source/common/x86/pixel-a.asm	Tue May 20 19:56:17 2014 +0530
@@ -2959,8 +2959,8 @@ cglobal pixel_sa8d_32x16, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     add  r2, 16*SIZEOF_PIXEL
     add  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3025,8 +3025,8 @@ cglobal pixel_sa8d_32x32, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     add  r2, 16*SIZEOF_PIXEL
     add  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3037,8 +3037,8 @@ cglobal pixel_sa8d_32x32, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     sub  r2, 16*SIZEOF_PIXEL
     sub  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3058,8 +3058,8 @@ cglobal pixel_sa8d_32x64, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     add  r2, 16*SIZEOF_PIXEL
     add  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3070,8 +3070,8 @@ cglobal pixel_sa8d_32x64, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     sub  r2, 16*SIZEOF_PIXEL
     sub  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3082,8 +3082,8 @@ cglobal pixel_sa8d_32x64, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     add  r2, 16*SIZEOF_PIXEL
     add  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3094,8 +3094,8 @@ cglobal pixel_sa8d_32x64, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     sub  r2, 16*SIZEOF_PIXEL
     sub  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3115,8 +3115,8 @@ cglobal pixel_sa8d_48x64, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     add  r2, 16*SIZEOF_PIXEL
     add  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3124,8 +3124,8 @@ cglobal pixel_sa8d_48x64, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     add  r2, 16*SIZEOF_PIXEL
     add  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3136,8 +3136,8 @@ cglobal pixel_sa8d_48x64, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     sub  r2, 16*SIZEOF_PIXEL
     sub  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3145,8 +3145,8 @@ cglobal pixel_sa8d_48x64, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     sub  r2, 16*SIZEOF_PIXEL
     sub  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3157,8 +3157,8 @@ cglobal pixel_sa8d_48x64, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     add  r2, 16*SIZEOF_PIXEL
     add  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3166,8 +3166,8 @@ cglobal pixel_sa8d_48x64, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     add  r2, 16*SIZEOF_PIXEL
     add  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3178,8 +3178,8 @@ cglobal pixel_sa8d_48x64, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     sub  r2, 16*SIZEOF_PIXEL
     sub  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3187,8 +3187,8 @@ cglobal pixel_sa8d_48x64, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     sub  r2, 16*SIZEOF_PIXEL
     sub  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3208,8 +3208,8 @@ cglobal pixel_sa8d_64x16, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     add  r2, 16*SIZEOF_PIXEL
     add  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3217,8 +3217,8 @@ cglobal pixel_sa8d_64x16, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     add  r2, 16*SIZEOF_PIXEL
     add  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3226,8 +3226,8 @@ cglobal pixel_sa8d_64x16, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     add  r2, 16*SIZEOF_PIXEL
     add  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3247,8 +3247,8 @@ cglobal pixel_sa8d_64x32, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     add  r2, 16*SIZEOF_PIXEL
     add  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3256,8 +3256,8 @@ cglobal pixel_sa8d_64x32, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]
-    sub  r2, r4
-    sub  r0, r5
+    sub  r0, r4
+    sub  r2, r5
     add  r2, 16*SIZEOF_PIXEL
     add  r0, 16*SIZEOF_PIXEL
     lea  r4, [3*r1]
@@ -3265,8 +3265,8 @@ cglobal pixel_sa8d_64x32, 4,8,13
     SA8D_16x16
     lea  r4, [8*r1]
     lea  r5, [8*r3]


More information about the x265-commits mailing list