[x264-devel] commit: Cosmetics in mvd handling (Henrik Gramner)
git at videolan.org
Sun Mar 28 04:44:35 CEST 2010
x264 | branch: master | Henrik Gramner <hengar-6 at student.ltu.se> | Wed Mar 24 23:27:30 2010 +0100 | [a1edfa63597da268c850ddf769ae2fde63e132f8] | committer: Jason Garrett-Glaser
Cosmetics in mvd handling
Use a 2D array instead of doing manual pointer arithmetic.
> http://git.videolan.org/gitweb.cgi/x264.git/?a=commit;h=a1edfa63597da268c850ddf769ae2fde63e132f8
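
For illustration only, a minimal standalone sketch (not x264 code) of the idea behind this change: declaring the per-macroblock mvd storage as a pointer to a uint8_t[8][2] block lets the compiler handle the per-MB stride, so indexing like mvd[mb][block] replaces the old manual mvd[mb*8 + block] arithmetic, and sizeof(*mvd) describes the allocation size by itself. The macroblock count and indices below are made up.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main( void )
{
    int i_mb_count = 4;                /* made-up number of macroblocks */

    /* old style: flat array of uint8_t[2], per-MB stride of 8 done by hand */
    uint8_t (*mvd_flat)[2] = malloc( 2*8 * i_mb_count * sizeof(uint8_t) );

    /* new style: each element is a whole uint8_t[8][2] block per MB,
     * so sizeof(*mvd) == 16 and the malloc size is self-describing */
    uint8_t (*mvd)[8][2] = malloc( i_mb_count * sizeof(*mvd) );
    if( !mvd_flat || !mvd )
        return 1;

    int i_mb_xy = 2;                   /* some macroblock index */
    mvd_flat[i_mb_xy*8 + 4][0] = 33;   /* manual pointer arithmetic */
    mvd[i_mb_xy][4][0]         = 33;   /* 2D-array indexing, same byte offset */

    printf( "flat: %d  2d: %d  block size: %zu\n",
            mvd_flat[i_mb_xy*8 + 4][0], mvd[i_mb_xy][4][0], sizeof(*mvd) );

    free( mvd_flat );
    free( mvd );
    return 0;
}
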
---
common/common.h | 2 +-
common/macroblock.c | 40 ++++++++++++++++++++--------------------
2 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/common/common.h b/common/common.h
index af59286..c77dfa6 100644
--- a/common/common.h
+++ b/common/common.h
@@ -551,7 +551,7 @@ struct x264_t
uint8_t (*non_zero_count)[16+4+4]; /* nzc. for I_PCM set to 16 */
int8_t *chroma_pred_mode; /* chroma_pred_mode. cabac only. for non intra I_PRED_CHROMA_DC(0) */
int16_t (*mv[2])[2]; /* mb mv. set to 0 for intra mb */
- uint8_t (*mvd[2])[2]; /* absolute value of mb mv difference with predict, clipped to [0,33]. set to 0 if intra. cabac only */
+ uint8_t (*mvd[2])[8][2]; /* absolute value of mb mv difference with predict, clipped to [0,33]. set to 0 if intra. cabac only */
int8_t *ref[2]; /* mb ref. set to -1 if non used (intra or Lx only) */
int16_t (*mvr[2][32])[2]; /* 16x16 mv for each possible ref */
int8_t *skipbp; /* block pattern for SKIP or DIRECT (sub)mbs. B-frames + cabac only */
diff --git a/common/macroblock.c b/common/macroblock.c
index f2bc31f..1009da5 100644
--- a/common/macroblock.c
+++ b/common/macroblock.c
@@ -711,8 +711,8 @@ int x264_macroblock_cache_init( x264_t *h )
if( h->param.b_cabac )
{
CHECKED_MALLOC( h->mb.chroma_pred_mode, i_mb_count * sizeof(int8_t) );
- CHECKED_MALLOC( h->mb.mvd[0], 2*8 * i_mb_count * sizeof(uint8_t) );
- CHECKED_MALLOC( h->mb.mvd[1], 2*8 * i_mb_count * sizeof(uint8_t) );
+ CHECKED_MALLOC( h->mb.mvd[0], i_mb_count * sizeof( **h->mb.mvd ) );
+ CHECKED_MALLOC( h->mb.mvd[1], i_mb_count * sizeof( **h->mb.mvd ) );
}
for( i=0; i<2; i++ )
@@ -1218,16 +1218,16 @@ void x264_macroblock_cache_load( x264_t *h, int i_mb_x, int i_mb_y )
if( h->param.b_cabac )
{
if( i_top_type >= 0 )
- CP64( h->mb.cache.mvd[i_list][x264_scan8[0]-8], h->mb.mvd[i_list][i_top_xy*8] );
+ CP64( h->mb.cache.mvd[i_list][x264_scan8[0] - 8], h->mb.mvd[i_list][i_top_xy][0] );
else
- M64( h->mb.cache.mvd[i_list][x264_scan8[0]-8] ) = 0;
+ M64( h->mb.cache.mvd[i_list][x264_scan8[0] - 8] ) = 0;
if( i_left_type >= 0 )
{
- CP16( h->mb.cache.mvd[i_list][x264_scan8[0]-1+0*8], h->mb.mvd[i_list][i_left_xy*8+4] );
- CP16( h->mb.cache.mvd[i_list][x264_scan8[0]-1+1*8], h->mb.mvd[i_list][i_left_xy*8+5] );
- CP16( h->mb.cache.mvd[i_list][x264_scan8[0]-1+2*8], h->mb.mvd[i_list][i_left_xy*8+6] );
- CP16( h->mb.cache.mvd[i_list][x264_scan8[0]-1+3*8], h->mb.mvd[i_list][i_left_xy*8+3] );
+ CP16( h->mb.cache.mvd[i_list][x264_scan8[0 ] - 1], h->mb.mvd[i_list][i_left_xy][4] );
+ CP16( h->mb.cache.mvd[i_list][x264_scan8[2 ] - 1], h->mb.mvd[i_list][i_left_xy][5] );
+ CP16( h->mb.cache.mvd[i_list][x264_scan8[8 ] - 1], h->mb.mvd[i_list][i_left_xy][6] );
+ CP16( h->mb.cache.mvd[i_list][x264_scan8[10] - 1], h->mb.mvd[i_list][i_left_xy][3] );
}
else
for( i = 0; i < 4; i++ )
@@ -1408,26 +1408,26 @@ void x264_macroblock_cache_save( x264_t *h )
if( !IS_INTRA( i_mb_type ) && !IS_SKIP( i_mb_type ) && !IS_DIRECT( i_mb_type ) )
{
- CP64( h->mb.mvd[0][i_mb_xy*8+0], h->mb.cache.mvd[0][x264_scan8[0]+8*3] );
- CP16( h->mb.mvd[0][i_mb_xy*8+4], h->mb.cache.mvd[0][x264_scan8[0]+8*0+3] );
- CP16( h->mb.mvd[0][i_mb_xy*8+5], h->mb.cache.mvd[0][x264_scan8[0]+8*1+3] );
- CP16( h->mb.mvd[0][i_mb_xy*8+6], h->mb.cache.mvd[0][x264_scan8[0]+8*2+3] );
+ CP64( h->mb.mvd[0][i_mb_xy][0], h->mb.cache.mvd[0][x264_scan8[10]] );
+ CP16( h->mb.mvd[0][i_mb_xy][4], h->mb.cache.mvd[0][x264_scan8[5 ]] );
+ CP16( h->mb.mvd[0][i_mb_xy][5], h->mb.cache.mvd[0][x264_scan8[7 ]] );
+ CP16( h->mb.mvd[0][i_mb_xy][6], h->mb.cache.mvd[0][x264_scan8[13]] );
if( h->sh.i_type == SLICE_TYPE_B )
{
- CP64( h->mb.mvd[1][i_mb_xy*8+0], h->mb.cache.mvd[1][x264_scan8[0]+8*3] );
- CP16( h->mb.mvd[1][i_mb_xy*8+4], h->mb.cache.mvd[1][x264_scan8[0]+8*0+3] );
- CP16( h->mb.mvd[1][i_mb_xy*8+5], h->mb.cache.mvd[1][x264_scan8[0]+8*1+3] );
- CP16( h->mb.mvd[1][i_mb_xy*8+6], h->mb.cache.mvd[1][x264_scan8[0]+8*2+3] );
+ CP64( h->mb.mvd[1][i_mb_xy][0], h->mb.cache.mvd[1][x264_scan8[10]] );
+ CP16( h->mb.mvd[1][i_mb_xy][4], h->mb.cache.mvd[1][x264_scan8[5 ]] );
+ CP16( h->mb.mvd[1][i_mb_xy][5], h->mb.cache.mvd[1][x264_scan8[7 ]] );
+ CP16( h->mb.mvd[1][i_mb_xy][6], h->mb.cache.mvd[1][x264_scan8[13]] );
}
}
else
{
- M64( h->mb.mvd[0][i_mb_xy*8+0] ) = 0;
- M64( h->mb.mvd[0][i_mb_xy*8+4] ) = 0;
+ M64( h->mb.mvd[0][i_mb_xy][0] ) = 0;
+ M64( h->mb.mvd[0][i_mb_xy][4] ) = 0;
if( h->sh.i_type == SLICE_TYPE_B )
{
- M64( h->mb.mvd[1][i_mb_xy*8+0] ) = 0;
- M64( h->mb.mvd[1][i_mb_xy*8+4] ) = 0;
+ M64( h->mb.mvd[1][i_mb_xy][0] ) = 0;
+ M64( h->mb.mvd[1][i_mb_xy][4] ) = 0;
}
}