[vlc-devel] [PATCH 1/2] Sepia video filter

Laurent Aimar fenrir at elivagar.org
Fri Nov 26 20:15:36 CET 2010


Hi,

> +/*****************************************************************************
> + * Filter: applies the sepia effect to one picture
> + *****************************************************************************
> + * This function reads the configured intensity, allocates an output picture
> + * and dispatches to the sepia routine matching the input chroma.
> + *****************************************************************************/
> +static picture_t *Filter( filter_t *p_filter, picture_t *p_pic )
> +{
> +    picture_t *p_outpic;
> +    int intensity;
> +
> +    if( !p_pic ) return NULL;
> +
> +    filter_sys_t *p_sys = p_filter->p_sys;
> +    vlc_mutex_lock( &p_sys->lock );
> +    intensity = p_sys->i_intensity;
> +    vlc_mutex_unlock( &p_sys->lock );
> +
> +    p_outpic = filter_NewPicture( p_filter );
> +    if( !p_outpic )
> +    {
> +        msg_Warn( p_filter, "can't get output picture" );
> +        picture_Release( p_pic );
> +        return NULL;
> +    }
> +
> +    switch( p_pic->format.i_chroma )
> +    {
> +        case VLC_CODEC_RGB24:
> +            RVSepia( p_pic, p_outpic, false, intensity );
> +            break;
> +        case VLC_CODEC_RGB32:
> +            RVSepia( p_pic, p_outpic, true, intensity );
> +            break;
> +        CASE_PLANAR_YUV_SQUARE
> +            PlanarYUVSepia( p_pic, p_outpic, intensity );
> +            break;
> +        CASE_PACKED_YUV_422
> +            PackedYUVSepia( p_pic, p_outpic, intensity );
> +            break;
> +        default:
> +            assert( false );
> +    }
Storing a function pointer, selected once in the Open function, would avoid
having two switches.
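Something along these lines (untested sketch; the pf_sepia member and the
Open() snippet are only illustrative):

typedef void (*SepiaFunction)( picture_t *, picture_t *, int );

struct filter_sys_t
{
    SepiaFunction pf_sepia;     /* chosen once in Open() */
    int           i_intensity;
    vlc_mutex_t   lock;
};

/* In Open(), after the usual checks: */
switch( p_filter->fmt_in.video.i_chroma )
{
    CASE_PLANAR_YUV_SQUARE
        p_sys->pf_sepia = PlanarYUVSepia;
        break;
    CASE_PACKED_YUV_422
        p_sys->pf_sepia = PackedYUVSepia;
        break;
    case VLC_CODEC_RGB24:
    case VLC_CODEC_RGB32:
        /* RVSepia would need the same prototype and read the chroma
         * from p_pic->format itself */
        p_sys->pf_sepia = RVSepia;
        break;
    default:
        return VLC_EGENERIC;
}

/* Filter() is then reduced to:
 *     p_sys->pf_sepia( p_pic, p_outpic, intensity );
 */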

> +/*****************************************************************************
> + * PlanarYUVSepia: Applies sepia to one frame of the planar YUV video
> + *****************************************************************************
> + * This function applies the sepia effect to one frame of the video by
> + * iterating through the video lines. In every pass, the start of the Y, U
> + * and V plane lines is computed, and new YUV values are calculated for
> + * every pixel.
> + *****************************************************************************/
> +static void PlanarYUVSepia( picture_t *p_pic, picture_t *p_outpic,
> +                               int i_intensity )
> +{
> +    uint8_t *p_in_y, *p_in_u, *p_in_v, *p_in_end_y, *p_line_end_y, *p_out_y,
> +            *p_out_u, *p_out_v;
> +    int i_current_line = 0;
> +
> +    p_in_y = p_pic->p[Y_PLANE].p_pixels;
> +    p_in_end_y = p_in_y + p_pic->p[Y_PLANE].i_visible_lines
> +        * p_pic->p[Y_PLANE].i_pitch;
> +    p_out_y = p_outpic->p[Y_PLANE].p_pixels;
> +
> +    /* iterate for every visible line in the frame */
> +    while( p_in_y < p_in_end_y )
> +    {
> +        p_line_end_y = p_in_y + p_pic->p[Y_PLANE].i_visible_pitch;
> +        /* calculate start of U plane line */
> +        p_in_u = p_pic->p[U_PLANE].p_pixels
> +            + p_pic->p[U_PLANE].i_pitch * ( i_current_line / 2 );
> +        p_out_u = p_outpic->p[U_PLANE].p_pixels
> +            + p_outpic->p[U_PLANE].i_pitch * ( i_current_line / 2 );
> +        /* calculate start of V plane line */
> +        p_in_v = p_pic->p[V_PLANE].p_pixels
> +            + p_pic->p[V_PLANE].i_pitch * ( i_current_line / 2 );
> +        p_out_v = p_outpic->p[V_PLANE].p_pixels
> +            + p_outpic->p[V_PLANE].i_pitch * ( i_current_line / 2 );
The /2 seems to mean that only I420 is supported, while this code path is
used for nearly all planar YUV formats...
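If you keep this structure, the vertical subsampling can at least be derived
from the actual plane sizes instead of hard-coding /2 (untested sketch; the
same would be needed for p_out_u/p_out_v and for the horizontal step in the
inner loop):

const int i_u_line = i_current_line * p_pic->p[U_PLANE].i_visible_lines
                   / p_pic->p[Y_PLANE].i_visible_lines;
const int i_v_line = i_current_line * p_pic->p[V_PLANE].i_visible_lines
                   / p_pic->p[Y_PLANE].i_visible_lines;

p_in_u = p_pic->p[U_PLANE].p_pixels + p_pic->p[U_PLANE].i_pitch * i_u_line;
p_in_v = p_pic->p[V_PLANE].p_pixels + p_pic->p[V_PLANE].i_pitch * i_v_line;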

> +        /* iterate for every two pixels in line */
> +        while( p_in_y < p_line_end_y )
> +        {
> +            uint8_t y1, y2, u, v;
> +            uint8_t sepia_y1, sepia_y2, sepia_u, sepia_v;
> +            /* retrieve original YUV values */
> +            y1 = *p_in_y++;
> +            y2 = *p_in_y++;
> +            u = *p_in_u++;
> +            v = *p_in_v++;
> +            /* calculate new, sepia values */
> +            YuvSepia( &sepia_y1, &sepia_y2, &sepia_u, &sepia_v,
> +                     y1, y2, u, v, i_intensity );
> +            /* put new values */
> +            *p_out_y++ = sepia_y1;
> +            *p_out_y++ = sepia_y2;
> +            *p_out_u++ = sepia_u;
> +            *p_out_v++ = sepia_v;
> +        }
> +        p_in_y += p_pic->p[Y_PLANE].i_pitch
> +            - p_pic->p[Y_PLANE].i_visible_pitch;
> +        p_out_y += p_outpic->p[Y_PLANE].i_pitch
> +            - p_outpic->p[Y_PLANE].i_visible_pitch;
> +        i_current_line++;
You process the chroma twice, which is not acceptable.
> +    }
A simple dual loop over y and x would be equally fast but much more readable...
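Roughly (untested sketch, keeping the 4:2:0 assumption the current code makes):

for( int y = 0; y < p_pic->p[Y_PLANE].i_visible_lines; y += 2 )
{
    const uint8_t *p_in_y0 = &p_pic->p[Y_PLANE].p_pixels[ y      * p_pic->p[Y_PLANE].i_pitch];
    const uint8_t *p_in_y1 = &p_pic->p[Y_PLANE].p_pixels[(y + 1) * p_pic->p[Y_PLANE].i_pitch];
    const uint8_t *p_in_u  = &p_pic->p[U_PLANE].p_pixels[(y / 2) * p_pic->p[U_PLANE].i_pitch];
    const uint8_t *p_in_v  = &p_pic->p[V_PLANE].p_pixels[(y / 2) * p_pic->p[V_PLANE].i_pitch];
    /* ... same four pointers on p_outpic ... */

    for( int x = 0; x < p_pic->p[Y_PLANE].i_visible_pitch; x += 2 )
    {
        /* one 2x2 luma block and its single U and V sample,
         * each read and written exactly once */
    }
}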

> +/*****************************************************************************
> + * PackedYUVSepia: Applies sepia to one frame of the packed YUV video
> + *****************************************************************************
> + * This function applies the sepia effect to one frame of the video by iterating
> + * through video lines. In every pass, we calculate new values for pixels
> + * (UYVY, VYUY, YUYV and YVYU formats are supported)
> + *****************************************************************************/
> +static void PackedYUVSepia( picture_t *p_pic, picture_t *p_outpic,
> +                           int i_intensity )
> +{
> +    uint8_t *p_in, *p_in_end, *p_line_start, *p_line_end, *p_out;
> +    uint8_t y1, y2, u, v;
> +
> +    p_in = p_pic->p[0].p_pixels;
> +    p_in_end = p_in + p_pic->p[0].i_visible_lines
> +        * p_pic->p[0].i_pitch;
> +    p_out = p_outpic->p[0].p_pixels;
> +
> +    while( p_in < p_in_end )
> +    {
> +        p_line_start = p_in;
> +        p_line_end = p_in + p_pic->p[0].i_visible_pitch;
> +        while( p_in < p_line_end )
> +        {
> +            uint8_t sepia_y1, sepia_y2, sepia_u, sepia_v;
> +            /* extract proper pixel values */
> +            switch( p_pic->format.i_chroma )
> +            {
> +                case VLC_CODEC_UYVY:
> +                    u = *p_in++;
> +                    y1 = *p_in++;
> +                    v = *p_in++;
> +                    y2 = *p_in++;
> +                    break;
> +                case VLC_CODEC_VYUY:
> +                    v = *p_in++;
> +                    y1 = *p_in++;
> +                    u = *p_in++;
> +                    y2 = *p_in++;
> +                    break;
> +                case VLC_CODEC_YUYV:
> +                    y1 = *p_in++;
> +                    u = *p_in++;
> +                    y2 = *p_in++;
> +                    v = *p_in++;
> +                    break;
> +                case VLC_CODEC_YVYU:
> +                    y1 = *p_in++;
> +                    v = *p_in++;
> +                    y2 = *p_in++;
> +                    u = *p_in++;
> +                    break;
> +                default:
> +                    assert( false );
> +            }
Storing the indexes of the Y, U and V components at the start of the function
would simplify it a lot. There is reusable code in blend.c that could be moved
to a common header.
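For instance (untested sketch):

int i_y1, i_u, i_y2, i_v;
switch( p_pic->format.i_chroma )
{
    case VLC_CODEC_UYVY: i_u  = 0; i_y1 = 1; i_v  = 2; i_y2 = 3; break;
    case VLC_CODEC_VYUY: i_v  = 0; i_y1 = 1; i_u  = 2; i_y2 = 3; break;
    case VLC_CODEC_YUYV: i_y1 = 0; i_u  = 1; i_y2 = 2; i_v  = 3; break;
    case VLC_CODEC_YVYU: i_y1 = 0; i_v  = 1; i_y2 = 2; i_u  = 3; break;
    default: assert( false );
}

/* The inner loop then works on one 4-byte macropixel at a time: */
YuvSepia( &p_out[i_y1], &p_out[i_y2], &p_out[i_u], &p_out[i_v],
          p_in[i_y1], p_in[i_y2], p_in[i_u], p_in[i_v], i_intensity );
p_in  += 4;
p_out += 4;
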
> +            /* calculate new, sepia values */
> +            YuvSepia( &sepia_y1, &sepia_y2, &sepia_u, &sepia_v,
> +                     y1, y2, u, v, i_intensity );
> +            /* put new values in proper place */
> +            switch( p_pic->format.i_chroma )
> +            {
> +                case VLC_CODEC_UYVY:
> +                    *p_out++ = sepia_u;
> +                    *p_out++ = sepia_y1;
> +                    *p_out++ = sepia_v;
> +                    *p_out++ = sepia_y2;
> +                    break;
> +                case VLC_CODEC_VYUY:
> +                    *p_out++ = sepia_v;
> +                    *p_out++ = sepia_y1;
> +                    *p_out++ = sepia_u;
> +                    *p_out++ = sepia_y2;
> +                    break;
> +                case VLC_CODEC_YUYV:
> +                    *p_out++ = sepia_y1;
> +                    *p_out++ = sepia_u;
> +                    *p_out++ = sepia_y2;
> +                    *p_out++ = sepia_v;
> +                    break;
> +                case VLC_CODEC_YVYU:
> +                    *p_out++ = sepia_y1;
> +                    *p_out++ = sepia_v;
> +                    *p_out++ = sepia_y2;
> +                    *p_out++ = sepia_u;
> +                    break;
> +                default:
> +                    assert( false );
> +            }
> +        }
> +        p_in += p_pic->p[0].i_pitch - p_pic->p[0].i_visible_pitch;
> +        p_out += p_outpic->p[0].i_pitch
> +            - p_outpic->p[0].i_visible_pitch;
> +    }
Again, a loop over y/x would be more readable here...

> +}
> +
> +/*****************************************************************************
> + * RVSepia: Applies sepia to one frame of the RV24/RV32 video
> + *****************************************************************************
> + * This function applies sepia effect to one frame of the video by iterating
> + * through video lines and calculating new values for every byte in chunks of
> + * 3 (RV24) or 4 (RV32) bytes.
> + *****************************************************************************/
> +static void RVSepia( picture_t *p_pic, picture_t *p_outpic,
> +                                 bool rv32, int i_intensity )
> +{
> +    uint8_t *p_in, *p_in_end, *p_line_start, *p_line_end, *p_out;
> +    uint8_t i_r, i_g, i_b;
> +
> +    p_in = p_pic->p[0].p_pixels;
> +    p_in_end = p_in + p_pic->p[0].i_visible_lines
> +        * p_pic->p[0].i_pitch;
> +    p_out = p_outpic->p[0].p_pixels;
> +
> +    while( p_in < p_in_end )
> +    {
> +        p_line_start = p_in;
> +        p_line_end = p_in + p_pic->p[0].i_visible_pitch;
> +        while( p_in < p_line_end )
> +        {
> +            i_b = *p_in++;
> +            i_g = *p_in++;
> +            i_r = *p_in++;
> +            Sepia( &i_r, &i_g, &i_b, i_intensity );
> +            *p_out++ = i_b;
> +            *p_out++ = i_g;
> +            *p_out++ = i_r;
> +            /* for rv32 we process pixels in chunks of 4 bytes */
> +            if ( rv32 )
> +            {
> +                /* alpha channel stays the same */
> +                *p_out++ = *p_in++;
> +            }
You cannot assume the RGB component order; you must use the RGB masks.
You can also reuse code from blend.c for that.
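Untested sketch of the mask-based approach (MaskToByteOffset is only
illustrative; it assumes byte-aligned masks, which holds for RV24/RV32):

static int MaskToByteOffset( uint32_t i_mask )
{
    int i_offset = 0;
    while( i_mask && !( i_mask & 0xff ) )
    {
        i_mask >>= 8;
        i_offset++;
    }
    return i_offset;
}

/* resolved once at the top of RVSepia */
const int i_r = MaskToByteOffset( p_pic->format.i_rmask );
const int i_g = MaskToByteOffset( p_pic->format.i_gmask );
const int i_b = MaskToByteOffset( p_pic->format.i_bmask );
const int i_pixel = p_pic->p[0].i_pixel_pitch;   /* 3 for RV24, 4 for RV32 */

/* inner loop */
uint8_t r = p_in[i_r], g = p_in[i_g], b = p_in[i_b];
Sepia( &r, &g, &b, i_intensity );
memcpy( p_out, p_in, i_pixel );        /* keeps the alpha/padding byte as-is */
p_out[i_r] = r; p_out[i_g] = g; p_out[i_b] = b;
p_in  += i_pixel;
p_out += i_pixel;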

> +        }
> +        p_in += p_pic->p[0].i_pitch - p_pic->p[0].i_visible_pitch;
> +        p_out += p_outpic->p[0].i_pitch
> +            - p_outpic->p[0].i_visible_pitch;
> +    }
> +}
> +
> +/*****************************************************************************
> + * YuvSepia: Calculates sepia for YUV values
> + *****************************************************************************
> + * This function calculates the sepia values in YUV color space for a given
> + * sepia intensity. It converts the YUV values to their RGB equivalents,
> + * calculates the sepia values and then converts back from RGB to YUV.
> + *****************************************************************************/
> +static void YuvSepia( uint8_t* sepia_y1, uint8_t* sepia_y2,
> +                             uint8_t* sepia_u, uint8_t* sepia_v,
> +                             uint8_t y1, uint8_t y2, uint8_t u, uint8_t v,
> +                             int i_intensity )
> +{
> +    uint8_t r1, g1, b1; /* for y1 new value */
> +    uint8_t r2, b2, g2; /* for y2 new value */
> +    uint8_t r3, g3, b3; /* for new values of u and v */
> +    /* first convert YUV -> RGB */
> +    yuv2rgb( &r1, &g1, &b1, y1, u, v );
> +    yuv2rgb( &r2, &g2, &b2, y2, u, v );
> +    yuv2rgb( &r3, &g3, &b3, ( y1 + y2 ) / 2, u, v );
> +    /* calculates new values for r, g and b components */
> +    Sepia( &r1, &g1, &b1, i_intensity );
> +    Sepia( &r2, &g2, &b2, i_intensity );
> +    Sepia( &r3, &g3, &b3, i_intensity );
> +    /* convert from calculated RGB -> YUV */
> +    *sepia_y1 = ( ( 66 * r1 + 129 * g1 +  25 * b1 + 128 ) >> 8 ) +  16;
> +    *sepia_y2 = ( ( 66 * r2 + 129 * g2 +  25 * b2 + 128 ) >> 8 ) +  16;
> +    *sepia_u = ( ( -38 * r3 -  74 * g3 + 112 * b3 + 128 ) >> 8 ) + 128;
> +    *sepia_v = ( ( 112 * r3 -  94 * g3 -  18 * b3 + 128 ) >> 8 ) + 128;
> +}
> +
> +/*****************************************************************************
> + * Sepia: Calculates sepia of RGB values
> + *****************************************************************************
> + * This function calculates sepia values of RGB color space for a given sepia
> + * intensity. Sepia algorithm is taken from here:
> + * http://groups.google.com/group/comp.lang.java.programmer/browse_thread/
> + *   thread/9d20a72c40b119d0/18f12770ec6d9dd6
> + *****************************************************************************/
> +static void Sepia( uint8_t *p_r, uint8_t *p_g, uint8_t *p_b, int i_intensity )
> +{
> +    int16_t i_sepia_r, i_sepia_g, i_sepia_b, i_round;
> +    int i_sepia_depth = 20;
> +    i_round = ( *p_r + *p_g + *p_b ) / 3;
> +    i_sepia_r = i_round + ( i_sepia_depth * 2 );
> +    if ( i_sepia_r > 255 )
> +    {
> +        i_sepia_r = 255;
> +    }
> +    i_sepia_g = i_round + i_sepia_depth;
> +    if ( i_sepia_g > 255 )
> +    {
> +        i_sepia_g = 255;
> +    }
> +    i_sepia_b = i_round - i_intensity;
> +    if ( i_sepia_b < 0 )
> +    {
> +        i_sepia_b = 0;
> +    }
> +    else if ( i_sepia_b > 255 )
> +    {
> +        i_sepia_b = 255;
> +    }
> +    *p_r = i_sepia_r;
> +    *p_g = i_sepia_g;
> +    *p_b = i_sepia_b;
> +}
For both YuvSepia and Sepia, taking input and output pointers may simplify
the filter code. It's worth a try.
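For instance, Sepia could become something like this (untested sketch), so
the callers write straight into the destination picture instead of going
through temporaries:

static void Sepia( uint8_t *p_dst_r, uint8_t *p_dst_g, uint8_t *p_dst_b,
                   const uint8_t *p_src_r, const uint8_t *p_src_g,
                   const uint8_t *p_src_b, int i_intensity )
{
    const int i_sepia_depth = 20;
    const int i_round = ( *p_src_r + *p_src_g + *p_src_b ) / 3;

    *p_dst_r = __MIN( i_round + 2 * i_sepia_depth, 255 );
    *p_dst_g = __MIN( i_round + i_sepia_depth, 255 );
    *p_dst_b = __MAX( __MIN( i_round - i_intensity, 255 ), 0 );
}

/* e.g. in RVSepia, with i_r/i_g/i_b resolved from the masks:
 *     Sepia( &p_out[i_r], &p_out[i_g], &p_out[i_b],
 *            &p_in[i_r], &p_in[i_g], &p_in[i_b], i_intensity );
 */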

> +/*****************************************************************************
> + * yuv2rgb: Converts from YUV to RGB color space
> + *****************************************************************************
> + * This function converts YUV values to RGB values using the conversion defined in:
> + * http://msdn.microsoft.com/en-us/library/ms893078
> + *****************************************************************************/
> +static void yuv2rgb( uint8_t* r, uint8_t* g, uint8_t* b, uint8_t y,
> +                    uint8_t u, uint8_t v )
> +{
> +    int16_t c = y - 16;
> +    int16_t d = u - 128;
> +    int16_t e = v - 128;
> +    int16_t noclipped_r = ( 298 * c + 409 * e + 128 ) >> 8;
> +    if ( noclipped_r < 0 )
> +    {
> +        *r=0;
> +    }
> +    else if ( noclipped_r > 255 )
> +    {
> +        *r = 255;
> +    }
> +    else
> +    {
> +        *r = noclipped_r;
> +    }
> +    int16_t noclipped_g = ( 298 * c - 100 * d - 208 * e + 128 ) >> 8;
> +    if ( noclipped_g < 0 )
> +    {
> +        *g=0;
> +    }
> +    else if ( noclipped_g > 255 )
> +    {
> +        *g = 255;
> +    }
> +    else
> +    {
> +        *g = noclipped_g;
> +    }
> +    int16_t noclipped_b = ( 298 * c + 516 * d + 128 ) >> 8;
> +    if ( noclipped_b < 0 )
> +    {
> +        *b=0;
> +    }
> +    else if ( noclipped_b > 255 )
> +    {
> +        *b = 255;
> +    }
> +    else
> +    {
> +        *b = noclipped_b;
> +    }
> +}
Inconsistent coding style.
You can also reuse code from blend.c.
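For the style point, a small clamp helper would remove most of the repetition
and the inconsistent spacing (untested sketch, same coefficients as above):

static inline uint8_t Clamp( int i_value )
{
    if( i_value < 0 )
        return 0;
    if( i_value > 255 )
        return 255;
    return i_value;
}

static void yuv2rgb( uint8_t *r, uint8_t *g, uint8_t *b,
                     uint8_t y, uint8_t u, uint8_t v )
{
    const int c = y - 16;
    const int d = u - 128;
    const int e = v - 128;
    *r = Clamp( ( 298 * c           + 409 * e + 128 ) >> 8 );
    *g = Clamp( ( 298 * c - 100 * d - 208 * e + 128 ) >> 8 );
    *b = Clamp( ( 298 * c + 516 * d           + 128 ) >> 8 );
}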

Regards,

-- 
fenrir



