[vlc-devel] [PATCH] Add VideoToolbox based decoder
Jean-Baptiste Kempf
jb at videolan.org
Thu Jan 22 16:34:36 CET 2015
On 22 Jan, Felix Paul Kühne wrote:
> --- a/NEWS
> +++ b/NEWS
> @@ -17,7 +17,6 @@ Access:
> * Support DVB-T2 on Windows BDA
> * Support depayloading Opus from RTP
>
> -
Useless :)
> Decoder:
> * OMX GPU-zerocopy support for decoding and display on Android using OpenMax IL
> * Support 4:4:4 and 4:2:2 chroma samplings with VDPAU hw acceleration
> @@ -28,6 +27,8 @@ Decoder:
> * Support VP9 and WMV3 decoding using OMX and performance improvements
> * New MPEG-1 & 2 audio layer I, II, III + MPEG 2.5 decoder based on libmpg123
> * New BPG decoder based on libbpg
> + * New hardware accelerated decoder for OS X and iOS supporting H264, mp4v,
> + mp1v, mp2v, H263 and DV based on VideoToolbox
You might use the marketing names and not the fourccs :)
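For instance something like: "New hardware-accelerated decoding for OS X and iOS, supporting H.264, MPEG-4 Part 2, MPEG-1/2 Video, H.263 and DV, based on VideoToolbox".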
> +dnl VideoToolbox sans avcodec
> +dnl
> +AC_ARG_ENABLE(videotoolbox,
> + [ --enable-videotoolbox VideoToolbox support (default auto)])
Is it really auto?
> +if test "x${enable_videotoolbox}" != "xno" &&
> + (test "${SYS}" = "darwin" || test "${enable_videotoolbox}" = "yes")
> +then
> + VLC_ADD_PLUGIN([videotoolbox])
> +fi
> +libvideotoolbox_plugin_la_SOURCES = video_chroma/copy.c video_chroma/copy.h codec/h264_nal.c codec/h264_nal.h codec/videotoolbox.m
> +libvideotoolbox_plugin_la_CFLAGS = $(AM_CFLAGS) $(AVFORMAT_CFLAGS) -fobjc-arc
> +libvideotoolbox_plugin_la_LDFLAGS = $(AM_LDFLAGS) $(SYMBOLIC_LDFLAGS) -Wl,-framework,Foundation,-framework,VideoToolbox,-framework,CoreMedia
> +libvideotoolbox_plugin_la_LIBADD = $(AVFORMAT_LIBS)
> +# if HAVE_AVCODEC_VIDEOTOOLBOX
AVCODEC?
> +codec_LTLIBRARIES += libvideotoolbox_plugin.la
> +# endif
> diff --git a/modules/codec/videotoolbox.m b/modules/codec/videotoolbox.m
> +#import <avio.h>
Yuck.
> +#if MAC_OS_X_VERSION_MAX_ALLOWED < 1090
> +const CFStringRef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder =
> +CFSTR("EnableHardwareAcceleratedVideoDecoder");
> +const CFStringRef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder = CFSTR("RequireHardwareAcceleratedVideoDecoder");
Why one line vs 2 lines?
> +#if !TARGET_OS_IPHONE
> +#define VT_REQUIRE_HW_DEC N_("Hardware decoding only")
> +#define VT_REQUIRE_HW_DEC_LONGTEXT N_("Use only, if hardware decoding is available.")
> +#endif
I would just use one string "Use only hardware decoders"
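Something like this should be enough (untested sketch, the longtext macro can then go away):

    #define VT_REQUIRE_HW_DEC N_("Use only hardware decoders")

    add_bool("videotoolbox-hw-decoder-only", false,
             VT_REQUIRE_HW_DEC, VT_REQUIRE_HW_DEC, false)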
> +vlc_module_begin()
> +set_category(CAT_INPUT)
> +set_subcategory(SUBCAT_INPUT_VCODEC)
> +set_description(N_("VideoToolbox video decoder"))
> +set_capability("decoder",800)
> +set_callbacks(OpenDecoder, CloseDecoder)
> +#if !TARGET_OS_IPHONE
> +add_bool("videotoolbox-hw-decoder-only", false, VT_REQUIRE_HW_DEC, VT_REQUIRE_HW_DEC_LONGTEXT, false)
> +#endif
> +vlc_module_end()
> +#pragma mark - module open and close
> +
> +static int OpenDecoder(vlc_object_t *p_this)
> +{
> + decoder_t *p_dec = (decoder_t *)p_this;
> + OSStatus status;
> + CMVideoCodecType codec;
> + size_t i_profile = 0xFFFF, i_level = 0xFFFF;
> +
> + /* check for the codec we can and want to decode */
> + switch (p_dec->fmt_in.i_codec) {
> + case VLC_CODEC_H264:
> + codec = kCMVideoCodecType_H264;
> +
> + h264_get_profile_level(&p_dec->fmt_in, &i_profile, &i_level, NULL);
> +
> + msg_Dbg(p_dec, "trying to decode MPEG-4 Part 10: profile %zu, level %zu", i_profile, i_level);
> +
> + if (deviceSupports10BitH264()) {
> + if (i_profile > 110)
> + return VLC_EGENERIC;
> + } else {
> + if (i_profile > 100)
> + return VLC_EGENERIC;
> + }
Indentation is off.
No support for 4:2:2 from Apple? Disappointing :)
> + /* we don't do decoding beyond 4K */
> + if (i_level > 51)
> + return VLC_EGENERIC;
Level 5.1 also covers plain Full HD. Are you sure this check really means "beyond 4K"?
> + case VLC_CODEC_MP4V:
> + codec = kCMVideoCodecType_MPEG4Video;
> + break;
> + case VLC_CODEC_H263:
> + codec = kCMVideoCodecType_H263;
> + break;
> +
> +#if !TARGET_OS_IPHONE
> + /* there are no mp2v or DV decoders on iOS, so bailout early */
> + case VLC_CODEC_MPGV:
> + codec = kCMVideoCodecType_MPEG1Video; //kCMVideoCodecType_MPEG2Video; // kCMVideoCodecType_MPEG1Video
> + break;
> + case VLC_CODEC_MP2V:
> + codec = kCMVideoCodecType_MPEG2Video;
> + break;
> + case VLC_CODEC_DV:
> + /* the VT decoder can't differentiate between PAL and NTSC, so we need to do it */
> + switch (p_dec->fmt_in.i_original_fourcc) {
> + case VLC_FOURCC( 'd', 'v', 'c', ' '):
> + case VLC_FOURCC( 'd', 'v', ' ', ' '):
> + msg_Dbg(p_dec, "Decoding DV NTSC");
> + codec = kCMVideoCodecType_DVCNTSC;
> + break;
> +
> + case VLC_FOURCC( 'd', 'v', 's', 'd'):
> + case VLC_FOURCC( 'd', 'v', 'c', 'p'):
> + case VLC_FOURCC( 'D', 'V', 'S', 'D'):
> + msg_Dbg(p_dec, "Decoding DV PAL");
> + codec = kCMVideoCodecType_DVCPAL;
> + break;
> +
> + default:
> + break;
> + }
> + if (codec != 0)
> + break;
> +#endif
> +
> + default:
> + msg_Err(p_dec, "'%4.4s' is not supported", (char *)&p_dec->fmt_in.i_codec);
> + return VLC_EGENERIC;
> + }
> +
> + msg_Dbg(p_dec, "will try to decode '%4.4s'", (char *)&p_dec->fmt_in.i_original_fourcc);
Merge this with...
> + decoder_sys_t *p_sys;
> + p_sys = malloc(sizeof(*p_sys));
> + if (!p_sys)
> + return VLC_ENOMEM;
> + p_dec->p_sys = p_sys;
> +
> + /* setup the decoder */
> + CFMutableDictionaryRef decoderConfiguration = CFDictionaryCreateMutable(kCFAllocatorDefault,
> + 2,
> + &kCFTypeDictionaryKeyCallBacks,
> + &kCFTypeDictionaryValueCallBacks);
> + CFDictionarySetValue(decoderConfiguration,
> + CFSTR("CVImageBufferChromaLocationBottomField"),
> + CFSTR("left"));
> + CFDictionarySetValue(decoderConfiguration,
> + CFSTR("CVImageBufferChromaLocationTopField"),
> + CFSTR("left"));
> + CFDictionarySetValue(decoderConfiguration,
> + CFSTR("FullRangeVideo"),
> + kCFBooleanFalse);
> +
> + /* fetch extradata */
> + CFMutableDictionaryRef extradata_info = NULL;
> + CFDataRef extradata = NULL;
> +
> + extradata_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
> + 1,
> + &kCFTypeDictionaryKeyCallBacks,
> + &kCFTypeDictionaryValueCallBacks);
> +
> + if (codec == kCMVideoCodecType_H264) {
> + msg_Dbg(p_dec, "creating avvC for H264 playback");
This debug should be in avvCCreate() if any
> + extradata = avvCCreate(p_dec,
> + (uint8_t*)p_dec->fmt_in.p_extra,
> + p_dec->fmt_in.i_extra);
> + if (extradata)
> + CFDictionarySetValue(extradata_info, CFSTR("avcC"), extradata);
> +
> + CFDictionarySetValue(decoderConfiguration,
> + kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
> + extradata_info);
> + } else if (codec == kCMVideoCodecType_MPEG4Video) {
> + msg_Dbg(p_dec, "creating esds for mp4v playback");
Idem, this one should be in ESDSCreate() if any.
> + extradata = ESDSCreate(p_dec,
> + (uint8_t*)p_dec->fmt_in.p_extra,
> + p_dec->fmt_in.i_extra);
> +
> + if (extradata)
> + CFDictionarySetValue(extradata_info, CFSTR("esds"), extradata);
> +
> + CFDictionarySetValue(decoderConfiguration,
> + kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
> + extradata_info);
> + } else {
> + msg_Dbg(p_dec, "playing something non H264, non mp4v");
Sorry, what?
> + CFDictionarySetValue(decoderConfiguration,
> + kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
> + extradata_info);
> + }
> +
> + if (extradata)
> + CFRelease(extradata);
> + CFRelease(extradata_info);
> +
> + /* pixel aspect ratio */
> + CFMutableDictionaryRef par = CFDictionaryCreateMutable(kCFAllocatorDefault,
> + 2,
> + &kCFTypeDictionaryKeyCallBacks,
> + &kCFTypeDictionaryValueCallBacks);
> + VTDictionarySetInt32(par,
> + CFSTR("HorizontalSpacing"),
> + p_dec->fmt_in.video.i_sar_num > 0 ? p_dec->fmt_in.video.i_sar_num : 1);
> + VTDictionarySetInt32(par,
> + CFSTR("VerticalSpacing"),
> + p_dec->fmt_in.video.i_sar_den > 0 ? p_dec->fmt_in.video.i_sar_den : 1);
> + CFDictionarySetValue(decoderConfiguration,
> + CFSTR("CVPixelAspectRatio"),
> + par);
> + CFRelease(par);
> +
> + CFDictionarySetValue(decoderConfiguration,
> + CFSTR("FullRangeVideo"),
> + kCFBooleanFalse);
> +
> +#if !TARGET_OS_IPHONE
> + /* enable HW accelerated playback, since this is optional on OS X
> + * note that the backend may still fallback on software mode if no
> + * suitable hardware is available */
> + CFDictionarySetValue(decoderConfiguration,
> + kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder,
> + kCFBooleanTrue);
> +
> + /* on OS X, we can force VT to fail if no suitable HW decoder is available,
> + * preventing the aforementioned SW fallback */
> + if (var_InheritInteger(p_dec, "videotoolbox-hw-decoder-only"))
> + CFDictionarySetValue(decoderConfiguration,
> + kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
> + kCFBooleanTrue);
> +#endif
> +
> + /* create video format description */
> + status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
> + codec,
> + p_dec->fmt_in.video.i_width,
> + p_dec->fmt_in.video.i_height,
> + decoderConfiguration,
> + &p_sys->videoFormatDescription);
> + if (status) {
> + CFRelease(decoderConfiguration);
> + msg_Err(p_dec, "video format description creation failed (%i)", status);
> + goto error;
> + }
> +
> + /* destination pixel buffer attributes */
> + CFMutableDictionaryRef dpba = CFDictionaryCreateMutable(kCFAllocatorDefault,
> + 2,
> + &kCFTypeDictionaryKeyCallBacks,
> + &kCFTypeDictionaryValueCallBacks);
> + CFDictionarySetValue(dpba,
> + kCVPixelBufferOpenGLCompatibilityKey,
> + kCFBooleanFalse);
> + VTDictionarySetInt32(dpba,
> + kCVPixelBufferPixelFormatTypeKey,
> + kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange);
> + VTDictionarySetInt32(dpba,
> + kCVPixelBufferWidthKey,
> + p_dec->fmt_in.video.i_width);
> + VTDictionarySetInt32(dpba,
> + kCVPixelBufferHeightKey,
> + p_dec->fmt_in.video.i_height);
> + VTDictionarySetInt32(dpba,
> + kCVPixelBufferBytesPerRowAlignmentKey,
> + p_dec->fmt_in.video.i_width * 2);
> +
> + /* setup storage */
> + p_sys->storageObject = [[VTStorageObject alloc] init];
> + p_sys->storageObject.outputFrames = [[NSMutableArray alloc] init];
> + p_sys->storageObject.presentationTimes = [[NSMutableArray alloc] init];
> +
> + /* setup decoder callback record */
> + VTDecompressionOutputCallbackRecord decoderCallbackRecord;
> + decoderCallbackRecord.decompressionOutputCallback = DecoderCallback;
> + decoderCallbackRecord.decompressionOutputRefCon = p_dec;
> +
> + /* create decompression session */
> + status = VTDecompressionSessionCreate(kCFAllocatorDefault,
> + p_sys->videoFormatDescription,
> + decoderConfiguration,
> + dpba,
> + &decoderCallbackRecord,
> + &p_sys->session);
> +
> + /* release no longer needed storage items */
> + CFRelease(dpba);
> + CFRelease(decoderConfiguration);
> +
> + /* check if the session is valid */
> + if (status) {
> +
> + switch (status) {
> + case -12470:
> + msg_Err(p_dec, "VT is not supported on this hardware");
> + break;
> + case -12471:
> + msg_Err(p_dec, "Video format is not supported by VT");
> + break;
> + case -12903:
> + msg_Err(p_dec, "created session is invalid, could not select and open decoder instance");
> + break;
> + case -12906:
> + msg_Err(p_dec, "could not find decoder");
> + break;
> + case -12910:
> + msg_Err(p_dec, "unsupported data");
> + break;
> + case -12913:
> + msg_Err(p_dec, "VT is not available to sandboxed apps on this OS release");
> + break;
> + case -12917:
> + msg_Err(p_dec, "Insufficient source color data");
> + break;
> + case -12918:
> + msg_Err(p_dec, "Could not create color correction data");
> + break;
> + case -12210:
> + msg_Err(p_dec, "Insufficient authorization to create decoder");
> + break;
> +
> + default:
> + msg_Err(p_dec, "Decompression session creation failed (%i)", status);
> + break;
> + }
> +
> + goto error;
> + }
> +
> + /* return our proper VLC internal state */
> + p_dec->fmt_out.i_cat = VIDEO_ES;
> + p_dec->fmt_out.i_codec = VLC_CODEC_YV12;
Not I420?
> + /* FIXME: this might be wrong for anamorph contents */
> + p_dec->fmt_out.video.i_width = p_dec->fmt_in.video.i_width;
> + p_dec->fmt_out.video.i_height = p_dec->fmt_in.video.i_height;
> + p_dec->fmt_out.video.i_sar_den = p_dec->fmt_in.video.i_sar_den > 0 ? p_dec->fmt_in.video.i_sar_den : 1;
> + p_dec->fmt_out.video.i_sar_num = p_dec->fmt_in.video.i_sar_num > 0 ? p_dec->fmt_in.video.i_sar_num : 1;
> + p_dec->b_need_packetized = true;
> +
> + if (!p_dec->fmt_in.video.i_visible_width)
> + p_dec->fmt_in.video.i_visible_width = p_dec->fmt_in.video.i_width;
> + if (!p_dec->fmt_in.video.i_visible_height)
> + p_dec->fmt_in.video.i_visible_height = p_dec->fmt_in.video.i_height;
> +
> + CopyInitCache(&p_sys->image_cache, p_dec->fmt_in.video.i_width);
> +
> + p_dec->pf_decode_video = DecodeBlock;
> +
> + msg_Info(p_dec, "Using VideoToolbox for video decoding");
... that one (see the one-liner sketched after this hunk).
> +
> + return VLC_SUCCESS;
> +
> +error:
> + CloseDecoder(p_this);
> +
> + return VLC_EGENERIC;
> +}
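As said above, a single message would do, e.g. (untested, pick the log level you prefer):

    msg_Dbg(p_dec, "Using VideoToolbox to decode '%4.4s'",
            (char *)&p_dec->fmt_in.i_original_fourcc);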
> +
> +static void CloseDecoder(vlc_object_t *p_this)
> +{
> + decoder_t *p_dec = (decoder_t *)p_this;
> + decoder_sys_t *p_sys = p_dec->p_sys;
> +
> + if (p_sys->session) {
> + VTDecompressionSessionWaitForAsynchronousFrames(p_sys->session);
> + VTDecompressionSessionInvalidate(p_sys->session);
> + CFRelease(p_sys->session);
> + p_sys->session = NULL;
> + }
> +
> + if (p_sys->videoFormatDescription)
> + CFRelease(p_sys->videoFormatDescription);
> +
> + free(p_sys);
> +}
> +
> +#pragma mark - helpers
> +
> +static BOOL deviceSupports10BitH264()
> +{
> +#if TARGET_IPHONE_SIMULATOR
> + return NO;
> +#endif
> +#if TARGET_OS_IPHONE
> + size_t size;
> + cpu_type_t type;
> +
> + size = sizeof(type);
> + sysctlbyname("hw.cputype", &type, &size, NULL, 0);
> +
> + /* 10bit H264 decoding was introduced with the first 64bit ARM CPU, the A7 */
> + if (type == CPU_TYPE_ARM64)
> + return YES;
> +
> + return NO;
> +#else
> + return NO;
> +#endif
> +}
> +
> +static CFDataRef avvCCreate(decoder_t *p_dec, uint8_t *p_buf, uint32_t i_buf_size)
> +{
> + VLC_UNUSED(p_dec);
> + CFDataRef data;
> +
> + /* each NAL sent to the decoder is preceded by a 4 byte header
> + * we need to change the avcC header to signal headers of 4 bytes, if needed */
> + if (i_buf_size >= 4 && (p_buf[4] & 0x03) != 0x03) {
> + uint8_t *p_fixed_buf;
> + p_fixed_buf = malloc(i_buf_size);
> + if (!p_fixed_buf)
> + return NULL;
> +
> + memcpy(p_fixed_buf, p_buf, i_buf_size);
> + p_fixed_buf[4] |= 0x03;
> +
> + data = CFDataCreate(kCFAllocatorDefault,
> + p_fixed_buf,
> + i_buf_size);
> + } else
> + data = CFDataCreate(kCFAllocatorDefault,
> + p_buf,
> + i_buf_size);
> +
> + return data;
> +}
> +
> +static void write_mp4_description_length_avio(AVIOContext *context, int length)
> +{
> + uint8_t byte;
> +
> + for (int i = 3; i >= 0; i--) {
> + byte = (length >> (i * 7)) & 0x7F;
> + if (i != 0)
> + byte |= 0x80;
> +
> + avio_w8(context, byte);
avio...
> + }
> +}
> +static CFDataRef ESDSCreate(decoder_t *p_dec, uint8_t *p_buf, uint32_t i_buf_size)
> +{
> + int full_size = 3 + 5 +13 + 5 + i_buf_size + 3;
> + int config_size = 13 + 5 + i_buf_size;
> + int padding = 12;
> +
> + AVIOContext *context;
> + int status;
> + uint8_t *rw_extradata;
> +
> + status = avio_open_dyn_buf(&context);
> + if (status != noErr)
> + msg_Err(p_dec, "opening dyn buf failed %i", status);
> +
> + avio_w8(context, 0); // Version
> + avio_wb24(context, 0); // Flags
> +
> + // elementary stream description tag
> + avio_w8(context, 0x03); // ES description tag
> + write_mp4_description_length_avio(context, full_size);
> + avio_wb16(context, 0); // esid
> + avio_w8(context, 0); // stream priority (0-3)
> +
> + // decoder configuration description tag
> + avio_w8(context, 0x04);
> + write_mp4_description_length_avio(context, config_size);
> + avio_w8(context, 32); // object type identification (32 = MPEG4)
> + avio_w8(context, 0x11); // stream type
> + avio_wb24(context, 0); // buffer size
> + avio_wb32(context, 0); // max bitrate
> + avio_wb32(context, 0); // avg bitrate
> +
> + // decoder specific description tag
> + avio_w8(context, 0x05); // dec specific info tag
> + write_mp4_description_length_avio(context, i_buf_size);
> + avio_write(context, p_buf, i_buf_size);
> +
> + // sync layer configuration description tag
> + avio_w8(context, 0x06); // tag
> + avio_w8(context, 0x01); // length
> + avio_w8(context, 0x02); // no SL
> +
> + rw_extradata = malloc(full_size + padding);
> + avio_close_dyn_buf(context, &rw_extradata);
> +
> + CFDataRef data = CFDataCreate(kCFAllocatorDefault,
> + rw_extradata,
> + full_size + padding);
> +
> + return data;
> +}
Please remove avio :)
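Building the esds blob by hand is not much more code. Rough untested sketch with a plain
buffer, mirroring the byte layout above (WriteMP4DescrLength is a made-up name for the
existing length helper):

    /* write an MP4 descriptor length on 4 bytes, 7 bits per byte */
    static void WriteMP4DescrLength(uint8_t **pp, int i_length)
    {
        for (int i = 3; i >= 0; i--)
            *(*pp)++ = ((i_length >> (i * 7)) & 0x7F) | (i ? 0x80 : 0);
    }

    static CFDataRef ESDSCreate(decoder_t *p_dec, uint8_t *p_buf, uint32_t i_buf_size)
    {
        VLC_UNUSED(p_dec);
        int i_full_size = 3 + 5 + 13 + 5 + i_buf_size + 3;
        /* version/flags + ES descriptor tag/length + payload */
        int i_total_size = 4 + 5 + i_full_size;

        uint8_t *p_esds = malloc(i_total_size);
        if (!p_esds)
            return NULL;
        uint8_t *p = p_esds;

        *p++ = 0;                               /* version */
        *p++ = 0; *p++ = 0; *p++ = 0;           /* flags */

        *p++ = 0x03;                            /* ES description tag */
        WriteMP4DescrLength(&p, i_full_size);
        *p++ = 0; *p++ = 0;                     /* esid */
        *p++ = 0;                               /* stream priority */

        *p++ = 0x04;                            /* decoder configuration description tag */
        WriteMP4DescrLength(&p, 13 + 5 + i_buf_size);
        *p++ = 32;                              /* object type identification (32 = MPEG4) */
        *p++ = 0x11;                            /* stream type */
        memset(p, 0, 3 + 4 + 4); p += 11;       /* buffer size, max bitrate, avg bitrate */

        *p++ = 0x05;                            /* decoder specific description tag */
        WriteMP4DescrLength(&p, i_buf_size);
        memcpy(p, p_buf, i_buf_size); p += i_buf_size;

        *p++ = 0x06; *p++ = 0x01; *p++ = 0x02;  /* sync layer config: no SL */

        CFDataRef data = CFDataCreate(kCFAllocatorDefault, p_esds, i_total_size);
        free(p_esds);
        return data;
    }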
> +static CMSampleBufferRef VTSampleBufferCreate(decoder_t *p_dec,
> + CMFormatDescriptionRef fmt_desc,
> + void *buffer,
> + int size,
> + mtime_t i_pts,
> + mtime_t i_dts,
> + mtime_t i_length)
> +{
> + OSStatus status;
> + CMBlockBufferRef block_buf = NULL;
> + CMSampleBufferRef sample_buf = NULL;
> +
> + CMSampleTimingInfo timeInfo;
> + CMSampleTimingInfo timeInfoArray[1];
> +
> + timeInfo.duration = CMTimeMake(i_length, 1);
> + timeInfo.presentationTimeStamp = CMTimeMake(i_pts > 0 ? i_pts : i_dts, CLOCK_FREQ);
> + timeInfo.decodeTimeStamp = CMTimeMake(i_dts, CLOCK_FREQ);
> + timeInfoArray[0] = timeInfo;
> +
> + status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
> + buffer, // memoryBlock
> + size, // blockLength
> + kCFAllocatorNull, // blockAllocator
> + NULL, // customBlockSource
> + 0, // offsetToData
> + size, // dataLength
> + false, // flags
> + &block_buf);
> +
> + if (!status) {
> + status = CMSampleBufferCreate(kCFAllocatorDefault, // allocator
> + block_buf, // dataBuffer
> + TRUE, // dataReady
> + 0, // makeDataReadyCallback
> + 0, // makeDataReadyRefcon
> + fmt_desc, // formatDescription
> + 1, // numSamples
> + 1, // numSampleTimingEntries
> + timeInfoArray, // sampleTimingArray
> + 0, // numSampleSizeEntries
> + NULL, // sampleSizeArray
> + &sample_buf);
> + if (status != noErr)
> + msg_Warn(p_dec, "sample buffer creation failure %i", status);
> + } else
> + msg_Warn(p_dec, "cm block buffer creation failure %i", status);
> +
> + if (block_buf)
> + CFRelease(block_buf);
> +
> + return sample_buf;
> +}
> +
> +void VTDictionarySetInt32(CFMutableDictionaryRef dict, CFStringRef key, int value)
> +{
> + CFNumberRef number;
> + number = CFNumberCreate(NULL, kCFNumberSInt32Type, &value);
> + CFDictionarySetValue(dict, key, number);
> + CFRelease(number);
> +}
> +
> +static void copy420YpCbCr8Planar(picture_t *p_pic,
> + CVPixelBufferRef buffer,
> + unsigned i_width,
> + unsigned i_height,
> + copy_cache_t *cache )
> +{
> + uint8_t *pp_plane[2];
> + size_t pi_pitch[2];
> +
> + if (!buffer)
> + return;
> +
> + CVPixelBufferLockBaseAddress(buffer, 0);
> +
> + for (int i = 0; i < 2; i++) {
> + pp_plane[i] = CVPixelBufferGetBaseAddressOfPlane(buffer, i);
> + pi_pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(buffer, i);
> + }
> +
> + CopyFromNv12(p_pic, pp_plane, pi_pitch,
> + i_width, i_height, cache);
> +
> + CVPixelBufferUnlockBaseAddress(buffer, 0);
> +}
memcpy is murder :)
> +#pragma mark - actual decoding
> +
> +static picture_t *DecodeBlock(decoder_t *p_dec, block_t **pp_block)
> +{
> + decoder_sys_t *p_sys = p_dec->p_sys;
> + block_t *p_block;
> + VTDecodeFrameFlags decoderFlags = 0;
> + VTDecodeInfoFlags flagOut;
> + OSStatus status;
> +
> + if (!pp_block)
> + return NULL;
> +
> + if (!p_sys->session)
> + return NULL;
> +
> + p_block = *pp_block;
> +
> + if (likely(p_block)) {
> + if (unlikely(p_block->i_flags&(BLOCK_FLAG_DISCONTINUITY|BLOCK_FLAG_CORRUPTED))) { // p_block->i_dts < VLC_TS_INVALID ||
> + block_Release(p_block);
> + goto skip;
> + }
> +
> + /* feed to vt */
> + if (likely(p_block->i_buffer)) {
> + CMSampleBufferRef sampleBuffer;
> + sampleBuffer = VTSampleBufferCreate(p_dec,
> + p_sys->videoFormatDescription,
> + p_block->p_buffer,
> + p_block->i_buffer,
> + p_block->i_pts,
> + p_block->i_dts,
> + p_block->i_length);
> + if (sampleBuffer) {
> + decoderFlags = kVTDecodeFrame_EnableAsynchronousDecompression;
> +
> + status = VTDecompressionSessionDecodeFrame(p_sys->session,
> + sampleBuffer,
> + decoderFlags,
> + NULL, // sourceFrameRefCon
> + &flagOut); // infoFlagsOut
> + if (status != noErr) {
> + if (status == kCVReturnInvalidSize)
> + msg_Err(p_dec, "decoder failure: invalid block size");
> + else if (status == -6661)
> + msg_Err(p_dec, "decoder failure: invalid argument");
> + else if (status == -8969 || status == -12909)
> + msg_Err(p_dec, "decoder failure: bad data");
> + else if (status == -12911 || status == -8960)
> + msg_Err(p_dec, "decoder failure: internal malfunction");
> + else
> + msg_Info(p_dec, "decoding frame failed (%i)", status);
> + }
> +
> + CFRelease(sampleBuffer);
> + }
> + }
> +
> + block_Release(p_block);
> + }
> +
> +skip:
> +
> + *pp_block = NULL;
> +
> + if ([p_sys->storageObject.outputFrames count] && [p_sys->storageObject.presentationTimes count]) {
> + CVPixelBufferRef imageBuffer = NULL;
> + NSNumber *framePTS = nil;
> + id imageBufferObject = nil;
> + picture_t *p_pic = NULL;
> +
> + @synchronized(p_sys->storageObject) {
> + framePTS = [p_sys->storageObject.presentationTimes firstObject];
> + imageBufferObject = [p_sys->storageObject.outputFrames firstObject];
> + imageBuffer = (__bridge CVPixelBufferRef)imageBufferObject;
> +
> + if (imageBuffer != NULL) {
> + if (CVPixelBufferGetDataSize(imageBuffer) > 0) {
> + p_pic = decoder_NewPicture(p_dec);
> +
> + if (!p_pic)
> + return NULL;
> +
> +#ifdef CAN_COMPILE_SSE2
> + if (p_sys->image_cache.buffer)
> +#endif
> + copy420YpCbCr8Planar(p_pic,
> + imageBuffer,
> + CVPixelBufferGetWidthOfPlane(imageBuffer, 0),
> + CVPixelBufferGetHeightOfPlane(imageBuffer, 0),
> + &p_sys->image_cache);
> +
> + p_pic->date = framePTS.longLongValue;
> +
> + if (imageBufferObject)
> + [p_sys->storageObject.outputFrames removeObjectAtIndex:0];
> +
> + if (framePTS)
> + [p_sys->storageObject.presentationTimes removeObjectAtIndex:0];
> + }
> + }
> + }
> + return p_pic;
> + }
> +
> + return NULL;
> +}
> +
> +static void DecoderCallback(void *decompressionOutputRefCon,
> + void *sourceFrameRefCon,
> + OSStatus status,
> + VTDecodeInfoFlags infoFlags,
> + CVPixelBufferRef imageBuffer,
> + CMTime pts,
> + CMTime duration)
> +{
> + VLC_UNUSED(sourceFrameRefCon);
> + VLC_UNUSED(duration);
> + decoder_t *p_dec = (decoder_t *)decompressionOutputRefCon;
> + decoder_sys_t *p_sys = p_dec->p_sys;
> +
> +#ifndef NDEBUG
> + static BOOL outputdone = NO;
> + if (!outputdone) {
> + CFDictionaryRef attachments = CVBufferGetAttachments(imageBuffer,
> + kCVAttachmentMode_ShouldPropagate);
> + NSLog(@"%@", attachments);
> + outputdone = YES;
> + }
> +#endif
> +
> + if (status != noErr) {
> + msg_Warn(p_dec, "decoding of a frame failed (%i, %u)", status, (unsigned int) infoFlags);
> + return;
> + }
> +
> + if (imageBuffer == NULL)
> + return;
> +
> + if (infoFlags & kVTDecodeInfo_FrameDropped) {
> + msg_Dbg(p_dec, "decoder dropped frame");
> + CFRelease(imageBuffer);
> + return;
> + }
> +
> + NSNumber *framePTS = nil;
> +
> + if (CMTIME_IS_VALID(pts))
> + framePTS = [NSNumber numberWithLongLong:pts.value + 1000];
> + else {
> + msg_Dbg(p_dec, "invalid timestamp, dropping frame");
> + CFRelease(imageBuffer);
> + return;
> + }
> +
> + if (framePTS) {
> + @synchronized(p_sys->storageObject) {
> + id imageBufferObject = (__bridge id)imageBuffer;
> + BOOL shouldStop = YES;
> + NSInteger insertionIndex = [p_sys->storageObject.presentationTimes count] - 1;
> + while (insertionIndex >= 0 && shouldStop == NO) {
> + NSNumber *aNumber = p_sys->storageObject.presentationTimes[insertionIndex];
> + if ([aNumber longLongValue] <= [framePTS longLongValue]) {
> + shouldStop = YES;
> + break;
> + }
> + insertionIndex--;
> + }
> + if (insertionIndex + 1 == [p_sys->storageObject.presentationTimes count]) {
> + [p_sys->storageObject.presentationTimes addObject:framePTS];
> + [p_sys->storageObject.outputFrames addObject:imageBufferObject];
> + } else {
> + [p_sys->storageObject.presentationTimes insertObject:framePTS atIndex:insertionIndex + 1];
> + [p_sys->storageObject.outputFrames insertObject:framePTS atIndex:insertionIndex + 1];
> + }
This if deserves a comment; see the commented sketch after this hunk.
> + }
> + }
> +}
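Something along these lines (untested, purely illustrative) would make the intent explicit.
Also note that, as written, shouldStop starts at YES so the while loop never seems to run,
and the second insertObject: presumably wants imageBufferObject rather than framePTS:

    /* keep both arrays sorted by PTS: frames arrive in decode order
     * but must be returned in presentation order */
    @synchronized(p_sys->storageObject) {
        id imageBufferObject = (__bridge id)imageBuffer;

        /* walk back to the last stored entry whose PTS is <= the new one */
        NSInteger insertionIndex = [p_sys->storageObject.presentationTimes count] - 1;
        while (insertionIndex >= 0) {
            NSNumber *aNumber = p_sys->storageObject.presentationTimes[insertionIndex];
            if ([aNumber longLongValue] <= [framePTS longLongValue])
                break;
            insertionIndex--;
        }

        if (insertionIndex + 1 == (NSInteger)[p_sys->storageObject.presentationTimes count]) {
            /* latest frame so far: append */
            [p_sys->storageObject.presentationTimes addObject:framePTS];
            [p_sys->storageObject.outputFrames addObject:imageBufferObject];
        } else {
            /* out-of-order frame: insert at the matching index in both arrays */
            [p_sys->storageObject.presentationTimes insertObject:framePTS
                                                         atIndex:insertionIndex + 1];
            [p_sys->storageObject.outputFrames insertObject:imageBufferObject
                                                    atIndex:insertionIndex + 1];
        }
    }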
> diff --git a/po/POTFILES.in b/po/POTFILES.in
> index 008df4a..43ef9eb 100644
> --- a/po/POTFILES.in
> +++ b/po/POTFILES.in
> @@ -413,6 +413,7 @@ modules/codec/telx.c
> modules/codec/theora.c
> modules/codec/twolame.c
> modules/codec/uleaddvaudio.c
> +modules/codec/videotoolbox.m
> modules/codec/vorbis.c
> modules/codec/vpx.c
> modules/codec/wmafixed/wma.c
With my kindest regards,
--
Jean-Baptiste Kempf
http://www.jbkempf.com/ - +33 672 704 734
Sent from my Electronic Device