References
- FFmpeg源代码简单分析:常见结构体的初始化和销毁(AVFormatContext,AVFrame等) ("A simple analysis of the FFmpeg source code: initialization and destruction of common structures"), Lei Xiaohua (雷霄骅), CSDN blog
Structures
- AVFormatContext: the top-level structure that ties everything together; mainly used for handling container formats (FLV/MKV/RMVB, etc.)
- FFmpeg: AVFormatContext Struct Reference
- AVIOContext: the structure for input/output (reading/writing files, RTMP and other protocols)
- FFmpeg: AVIOContext Struct Reference
- AVStream: the structure representing an audio/video stream
- FFmpeg: AVStream Struct Reference
- AVCodecContext: the audio/video encoding/decoding structure
- FFmpeg: AVCodecContext Struct Reference
- AVFrame: stores uncompressed data (RGB/YUV pixel data for video, PCM samples for audio)
- https://ffmpeg.org/doxygen/trunk/structAVFrame.html
- AVPacket: stores compressed data (e.g. H.264 bitstream data for video, AAC/MP3 bitstream data for audio)
- https://ffmpeg.org/doxygen/trunk/structAVPacket.html
| Structure | Initialization | Destruction |
| --- | --- | --- |
| AVFormatContext | avformat_alloc_context() | avformat_free_context() |
| AVIOContext | avio_alloc_context() | avio_context_free() |
| AVStream | avformat_new_stream() | avformat_free_context() |
| AVCodecContext | avcodec_alloc_context3() | avcodec_free_context() |
| AVPacket | av_init_packet() / av_new_packet() | av_packet_free() |
| AVFrame | av_frame_alloc() / av_image_fill_arrays() | av_frame_free() |
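- A minimal lifecycle sketch tying the table together (my own illustration, not taken from the referenced article; error handling is shortened):
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

static void lifecycle_demo(void)
{
    AVFormatContext *fmt_ctx = avformat_alloc_context();     /* AVFormatContext */
    AVCodecContext  *dec_ctx = avcodec_alloc_context3(NULL); /* AVCodecContext */
    AVPacket        *pkt     = av_packet_alloc();            /* AVPacket */
    AVFrame         *frame   = av_frame_alloc();             /* AVFrame */

    /* ... use the structures ... */

    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_free_context(&dec_ctx);
    avformat_free_context(fmt_ctx);   /* also frees any streams added to it */
}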
Initialization
- avformat_alloc_context()
- FFmpeg: Core functions
AVFormatContext *avformat_alloc_context(void)
{
    FFFormatContext *const si = av_mallocz(sizeof(*si));
    AVFormatContext *s;

    if (!si)
        return NULL;

    s = &si->pub;
    s->av_class  = &av_format_context_class;
    s->io_open   = io_open_default;
    s->io_close  = ff_format_io_close_default;
    s->io_close2 = io_close2_default;

    av_opt_set_defaults(s);

    si->pkt       = av_packet_alloc();
    si->parse_pkt = av_packet_alloc();
    if (!si->pkt || !si->parse_pkt) {
        avformat_free_context(s);
        return NULL;
    }

    si->shortest_end = AV_NOPTS_VALUE;

    return s;
}
- av_opt_set_defaults() is used to give the AVOption-backed fields their default values (a small usage sketch follows the two functions below)
void av_opt_set_defaults(void *s)
{
    av_opt_set_defaults2(s, 0, 0);
}

void av_opt_set_defaults2(void *s, int mask, int flags)
{
    const AVOption *opt = NULL;
    while ((opt = av_opt_next(s, opt))) {
        void *dst = ((uint8_t*)s) + opt->offset;

        if ((opt->flags & mask) != flags)
            continue;

        if (opt->flags & AV_OPT_FLAG_READONLY)
            continue;

        switch (opt->type) {
            case AV_OPT_TYPE_CONST:
                /* Nothing to be done here */
                break;
            case AV_OPT_TYPE_BOOL:
            case AV_OPT_TYPE_FLAGS:
            case AV_OPT_TYPE_INT:
            case AV_OPT_TYPE_INT64:
            case AV_OPT_TYPE_UINT64:
            case AV_OPT_TYPE_DURATION:
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
            case AV_OPT_TYPE_CHANNEL_LAYOUT:
FF_ENABLE_DEPRECATION_WARNINGS
#endif
            case AV_OPT_TYPE_PIXEL_FMT:
            case AV_OPT_TYPE_SAMPLE_FMT:
                write_number(s, opt, dst, 1, 1, opt->default_val.i64);
                break;
            case AV_OPT_TYPE_DOUBLE:
            case AV_OPT_TYPE_FLOAT: {
                double val;
                val = opt->default_val.dbl;
                write_number(s, opt, dst, val, 1, 1);
            }
            break;
            case AV_OPT_TYPE_RATIONAL: {
                AVRational val;
                val = av_d2q(opt->default_val.dbl, INT_MAX);
                write_number(s, opt, dst, 1, val.den, val.num);
            }
            break;
            case AV_OPT_TYPE_COLOR:
                set_string_color(s, opt, opt->default_val.str, dst);
                break;
            case AV_OPT_TYPE_STRING:
                set_string(s, opt, opt->default_val.str, dst);
                break;
            case AV_OPT_TYPE_IMAGE_SIZE:
                set_string_image_size(s, opt, opt->default_val.str, dst);
                break;
            case AV_OPT_TYPE_VIDEO_RATE:
                set_string_video_rate(s, opt, opt->default_val.str, dst);
                break;
            case AV_OPT_TYPE_BINARY:
                set_string_binary(s, opt, opt->default_val.str, dst);
                break;
            case AV_OPT_TYPE_CHLAYOUT:
                set_string_channel_layout(s, opt, opt->default_val.str, dst);
                break;
            case AV_OPT_TYPE_DICT:
                set_string_dict(s, opt, opt->default_val.str, dst);
                break;
            default:
                av_log(s, AV_LOG_DEBUG, "AVOption type %d of option %s not implemented yet\n",
                       opt->type, opt->name);
        }
    }
}
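- A small sketch of what this buys the caller (my own illustration): the AVOption-backed fields of a freshly allocated AVFormatContext already hold their defaults and can be read or overridden through the AVOption API before avformat_open_input().
#include <libavformat/avformat.h>
#include <libavutil/opt.h>

static void option_defaults_demo(void)
{
    AVFormatContext *s = avformat_alloc_context();
    int64_t probesize = 0;

    /* "probesize" is an AVOption of AVFormatContext; its default was filled in
     * by av_opt_set_defaults() inside avformat_alloc_context(). */
    av_opt_get_int(s, "probesize", 0, &probesize);

    /* Override the default before opening the input. */
    av_opt_set_int(s, "probesize", 10 * 1024 * 1024, 0);

    avformat_free_context(s);
}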
- avio_alloc_context()
- FFmpeg: libavformat/avio.h File Reference
- av_malloc() allocates memory for the FFIOContext
- then ffio_init_context() is called to complete the initialization (a custom-I/O usage sketch follows the source below)
AVIOContext *avio_alloc_context(
                  unsigned char *buffer,
                  int buffer_size,
                  int write_flag,
                  void *opaque,
                  int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
                  int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
                  int64_t (*seek)(void *opaque, int64_t offset, int whence))
{
    FFIOContext *s = av_malloc(sizeof(*s));
    if (!s)
        return NULL;
    ffio_init_context(s, buffer, buffer_size, write_flag, opaque,
                      read_packet, write_packet, seek);
    return &s->pub;
}
- ffio_init_context() assigns the buffer, function pointers and the other fields of the FFIOContext
void ffio_init_context(FFIOContext *ctx,
                  unsigned char *buffer,
                  int buffer_size,
                  int write_flag,
                  void *opaque,
                  int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
                  int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
                  int64_t (*seek)(void *opaque, int64_t offset, int whence))
{
    AVIOContext *const s = &ctx->pub;

    memset(ctx, 0, sizeof(*ctx));

    s->buffer      = buffer;
    ctx->orig_buffer_size =
    s->buffer_size = buffer_size;
    s->buf_ptr     = buffer;
    s->buf_ptr_max = buffer;
    s->opaque      = opaque;
    s->direct      = 0;

    url_resetbuf(s, write_flag ? AVIO_FLAG_WRITE : AVIO_FLAG_READ);

    s->write_packet    = write_packet;
    s->read_packet     = read_packet;
    s->seek            = seek;
    s->pos             = 0;
    s->eof_reached     = 0;
    s->error           = 0;
    s->seekable        = seek ? AVIO_SEEKABLE_NORMAL : 0;
    s->min_packet_size = 0;
    s->max_packet_size = 0;
    s->update_checksum = NULL;
    ctx->short_seek_threshold = SHORT_SEEK_THRESHOLD;

    if (!read_packet && !write_flag) {
        s->pos     = buffer_size;
        s->buf_end = s->buffer + buffer_size;
    }
    s->read_pause = NULL;
    s->read_seek  = NULL;

    s->write_data_type       = NULL;
    s->ignore_boundary_point = 0;
    ctx->current_type        = AVIO_DATA_MARKER_UNKNOWN;
    ctx->last_time           = AV_NOPTS_VALUE;
    ctx->short_seek_get      = NULL;
#if FF_API_AVIOCONTEXT_WRITTEN
FF_DISABLE_DEPRECATION_WARNINGS
    s->written = 0;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
}
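- The usual reason to call avio_alloc_context() directly is custom I/O, e.g. demuxing from a memory buffer. A condensed sketch in the spirit of FFmpeg's doc/examples/avio_reading.c (the struct and function names here are my own; error handling omitted):
#include <string.h>
#include <libavformat/avformat.h>
#include <libavutil/mem.h>

struct mem_src { const uint8_t *data; size_t size, pos; };

/* read_packet callback: copy up to buf_size bytes out of the memory source */
static int read_mem(void *opaque, uint8_t *buf, int buf_size)
{
    struct mem_src *src = opaque;
    size_t left = src->size - src->pos;
    if (left == 0)
        return AVERROR_EOF;
    if ((size_t)buf_size > left)
        buf_size = (int)left;
    memcpy(buf, src->data + src->pos, buf_size);
    src->pos += buf_size;
    return buf_size;
}

static int open_from_memory(AVFormatContext **out, struct mem_src *src)
{
    unsigned char *io_buf    = av_malloc(4096);
    AVIOContext   *avio_ctx  = avio_alloc_context(io_buf, 4096, 0 /* read-only */,
                                                  src, read_mem, NULL, NULL);
    AVFormatContext *fmt_ctx = avformat_alloc_context();

    fmt_ctx->pb = avio_ctx;                 /* attach the custom AVIOContext */
    *out = fmt_ctx;
    return avformat_open_input(out, NULL, NULL, NULL);
}
- Note that the buffer must come from av_malloc(); freeing it and the AVIOContext is shown in the Destruction section below.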
- avformat_new_stream()
- FFmpeg: Core functions
AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
{
    FFFormatContext *const si = ffformatcontext(s);
    FFStream *sti;
    AVStream *st;
    AVStream **streams;

    if (s->nb_streams >= s->max_streams) {
        av_log(s, AV_LOG_ERROR, "Number of streams exceeds max_streams parameter"
               " (%d), see the documentation if you wish to increase it\n",
               s->max_streams);
        return NULL;
    }
    streams = av_realloc_array(s->streams, s->nb_streams + 1, sizeof(*streams));
    if (!streams)
        return NULL;
    s->streams = streams;

    sti = av_mallocz(sizeof(*sti));
    if (!sti)
        return NULL;
    st = &sti->pub;

#if FF_API_AVSTREAM_CLASS
    st->av_class = &stream_class;
#endif

    st->codecpar = avcodec_parameters_alloc();
    if (!st->codecpar)
        goto fail;

    sti->avctx = avcodec_alloc_context3(NULL);
    if (!sti->avctx)
        goto fail;

    if (s->iformat) {
        sti->info = av_mallocz(sizeof(*sti->info));
        if (!sti->info)
            goto fail;

#if FF_API_R_FRAME_RATE
        sti->info->last_dts = AV_NOPTS_VALUE;
#endif
        sti->info->fps_first_dts = AV_NOPTS_VALUE;
        sti->info->fps_last_dts  = AV_NOPTS_VALUE;

        /* default pts setting is MPEG-like */
        avpriv_set_pts_info(st, 33, 1, 90000);

        /* we set the current DTS to 0 so that formats without any timestamps
         * but durations get some timestamps, formats with some unknown
         * timestamps have their first few packets buffered and the
         * timestamps corrected before they are returned to the user */
        sti->cur_dts = RELATIVE_TS_BASE;
    } else {
        sti->cur_dts = AV_NOPTS_VALUE;
    }

    st->index      = s->nb_streams;
    st->start_time = AV_NOPTS_VALUE;
    st->duration   = AV_NOPTS_VALUE;
    sti->first_dts = AV_NOPTS_VALUE;
    sti->probe_packets = s->max_probe_packets;
    sti->pts_wrap_reference = AV_NOPTS_VALUE;
    sti->pts_wrap_behavior  = AV_PTS_WRAP_IGNORE;

    sti->last_IP_pts = AV_NOPTS_VALUE;
    sti->last_dts_for_order_check = AV_NOPTS_VALUE;
    for (int i = 0; i < MAX_REORDER_DELAY + 1; i++)
        sti->pts_buffer[i] = AV_NOPTS_VALUE;

    st->sample_aspect_ratio = (AVRational) { 0, 1 };

    sti->inject_global_side_data = si->inject_global_side_data;

    sti->need_context_update = 1;

    s->streams[s->nb_streams++] = st;
    return st;
fail:
    ff_free_stream(&st);
    return NULL;
}
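- On the muxing side, avformat_new_stream() is how streams are added before writing the header. A rough sketch (my own; error handling omitted):
#include <libavformat/avformat.h>

static AVFormatContext *make_output(const char *filename)
{
    AVFormatContext *oc = NULL;
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);

    AVStream *st = avformat_new_stream(oc, NULL);
    st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codecpar->codec_id   = AV_CODEC_ID_H264;
    st->codecpar->width      = 1280;
    st->codecpar->height     = 720;
    st->time_base            = (AVRational){ 1, 25 };

    /* ... avio_open(&oc->pb, ...), avformat_write_header(), packets,
     *     av_write_trailer() ... */
    return oc;   /* avformat_free_context(oc) later frees the AVStream too */
}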
- avcodec_alloc_context3()
- FFmpeg: Core functions/structures.
- avformat_new_stream() allocates the per-stream codec context (sti->avctx above) through avcodec_alloc_context3():
AVCodecContext *avcodec_alloc_context3(const AVCodec *codec)
{
    AVCodecContext *avctx= av_malloc(sizeof(AVCodecContext));

    if (!avctx)
        return NULL;

    if (init_context_defaults(avctx, codec) < 0) {
        av_free(avctx);
        return NULL;
    }

    return avctx;
}
- avcodec_alloc_context3() delegates the default field values to init_context_defaults(), which zeroes the context and then fills it via av_opt_set_defaults2() and the codec's FFCodecDefault table:
static int init_context_defaults(AVCodecContext *s, const AVCodec *codec)
{
    const FFCodec *const codec2 = ffcodec(codec);
    int flags=0;
    memset(s, 0, sizeof(AVCodecContext));

    s->av_class = &av_codec_context_class;

    s->codec_type = codec ? codec->type : AVMEDIA_TYPE_UNKNOWN;
    if (codec) {
        s->codec = codec;
        s->codec_id = codec->id;
    }

    if(s->codec_type == AVMEDIA_TYPE_AUDIO)
        flags= AV_OPT_FLAG_AUDIO_PARAM;
    else if(s->codec_type == AVMEDIA_TYPE_VIDEO)
        flags= AV_OPT_FLAG_VIDEO_PARAM;
    else if(s->codec_type == AVMEDIA_TYPE_SUBTITLE)
        flags= AV_OPT_FLAG_SUBTITLE_PARAM;
    av_opt_set_defaults2(s, flags, flags);
    av_channel_layout_uninit(&s->ch_layout);

    s->time_base           = (AVRational){0,1};
    s->framerate           = (AVRational){ 0, 1 };
    s->pkt_timebase        = (AVRational){ 0, 1 };
    s->get_buffer2         = avcodec_default_get_buffer2;
    s->get_format          = avcodec_default_get_format;
    s->get_encode_buffer   = avcodec_default_get_encode_buffer;
    s->execute             = avcodec_default_execute;
    s->execute2            = avcodec_default_execute2;
    s->sample_aspect_ratio = (AVRational){0,1};
    s->ch_layout.order     = AV_CHANNEL_ORDER_UNSPEC;
    s->pix_fmt             = AV_PIX_FMT_NONE;
    s->sw_pix_fmt          = AV_PIX_FMT_NONE;
    s->sample_fmt          = AV_SAMPLE_FMT_NONE;

    s->reordered_opaque    = AV_NOPTS_VALUE;
    if(codec && codec2->priv_data_size){
        s->priv_data = av_mallocz(codec2->priv_data_size);
        if (!s->priv_data)
            return AVERROR(ENOMEM);
        if(codec->priv_class){
            *(const AVClass**)s->priv_data = codec->priv_class;
            av_opt_set_defaults(s->priv_data);
        }
    }
    if (codec && codec2->defaults) {
        int ret;
        const FFCodecDefault *d = codec2->defaults;
        while (d->key) {
            ret = av_opt_set(s, d->key, d->value, 0);
            av_assert0(ret >= 0);
            d++;
        }
    }
    return 0;
}
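- In typical demuxing code the context allocated here is filled from the stream's codecpar and then opened. A short decoder-setup sketch (my own; error handling omitted):
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

static AVCodecContext *open_decoder(AVFormatContext *fmt_ctx, int stream_index)
{
    AVStream *st = fmt_ctx->streams[stream_index];
    const AVCodec *dec = avcodec_find_decoder(st->codecpar->codec_id);
    AVCodecContext *dec_ctx = avcodec_alloc_context3(dec);

    avcodec_parameters_to_context(dec_ctx, st->codecpar); /* copy demuxed parameters */
    dec_ctx->pkt_timebase = st->time_base;
    avcodec_open2(dec_ctx, dec, NULL);
    return dec_ctx;   /* release later with avcodec_free_context(&dec_ctx) */
}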
- av_init_packet() (deprecated in current FFmpeg, kept under FF_API_INIT_PACKET; av_packet_alloc()/av_new_packet() are preferred)
#if FF_API_INIT_PACKET
void av_init_packet(AVPacket *pkt)
{
    pkt->pts             = AV_NOPTS_VALUE;
    pkt->dts             = AV_NOPTS_VALUE;
    pkt->pos             = -1;
    pkt->duration        = 0;
    pkt->flags           = 0;
    pkt->stream_index    = 0;
    pkt->buf             = NULL;
    pkt->side_data       = NULL;
    pkt->side_data_elems = 0;
    pkt->opaque          = NULL;
    pkt->opaque_ref      = NULL;
    pkt->time_base       = av_make_q(0, 1);
}
#endif
- av_new_packet()
- FFmpeg: AVPacket
int av_new_packet(AVPacket *pkt, int size)
{
    AVBufferRef *buf = NULL;
    int ret = packet_alloc(&buf, size);
    if (ret < 0)
        return ret;

    get_packet_defaults(pkt);
    pkt->buf  = buf;
    pkt->data = buf->data;
    pkt->size = size;

    return 0;
}
static int packet_alloc(AVBufferRef **buf, int size)
{
    int ret;
    if (size < 0 || size >= INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
        return AVERROR(EINVAL);

    ret = av_buffer_realloc(buf, size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (ret < 0)
        return ret;

    memset((*buf)->data + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    return 0;
}
static void get_packet_defaults(AVPacket *pkt)
{
    memset(pkt, 0, sizeof(*pkt));

    pkt->pts       = AV_NOPTS_VALUE;
    pkt->dts       = AV_NOPTS_VALUE;
    pkt->pos       = -1;
    pkt->time_base = av_make_q(0, 1);
}
- packet_alloc() calls av_buffer_realloc() to allocate the packet's data buffer; memset() then zeroes the AV_INPUT_BUFFER_PADDING_SIZE padding bytes that follow the payload. A usage sketch follows.
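- A usage sketch for the packet allocation API (my own; current FFmpeg code would normally start from a heap packet via av_packet_alloc() rather than a stack AVPacket plus av_init_packet()):
#include <string.h>
#include <libavcodec/avcodec.h>

static void packet_demo(const uint8_t *payload, int payload_size)
{
    AVPacket *pkt = av_packet_alloc();          /* packet with all fields at defaults */

    if (av_new_packet(pkt, payload_size) >= 0) {/* ref-counted payload + zeroed padding */
        memcpy(pkt->data, payload, payload_size);
        /* ... hand the packet to a muxer or decoder ... */
    }
    av_packet_free(&pkt);                       /* unrefs the buffer, frees the packet */
}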
- av_frame_alloc()
- FFmpeg: AVFrame
AVFrame *av_frame_alloc(void)
{
    AVFrame *frame = av_malloc(sizeof(*frame));

    if (!frame)
        return NULL;

    get_frame_defaults(frame);

    return frame;
}
static void get_frame_defaults(AVFrame *frame)
{
    memset(frame, 0, sizeof(*frame));

    frame->pts                   =
    frame->pkt_dts               = AV_NOPTS_VALUE;
    frame->best_effort_timestamp = AV_NOPTS_VALUE;
    frame->pkt_duration        = 0;
    frame->pkt_pos             = -1;
    frame->pkt_size            = -1;
    frame->time_base           = (AVRational){ 0, 1 };
    frame->key_frame           = 1;
    frame->sample_aspect_ratio = (AVRational){ 0, 1 };
    frame->format              = -1; /* unknown */
    frame->extended_data       = frame->data;
    frame->color_primaries     = AVCOL_PRI_UNSPECIFIED;
    frame->color_trc           = AVCOL_TRC_UNSPECIFIED;
    frame->colorspace          = AVCOL_SPC_UNSPECIFIED;
    frame->color_range         = AVCOL_RANGE_UNSPECIFIED;
    frame->chroma_location     = AVCHROMA_LOC_UNSPECIFIED;
    frame->flags               = 0;
}
- As the av_frame_alloc() source shows, this function does not allocate space for the AVFrame's pixel data.
- The pixel buffer therefore has to be allocated separately, for example with av_image_fill_arrays() (see the sketch after the image helpers below).
- avpicture_fill() is deprecated (replaced by av_image_fill_arrays())
- av_image_fill_arrays()
- FFmpeg: Image related
int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4],
                         const uint8_t *src, enum AVPixelFormat pix_fmt,
                         int width, int height, int align)
{
    int ret, i;

    ret = av_image_check_size(width, height, 0, NULL);
    if (ret < 0)
        return ret;

    ret = av_image_fill_linesizes(dst_linesize, pix_fmt, width);
    if (ret < 0)
        return ret;

    for (i = 0; i < 4; i++)
        dst_linesize[i] = FFALIGN(dst_linesize[i], align);

    return av_image_fill_pointers(dst_data, pix_fmt, height, (uint8_t *)src, dst_linesize);
}
- av_image_fill_arrays() is built on three functions: av_image_check_size(), av_image_fill_linesizes() and av_image_fill_pointers().
- av_image_check_size() checks that the given width and height are reasonable, i.e. neither too large nor negative.
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
{
    return av_image_check_size2(w, h, INT64_MAX, AV_PIX_FMT_NONE, log_offset, log_ctx);
}
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
{
    ImgUtils imgutils = {
        .class      = &imgutils_class,
        .log_offset = log_offset,
        .log_ctx    = log_ctx,
    };
    int64_t stride = av_image_get_linesize(pix_fmt, w, 0);
    if (stride <= 0)
        stride = 8LL*w;
    stride += 128*8;

    if ((int)w<=0 || (int)h<=0 || stride >= INT_MAX || stride*(uint64_t)(h+128) >= INT_MAX) {
        av_log(&imgutils, AV_LOG_ERROR, "Picture size %ux%u is invalid\n", w, h);
        return AVERROR(EINVAL);
    }

    if (max_pixels < INT64_MAX) {
        if (w*(int64_t)h > max_pixels) {
            av_log(&imgutils, AV_LOG_ERROR,
                   "Picture size %ux%u exceeds specified max pixel count %"PRId64", see the documentation if you wish to increase it\n",
                   w, h, max_pixels);
            return AVERROR(EINVAL);
        }
    }

    return 0;
}
- av_image_fill_linesizes() fills in dst_linesize (the line size of each plane).
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
{
    int i, ret;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    int max_step[4];      /* max pixel step for each plane */
    int max_step_comp[4]; /* the component for each plane which has the max pixel step */

    memset(linesizes, 0, 4*sizeof(linesizes[0]));

    if (!desc || desc->flags & AV_PIX_FMT_FLAG_HWACCEL)
        return AVERROR(EINVAL);

    av_image_fill_max_pixsteps(max_step, max_step_comp, desc);
    for (i = 0; i < 4; i++) {
        if ((ret = image_get_linesize(width, i, max_step[i], max_step_comp[i], desc)) < 0)
            return ret;
        linesizes[i] = ret;
    }

    return 0;
}
- av_image_fill_pointers() fills in dst_data (the pointer to each plane).
int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height,
                           uint8_t *ptr, const int linesizes[4])
{
    int i, ret;
    ptrdiff_t linesizes1[4];
    size_t sizes[4];

    memset(data, 0, sizeof(data[0])*4);

    for (i = 0; i < 4; i++)
        linesizes1[i] = linesizes[i];

    ret = av_image_fill_plane_sizes(sizes, pix_fmt, height, linesizes1);
    if (ret < 0)
        return ret;

    ret = 0;
    for (i = 0; i < 4; i++) {
        if (sizes[i] > INT_MAX - ret)
            return AVERROR(EINVAL);
        ret += sizes[i];
    }

    if (!ptr)
        return ret;

    data[0] = ptr;
    for (i = 1; i < 4 && sizes[i]; i++)
        data[i] = data[i - 1] + sizes[i - 1];

    return ret;
}
Destruction
- avformat_free_context()
- FFmpeg: Core functions
- avformat_free_context() calls a whole range of cleanup helpers, such as av_opt_free(), av_freep() and av_dict_free(), to release the different kinds of members (a note on avformat_close_input() follows the source below)
void avformat_free_context(AVFormatContext *s)
{
    FFFormatContext *si;

    if (!s)
        return;
    si = ffformatcontext(s);

    if (s->oformat && s->oformat->deinit && si->initialized)
        s->oformat->deinit(s);

    av_opt_free(s);
    if (s->iformat && s->iformat->priv_class && s->priv_data)
        av_opt_free(s->priv_data);
    if (s->oformat && s->oformat->priv_class && s->priv_data)
        av_opt_free(s->priv_data);

    for (unsigned i = 0; i < s->nb_streams; i++)
        ff_free_stream(&s->streams[i]);
    s->nb_streams = 0;

    for (unsigned i = 0; i < s->nb_programs; i++) {
        av_dict_free(&s->programs[i]->metadata);
        av_freep(&s->programs[i]->stream_index);
        av_freep(&s->programs[i]);
    }
    s->nb_programs = 0;
    av_freep(&s->programs);
    av_freep(&s->priv_data);
    while (s->nb_chapters--) {
        av_dict_free(&s->chapters[s->nb_chapters]->metadata);
        av_freep(&s->chapters[s->nb_chapters]);
    }
    av_freep(&s->chapters);
    av_dict_free(&s->metadata);
    av_dict_free(&si->id3v2_meta);
    av_packet_free(&si->pkt);
    av_packet_free(&si->parse_pkt);
    av_freep(&s->streams);
    ff_flush_packet_queue(s);
    av_freep(&s->url);
    av_free(s);
}
- Here we take a closer look at ff_free_stream(), the function that frees an AVStream.
- Like avformat_free_context(), it releases each kind of member with the matching helper (av_freep(), av_dict_free(), avcodec_free_context(), avcodec_parameters_free(), etc.)
void ff_free_stream(AVStream **pst)
{
    AVStream *st = *pst;
    FFStream *const sti = ffstream(st);

    if (!st)
        return;

    for (int i = 0; i < st->nb_side_data; i++)
        av_freep(&st->side_data[i].data);
    av_freep(&st->side_data);

    if (st->attached_pic.data)
        av_packet_unref(&st->attached_pic);

    av_parser_close(sti->parser);
    avcodec_free_context(&sti->avctx);
    av_bsf_free(&sti->bsfc);
    av_freep(&sti->priv_pts);
    av_freep(&sti->index_entries);
    av_freep(&sti->probe_data.buf);

    av_bsf_free(&sti->extract_extradata.bsf);

    if (sti->info) {
        av_freep(&sti->info->duration_error);
        av_freep(&sti->info);
    }

    av_dict_free(&st->metadata);
    avcodec_parameters_free(&st->codecpar);
    av_freep(&st->priv_data);

    av_freep(pst);
}
- If a parser was in use, av_parser_close() is called to shut it down:
void av_parser_close(AVCodecParserContext *s)
{
    if (s) {
        if (s->parser->parser_close)
            s->parser->parser_close(s);
        av_freep(&s->priv_data);
        av_free(s);
    }
}
- avio_context_free()
- FFmpeg: libavformat/avio.h File Reference
- avcodec_free_context()
- FFmpeg: Core functions/structures.
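- For these two, a cleanup sketch matching the custom-I/O example in the Initialization section (my own; note that the I/O buffer is owned by the caller and may have been replaced internally, so it is released through avio_ctx->buffer rather than the original pointer):
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

static void close_custom_io(AVIOContext **avio_ctx, AVCodecContext **dec_ctx)
{
    if (*avio_ctx) {
        av_freep(&(*avio_ctx)->buffer);  /* free whatever buffer the context holds now */
        avio_context_free(avio_ctx);     /* free the AVIOContext, set the pointer to NULL */
    }
    avcodec_free_context(dec_ctx);       /* closes the codec if open, frees the context */
}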
- av_frame_free()
- FFmpeg: AVFrame
void av_frame_free(AVFrame **frame)
{
    if (!frame || !*frame)
        return;

    av_frame_unref(*frame);
    av_freep(frame);
}
- Before freeing the AVFrame structure itself, av_frame_free() first calls av_frame_unref(). av_frame_unref() is also a public FFmpeg API: it releases the reference-counted buffers the AVFrame points to (frame->buf[], extended_buf, side data, hw_frames_ctx, etc.) and resets the fields back to their defaults.
- Calling it first ensures that every buffer referenced by the frame is released, so that freeing the AVFrame itself does not leak; a reuse sketch follows the source below.
- The code is as follows:
void av_frame_unref(AVFrame *frame)
{
    int i;

    if (!frame)
        return;

    wipe_side_data(frame);

    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
        av_buffer_unref(&frame->buf[i]);
    for (i = 0; i < frame->nb_extended_buf; i++)
        av_buffer_unref(&frame->extended_buf[i]);
    av_freep(&frame->extended_buf);
    av_dict_free(&frame->metadata);

    av_buffer_unref(&frame->hw_frames_ctx);

    av_buffer_unref(&frame->opaque_ref);
    av_buffer_unref(&frame->private_ref);

    if (frame->extended_data != frame->data)
        av_freep(&frame->extended_data);

    av_channel_layout_uninit(&frame->ch_layout);

    get_frame_defaults(frame);
}
- av_packet_free()
- As the code shows, av_packet_free() calls av_packet_unref() to release the data referenced by the AVPacket,
- and av_packet_unref() in turn calls av_packet_free_side_data() to release the side_data (extra data that the container format may provide). A demuxing-loop sketch follows the source below.
void av_packet_free(AVPacket **pkt)
{
    if (!pkt || !*pkt)
        return;

    av_packet_unref(*pkt);
    av_freep(pkt);
}
void av_packet_unref(AVPacket *pkt)
{
    av_packet_free_side_data(pkt);
    av_buffer_unref(&pkt->opaque_ref);
    av_buffer_unref(&pkt->buf);
    get_packet_defaults(pkt);
}