How do I encode YUV420 data together with text using the filter approach?
I would like to use FFmpeg's filter mechanism to encode YUV420 data with text overlaid on it. How should I go about this?
So far I have only managed to encode the raw YUV420 data to H.264.
I finally managed to use FFmpeg's filter feature to overlay text onto the video stream, and it was not easy!
I am working against the FFmpeg 1.0.7 code base.
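As a quick sanity check, the same drawtext filter can first be tried with the ffmpeg command-line tool; this assumes a build configured with --enable-libfreetype, otherwise the drawtext filter is not available:

    ffmpeg -i input.h264 -vf "drawtext=fontfile=simfont.ttf:fontcolor=white:shadowcolor=black:text='测试视频':x=10:y=10" -c:v libx264 out.h264

If the text renders correctly there, the identical filter string can be reused in the code.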
Part of the code is below. It writes the filtered frames out as raw YUV; a short sketch at the end of this post shows how the same picref could instead be fed to an H.264 encoder.
// List of pixel formats accepted at the sink; PIX_FMT_YUV420P, PIX_FMT_GRAY8 and PIX_FMT_RGB24 all work
//enum PixelFormat pix_fmts[] = { PIX_FMT_GRAY8, PIX_FMT_NONE };
// Initialize the filter graph and its endpoints
static int init_filters(const char *filters_descr)
{
    char args[512];
    int ret;
    AVFilter *buffersrc  = avfilter_get_by_name("buffer");
    AVFilter *buffersink = avfilter_get_by_name("ffbuffersink");
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
    AVBufferSinkParams *buffersink_params;

    filter_graph = avfilter_graph_alloc();

    /* buffer video source: the decoded frames from the decoder will be inserted here. */
    _snprintf(args, sizeof(args),
              "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
              dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
              dec_ctx->time_base.num, dec_ctx->time_base.den,
              dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                       args, NULL, filter_graph);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
        return ret;
    }

    /* buffer video sink: to terminate the filter chain. */
    buffersink_params = av_buffersink_params_alloc();
    buffersink_params->pixel_fmts = pix_fmts;
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                       NULL, buffersink_params, filter_graph);
    av_free(buffersink_params);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
        return ret;
    }

    /* Endpoints for the filter graph. */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    inputs->name        = av_strdup("out");
    inputs->filter_ctx  = buffersink_ctx;
    inputs->pad_idx     = 0;
    inputs->next        = NULL;

    if ((ret = avfilter_graph_parse(filter_graph, filters_descr, &inputs, &outputs, NULL)) < 0)
        return ret;
    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        return ret;
    return 0;
}
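Note on the endpoint wiring: the AVFilterInOut entry named "in" describes the free output pad of the buffer source, while the entry named "out" describes the free input pad of the sink, so avfilter_graph_parse() ends up building buffer -> drawtext -> ffbuffersink. Getting the two lists swapped is a common reason the graph fails to configure.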
char *outfilename = "D:\\out.yuv";
FILE *fp = fopen(outfilename, "a+b"); // append: all filtered frames go into the same file
//fp = fopen(outfilename, "wb"); // alternative: overwrite, one file per run
// filter description: draw the text "测试视频" in white with a black shadow at (10,10)
const char *filter_descr = "drawtext=fontfile=simfont.ttf:fontcolor=white:shadowcolor=black:text='测试视频':x=10:y=10";
int ret = 0;
AVPacket packet;
AVFrame m_frame;
int got_frame;

avcodec_register_all();
av_register_all();
avfilter_register_all();

if ((ret = open_input_file("input.h264")) < 0)
    goto end;
if ((ret = init_filters(filter_descr)) < 0)
    goto end;

/* read all packets */
while (1)
{
    AVFilterBufferRef *picref;
    if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
        break;
    if (packet.stream_index != video_stream_index)
    {
        av_free_packet(&packet); /* don't leak packets from other streams */
        continue;
    }

    avcodec_get_frame_defaults(&m_frame);
    got_frame = 0;
    ret = avcodec_decode_video2(dec_ctx, &m_frame, &got_frame, &packet);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
        break;
    }

    if (got_frame)
    {
        m_frame.pts = av_frame_get_best_effort_timestamp(&m_frame);

        /* push the decoded frame into the filter graph */
        if (av_buffersrc_add_frame(buffersrc_ctx, &m_frame, 0) < 0)
        {
            av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
            break;
        }

        /* pull filtered pictures from the sink */
        while (1)
        {
            ret = av_buffersink_get_buffer_ref(buffersink_ctx, &picref, 0);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                break;
            if (ret < 0)
                goto end;
            if (picref)
            {
                /* NOTE: this assumes the Y/U/V planes are stored contiguously with no
                   stride padding (linesize[i] == plane width); otherwise write each
                   plane line by line using picref->linesize[] */
                fwrite(picref->data[0], 1, picref->buf->w * picref->buf->h * 3 / 2, fp);
                fflush(fp); // fclose(fp); if writing one file per frame
                //display_picref(picref, buffersink_ctx->inputs[0]->time_base);
                avfilter_unref_bufferp(&picref);
            }
        }
    }
    av_free_packet(&packet);
}
end:
avfilter_graph_free(&filter_graph);
if (dec_ctx)
    avcodec_close(dec_ctx);
avformat_close_input(&fmt_ctx);
if (fp)
    fclose(fp);
if (ret < 0 && ret != AVERROR_EOF)
{
    char buf[1024];
    av_strerror(ret, buf, sizeof(buf));
    fprintf(stderr, "Error occurred: %s\n", buf);
    exit(1);
}
MessageBox("Decoding finished!");
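The loop above only dumps raw YUV with the text burned in. To get from there to the goal of the original question, the filtered picref can be handed to an H.264 encoder inside the same inner loop, right before avfilter_unref_bufferp(). Below is a minimal sketch against the FFmpeg 1.0.x API; enc_ctx (an AVCodecContext already opened for H.264, e.g. via libx264) and out264_fp (the output file) are assumed placeholder names:

    /* sketch: encode one filtered picture to H.264 (FFmpeg 1.0.x API) */
    AVFrame enc_frame;
    AVPacket enc_pkt;
    int got_packet = 0;

    avcodec_get_frame_defaults(&enc_frame);
    /* borrow the filtered picture: copy plane pointers and strides, no pixel copy */
    enc_frame.data[0]     = picref->data[0];
    enc_frame.data[1]     = picref->data[1];
    enc_frame.data[2]     = picref->data[2];
    enc_frame.linesize[0] = picref->linesize[0];
    enc_frame.linesize[1] = picref->linesize[1];
    enc_frame.linesize[2] = picref->linesize[2];
    enc_frame.width  = picref->buf->w;
    enc_frame.height = picref->buf->h;
    enc_frame.format = picref->format;
    enc_frame.pts    = picref->pts;   /* may need av_rescale_q() into enc_ctx->time_base */

    av_init_packet(&enc_pkt);
    enc_pkt.data = NULL;   /* let avcodec_encode_video2() allocate the payload */
    enc_pkt.size = 0;

    if (avcodec_encode_video2(enc_ctx, &enc_pkt, &enc_frame, &got_packet) >= 0 && got_packet)
    {
        fwrite(enc_pkt.data, 1, enc_pkt.size, out264_fp);   /* raw Annex-B H.264 */
        av_free_packet(&enc_pkt);
    }

A complete implementation would also flush the encoder at end of stream by calling avcodec_encode_video2() with a NULL frame until got_packet stays 0.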
http://bbs.csdn.net/topics/390498269