There are plenty of articles online about using filters from the command line; together with the official ffmpeg documentation they make most requirements easy to meet. So this post only covers how to drive a filter from code to get the desired effect.
Below is the declaration of the blend filter in ffmpeg (based on the latest ffmpeg 2.8 release):
AVFilter ff_vf_blend = {
    .name          = "blend",
    .description   = NULL_IF_CONFIG_SMALL("Blend two video frames into each other."),
    .init          = init,
    .uninit        = uninit,
    .priv_size     = sizeof(BlendContext),
    .query_formats = query_formats,
    .inputs        = blend_inputs,
    .outputs       = blend_outputs,
    .priv_class    = &blend_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};
Almost every filter in ffmpeg follows the structure above. The key fields are:
name: the identifier you pass to avfilter_get_by_name() to look up the filter.
inputs: describes the filter's input pads. Each input pad typically has a name (unique identifier), a type (video or audio), and a filter_frame callback (the function that receives input frames; filters that take two AVFrame inputs usually end up calling ff_dualinput_filter_frame).
outputs: describes the filter's output pads; output is pulled through the pad's request_frame callback. blend's own pad tables are sketched below.
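For orientation, here is roughly what those pad tables look like for blend (a paraphrase of libavfilter/vf_blend.c in the 2.8 tree, not a verbatim copy; the exact field set varies between versions):

static const AVFilterPad blend_inputs[] = {
    {
        .name         = "top",              /* unique identifier of the pad    */
        .type         = AVMEDIA_TYPE_VIDEO, /* video | audio                   */
        .filter_frame = filter_frame,       /* forwards to the dualinput code  */
    },{
        .name         = "bottom",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad blend_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,     /* output is pulled through here   */
    },
    { NULL }
};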
So using the blend filter comes down to the following:
1. blend needs two inputs, so we create two buffer (source) filters and wire them to blend's two input pads with avfilter_link() to feed it data;
2. blend has one output, so we create a buffersink filter and link it the same way to pull the filtered result. (The same chain can also be built from a description string; see the sketch right after this list.)
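As an aside, instead of linking filters by hand you can describe the chain as a string and let avfilter_graph_parse_ptr() build it. A minimal sketch, assuming the buffer sources and buffersink have already been created exactly as in the full program below:

/* Sketch: build the same blend chain from a textual description.
 * src_top / src_bottom / sink are the already-created buffer and
 * buffersink contexts; "in0"/"in1"/"out" are just labels in the string. */
static int parse_graph_sketch(AVFilterGraph *graph, AVFilterContext *src_top,
                              AVFilterContext *src_bottom, AVFilterContext *sink)
{
    const char *desc = "[in0][in1]blend=all_expr='A*(if(gte(T,10),1,T/10))"
                       "+B*(1-(if(gte(T,10),1,T/10)))',format=yuv420p[out]";
    AVFilterInOut *out0 = avfilter_inout_alloc(); /* open outputs of our sources */
    AVFilterInOut *out1 = avfilter_inout_alloc();
    AVFilterInOut *in   = avfilter_inout_alloc(); /* open input of our sink      */
    int ret;

    out0->name = av_strdup("in0"); out0->filter_ctx = src_top;    out0->pad_idx = 0; out0->next = out1;
    out1->name = av_strdup("in1"); out1->filter_ctx = src_bottom; out1->pad_idx = 0; out1->next = NULL;
    in->name   = av_strdup("out"); in->filter_ctx   = sink;       in->pad_idx  = 0;  in->next   = NULL;

    ret = avfilter_graph_parse_ptr(graph, desc, &in, &out0, NULL);
    avfilter_inout_free(&in);
    avfilter_inout_free(&out0);
    if (ret >= 0)
        ret = avfilter_graph_config(graph, NULL);
    return ret;
}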
Expressed in code:
#define __STDC_CONSTANT_MACROS

#include <stdio.h>

#ifdef _WIN32
//Windows (built as C++, hence the unconditional extern "C")
#define snprintf _snprintf
extern "C"
{
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavutil/avutil.h"
#include "libavutil/imgutils.h"
#include "libavformat/avformat.h"
}
#else
//Linux...
#ifdef __cplusplus
extern "C"
{
#endif
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
#include <libavformat/avformat.h> /* needed for the avformat_* calls below */
#ifdef __cplusplus
}
#endif
#endif


static AVFormatContext *pFormatCtx;
static AVCodecContext *pCodecCtx;
static int video_stream_index = -1;

AVFilterContext *inputContexts1, *inputContexts2;
AVFilterContext *outputContext;
AVFilterGraph *graph;

static int init_filter_graph(AVFilterGraph **pGraph, AVFilterContext **pInput1, AVFilterContext **pInput2, AVFilterContext **pOutput)
{
    AVFilterGraph *tFilterGraph;
    AVFilterContext *tBufferContext1;
    AVFilter *tBuffer1;
    AVFilterContext *tBufferContext2;
    AVFilter *tBuffer2;
    AVFilterContext *tOverlayContext;
    AVFilter *tOverlay;
    AVFilterContext *tBufferSinkContext;
    AVFilter *tBufferSink;
    AVFilter *tFormat;
    AVFilterContext *tFormatContext;

    int tError;

    /* Create a new filtergraph, which will contain all the filters. */
    tFilterGraph = avfilter_graph_alloc();
    /* Set these to match your actual input video; pix_fmt=0 is AV_PIX_FMT_YUV420P. */
    const char *args = "video_size=512x288:pix_fmt=0:time_base=1/30:pixel_aspect=1/1";

    if (!tFilterGraph) {
        return -1;
    }

    { // BUFFER FILTER 1
        tBuffer1 = avfilter_get_by_name("buffer");
        if (!tBuffer1) {
            return -1;
        }
        int ret = avfilter_graph_create_filter(&tBufferContext1, tBuffer1, "top",
                                               args, NULL, tFilterGraph);
        if (ret < 0) {
            printf("Cannot create buffer source\n");
            return ret;
        }
    }

    { // BUFFER FILTER 2
        tBuffer2 = avfilter_get_by_name("buffer");
        if (!tBuffer2) {
            return -1;
        }
        int ret = avfilter_graph_create_filter(&tBufferContext2, tBuffer2, "bottom",
                                               args, NULL, tFilterGraph);
        if (ret < 0) {
            printf("Cannot create buffer source\n");
            return ret;
        }
    }

    { // BLEND FILTER
        tOverlay = avfilter_get_by_name("blend");
        if (!tOverlay) {
            return -1;
        }
        tOverlayContext = avfilter_graph_alloc_filter(tFilterGraph, tOverlay, "blend");
        if (!tOverlayContext) {
            return -1;
        }
        AVDictionary *tOptionsDict = NULL;
        av_dict_set(&tOptionsDict, "all_mode", "normal", 0);
        av_dict_set(&tOptionsDict, "all_opacity", "1", 0);
        /* Fade the top input in over the first 10 seconds; see the official
         * ffmpeg documentation for the blend expression syntax. */
        av_dict_set(&tOptionsDict, "all_expr", "A*(if(gte(T,10),1,T/10))+B*(1-(if(gte(T,10),1,T/10)))", 0);
        tError = avfilter_init_dict(tOverlayContext, &tOptionsDict);
        av_dict_free(&tOptionsDict);
        if (tError < 0) {
            return tError;
        }
    }

    { // FORMAT FILTER
        tFormat = avfilter_get_by_name("format");
        if (!tFormat) {
            // Could not find the format filter.
            return -1;
        }

        tFormatContext = avfilter_graph_alloc_filter(tFilterGraph, tFormat, "format");
        if (!tFormatContext) {
            // Could not allocate the format instance.
            return -1;
        }
        AVDictionary *tOptionsDict = NULL;
        av_dict_set(&tOptionsDict, "pix_fmts", "yuv420p", 0);
        tError = avfilter_init_dict(tFormatContext, &tOptionsDict);
        av_dict_free(&tOptionsDict);
        if (tError < 0) {
            // Could not initialize the format filter.
            return tError;
        }
    }

    { // BUFFERSINK FILTER
        tBufferSink = avfilter_get_by_name("buffersink");
        if (!tBufferSink) {
            return -1;
        }

        tBufferSinkContext = avfilter_graph_alloc_filter(tFilterGraph, tBufferSink, "sink");
        if (!tBufferSinkContext) {
            return -1;
        }

        tError = avfilter_init_str(tBufferSinkContext, NULL);
        if (tError < 0) {
            return tError;
        }
    }

    // Link the graph: buffer1 -> blend:0, buffer2 -> blend:1, blend -> format -> buffersink
    tError = avfilter_link(tBufferContext1, 0, tOverlayContext, 0);
    if (tError >= 0)
        tError = avfilter_link(tBufferContext2, 0, tOverlayContext, 1);
    if (tError >= 0)
        tError = avfilter_link(tOverlayContext, 0, tFormatContext, 0);
    if (tError >= 0)
        tError = avfilter_link(tFormatContext, 0, tBufferSinkContext, 0);
    if (tError < 0) { // Error connecting filters.
        return tError;
    }

    tError = avfilter_graph_config(tFilterGraph, NULL);
    if (tError < 0) {
        return tError;
    }

    *pGraph = tFilterGraph;
    *pInput1 = tBufferContext1;
    *pInput2 = tBufferContext2;
    *pOutput = tBufferSinkContext;

    return 0;
}

static int open_input_file(const char *filename)
{
    int ret;
    AVCodec *dec;

    if ((ret = avformat_open_input(&pFormatCtx, filename, NULL, NULL)) < 0) {
        printf("Cannot open input file\n");
        return ret;
    }

    if ((ret = avformat_find_stream_info(pFormatCtx, NULL)) < 0) {
        printf("Cannot find stream information\n");
        return ret;
    }

    /* select the video stream */
    ret = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
    if (ret < 0) {
        printf("Cannot find a video stream in the input file\n");
        return ret;
    }
    video_stream_index = ret;
    pCodecCtx = pFormatCtx->streams[video_stream_index]->codec;

    /* init the video decoder */
    if ((ret = avcodec_open2(pCodecCtx, dec, NULL)) < 0) {
        printf("Cannot open video decoder\n");
        return ret;
    }

    return 0;
}


int main(int argc, char* argv[])
{
    av_register_all();
    avfilter_register_all();

    if ((open_input_file("cuc_ieschool.flv")) < 0)
        return 1;

    if (init_filter_graph(&graph, &inputContexts1, &inputContexts2, &outputContext) < 0)
        return 1;

    int ret;
    AVFrame *frame_in;
    unsigned char *frame_buffer_in;

    //Input YUV
    FILE *fp_in = fopen("512x288.yuv", "rb+");
    if (fp_in == NULL) {
        printf("Error open input file.\n");
        return -1;
    }
    //Output YUV
    FILE *fp_yuv = fopen("test.yuv", "wb+");
    int in_width = 512;
    int in_height = 288;

    frame_in = av_frame_alloc();
    frame_buffer_in = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, in_width, in_height, 1));
    av_image_fill_arrays(frame_in->data, frame_in->linesize, frame_buffer_in,
                         AV_PIX_FMT_YUV420P, in_width, in_height, 1);

    frame_in->width = in_width;
    frame_in->height = in_height;
    frame_in->format = AV_PIX_FMT_YUV420P;

    AVPacket packet;
    AVFrame *pFrame = av_frame_alloc();
    AVFrame *pFrame_out = av_frame_alloc();
    int count = 5; // number of blended output frames to save
    /* For easier observation, skip ahead and keep the 10th picture of the YUV
     * file, then blend that single picture with every decoded frame. To blend
     * two videos, read a fresh YUV frame per decoded frame instead. Note that
     * blend only produces output once BOTH inputs have a frame available. */
    int readonce = 10;
    while (readonce-- > 0) {
        //get one picture from the yuv file
        if (fread(frame_buffer_in, 1, in_width * in_height * 3 / 2, fp_in) != in_width * in_height * 3 / 2) {
            return 1;
        }
    }
    //input Y, U, V plane pointers into the raw buffer
    frame_in->data[0] = frame_buffer_in;
    frame_in->data[1] = frame_buffer_in + in_width * in_height;
    frame_in->data[2] = frame_buffer_in + in_width * in_height * 5 / 4;

    while (1) {
        //read one packet.
        ret = av_read_frame(pFormatCtx, &packet);
        if (ret < 0)
            break;
        if (packet.stream_index == video_stream_index) {
            int got_frame = 0;
            //decode
            ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_frame, &packet);
            if (ret < 0) {
                printf("Error decoding video\n");
                break;
            }
            //success
            if (got_frame) {
                pFrame->pts = av_frame_get_best_effort_timestamp(pFrame);
                /* Copy the timestamp BEFORE pushing the decoded frame:
                 * av_buffersrc_add_frame() resets a refcounted frame, so
                 * pFrame->pts is no longer valid afterwards. */
                frame_in->pts = pFrame->pts;
                /* push the decoded frame into the filtergraph */
                if (av_buffersrc_add_frame(inputContexts1, pFrame) < 0) {
                    printf("Error while feeding the filtergraph\n");
                    break;
                }
                if (av_buffersrc_add_frame(inputContexts2, frame_in) < 0) {
                    printf("Error while add frame.\n");
                    break;
                }
                /* pull filtered pictures from the filtergraph */
                while (1) {
                    ret = av_buffersink_get_frame(outputContext, pFrame_out);
                    if (ret < 0)
                        break;
                    if (pFrame_out->format == AV_PIX_FMT_YUV420P && count-- > 0) {
                        //write Y, U, V planes line by line (linesize may exceed width)
                        for (int i = 0; i < pFrame_out->height; i++) {
                            fwrite(pFrame_out->data[0] + pFrame_out->linesize[0] * i, 1, pFrame_out->width, fp_yuv);
                        }
                        for (int i = 0; i < pFrame_out->height / 2; i++) {
                            fwrite(pFrame_out->data[1] + pFrame_out->linesize[1] * i, 1, pFrame_out->width / 2, fp_yuv);
                        }
                        for (int i = 0; i < pFrame_out->height / 2; i++) {
                            fwrite(pFrame_out->data[2] + pFrame_out->linesize[2] * i, 1, pFrame_out->width / 2, fp_yuv);
                        }
                        printf("Process 1 frame!\n");
                    }
                    av_frame_unref(pFrame_out);
                }
            }
            av_frame_unref(pFrame);
        }
        av_free_packet(&packet);
    }

    fclose(fp_in);
    fclose(fp_yuv);

    av_frame_free(&pFrame);
    av_frame_free(&pFrame_out);
    av_frame_free(&frame_in);
    av_free(frame_buffer_in);
    avfilter_graph_free(&graph);

    return 0;
}
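One detail the listing glosses over is draining: the filtergraph is never told that its inputs have ended, so frames still buffered inside blend are silently dropped at exit. A minimal flush sketch, assuming the same contexts and write logic as above, run once after the read loop:

/* Passing NULL to av_buffersrc_add_frame() marks end-of-stream on a source. */
static void flush_graph(AVFilterContext *src1, AVFilterContext *src2,
                        AVFilterContext *sink, AVFrame *out)
{
    av_buffersrc_add_frame(src1, NULL);
    av_buffersrc_add_frame(src2, NULL);
    /* Drain until the sink reports AVERROR_EOF. */
    while (av_buffersink_get_frame(sink, out) >= 0) {
        /* ...write the frame exactly as in the main loop... */
        av_frame_unref(out);
    }
}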
Project files (Baidu netdisk): https://pan.baidu.com/s/1pLi6px5
Another variant of the same approach, a moving text watermark: https://pan.baidu.com/s/1eSJw9we (sketched below).
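The watermark variant follows the same pattern with a single buffer source feeding ffmpeg's drawtext filter (requires an ffmpeg built with libfreetype). A hedged sketch of just the filter setup; the font path, text, and motion expression here are illustrative assumptions, not taken from the linked code:

/* Sketch: a drawtext filter whose x position scrolls with time.
 * The option names are standard drawtext options; fontfile is an assumption. */
AVFilter *drawtext = avfilter_get_by_name("drawtext");
AVFilterContext *drawtext_ctx =
    avfilter_graph_alloc_filter(tFilterGraph, drawtext, "watermark");
AVDictionary *opts = NULL;
av_dict_set(&opts, "fontfile", "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 0);
av_dict_set(&opts, "text", "hello", 0);
av_dict_set(&opts, "fontsize", "24", 0);
av_dict_set(&opts, "fontcolor", "white", 0);
av_dict_set(&opts, "x", "mod(100*t,w)", 0);  /* scroll horizontally over time */
av_dict_set(&opts, "y", "h-line_h-10", 0);   /* pin near the bottom edge      */
avfilter_init_dict(drawtext_ctx, &opts);
av_dict_free(&opts);
/* then link: buffer -> drawtext -> format -> buffersink, as before */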