tonylight2016

transcoding

Dec 1st, 2020
1,214
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
  1. /*
  2.  * Copyright (c) 2010 Nicolas George
  3.  * Copyright (c) 2011 Stefano Sabatini
  4.  * Copyright (c) 2014 Andrey Utkin
  5.  *
  6.  * Permission is hereby granted, free of charge, to any person obtaining a copy
  7.  * of this software and associated documentation files (the "Software"), to deal
  8.  * in the Software without restriction, including without limitation the rights
  9.  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10.  * copies of the Software, and to permit persons to whom the Software is
  11.  * furnished to do so, subject to the following conditions:
  12.  *
  13.  * The above copyright notice and this permission notice shall be included in
  14.  * all copies or substantial portions of the Software.
  15.  *
  16.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20.  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21.  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22.  * THE SOFTWARE.
  23.  */
  24.  
  25. /**
  26.  * @file
  27.  * API example for demuxing, decoding, filtering, encoding and muxing
  28.  * @example transcoding.c
  29.  */
  30.  
  31. #include <libavcodec/avcodec.h>
  32. #include <libavformat/avformat.h>
  33. #include <libavfilter/buffersink.h>
  34. #include <libavfilter/buffersrc.h>
  35. #include <libavutil/opt.h>
  36. #include <libavutil/pixdesc.h>
  37.  
/* Global demuxer and muxer contexts shared by all helpers in this example. */
static AVFormatContext *ifmt_ctx;
static AVFormatContext *ofmt_ctx;
/* Per-stream filtering state: the buffer source the decoder feeds, the
 * buffer sink packets are pulled from, the graph that owns both, and a
 * reusable frame for filtered output. */
typedef struct FilteringContext {
    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;
    AVFilterGraph *filter_graph;

    AVFrame *filtered_frame;
} FilteringContext;
/* One entry per input stream; a NULL filter_graph means the stream is
 * remuxed without re-encoding (see init_filters / main). */
static FilteringContext *filter_ctx;

/* Per-stream codec state: decoder context, encoder context (set only for
 * transcoded audio/video streams) and a reusable decoded frame. */
typedef struct StreamContext {
    AVCodecContext *dec_ctx;
    AVCodecContext *enc_ctx;

    AVFrame *dec_frame;
} StreamContext;
/* One entry per input stream, indexed by stream index. */
static StreamContext *stream_ctx;
  56.  
  57. static int open_input_file(const char *filename)
  58. {
  59.     int ret;
  60.     unsigned int i;
  61.  
  62.     ifmt_ctx = NULL;
  63.     if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
  64.         av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
  65.         return ret;
  66.     }
  67.  
  68.     if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
  69.         av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
  70.         return ret;
  71.     }
  72.  
  73.     stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
  74.     if (!stream_ctx)
  75.         return AVERROR(ENOMEM);
  76.  
  77.     for (i = 0; i < ifmt_ctx->nb_streams; i++) {
  78.         AVStream *stream = ifmt_ctx->streams[i];
  79.         AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
  80.         AVCodecContext *codec_ctx;
  81.         if (!dec) {
  82.             av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
  83.             return AVERROR_DECODER_NOT_FOUND;
  84.         }
  85.         codec_ctx = avcodec_alloc_context3(dec);
  86.         if (!codec_ctx) {
  87.             av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
  88.             return AVERROR(ENOMEM);
  89.         }
  90.         ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
  91.         if (ret < 0) {
  92.             av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
  93.                    "for stream #%u\n", i);
  94.             return ret;
  95.         }
  96.         /* Reencode video & audio and remux subtitles etc. */
  97.         if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
  98.                 || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
  99.             if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
  100.                 codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
  101.             /* Open decoder */
  102.             ret = avcodec_open2(codec_ctx, dec, NULL);
  103.             if (ret < 0) {
  104.                 av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
  105.                 return ret;
  106.             }
  107.         }
  108.         stream_ctx[i].dec_ctx = codec_ctx;
  109.  
  110.         stream_ctx[i].dec_frame = av_frame_alloc();
  111.         if (!stream_ctx[i].dec_frame)
  112.             return AVERROR(ENOMEM);
  113.     }
  114.  
  115.     av_dump_format(ifmt_ctx, 0, filename, 0);
  116.     return 0;
  117. }
  118.  
  119. static int open_output_file(const char *filename)
  120. {
  121.     AVStream *out_stream;
  122.     AVStream *in_stream;
  123.     AVCodecContext *dec_ctx, *enc_ctx;
  124.     AVCodec *encoder;
  125.     int ret;
  126.     unsigned int i;
  127.  
  128.     ofmt_ctx = NULL;
  129.     avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
  130.     if (!ofmt_ctx) {
  131.         av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
  132.         return AVERROR_UNKNOWN;
  133.     }
  134.  
  135.  
  136.     for (i = 0; i < ifmt_ctx->nb_streams; i++) {
  137.         out_stream = avformat_new_stream(ofmt_ctx, NULL);
  138.         if (!out_stream) {
  139.             av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
  140.             return AVERROR_UNKNOWN;
  141.         }
  142.  
  143.         in_stream = ifmt_ctx->streams[i];
  144.         dec_ctx = stream_ctx[i].dec_ctx;
  145.  
  146.         if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
  147.                 || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
  148.             /* in this example, we choose transcoding to same codec */
  149.             encoder = avcodec_find_encoder(dec_ctx->codec_id);
  150.             if (!encoder) {
  151.                 av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
  152.                 return AVERROR_INVALIDDATA;
  153.             }
  154.             enc_ctx = avcodec_alloc_context3(encoder);
  155.             if (!enc_ctx) {
  156.                 av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
  157.                 return AVERROR(ENOMEM);
  158.             }
  159.  
  160.             /* In this example, we transcode to same properties (picture size,
  161.              * sample rate etc.). These properties can be changed for output
  162.              * streams easily using filters */
  163.             if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
  164.                 enc_ctx->height = dec_ctx->height;
  165.                 enc_ctx->width = dec_ctx->width;
  166.                 enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
  167.                 /* take first format from list of supported formats */
  168.                 if (encoder->pix_fmts)
  169.                     enc_ctx->pix_fmt = encoder->pix_fmts[0];
  170.                 else
  171.                     enc_ctx->pix_fmt = dec_ctx->pix_fmt;
  172.                 /* video time_base can be set to whatever is handy and supported by encoder */
  173.                 enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
  174.             } else {
  175.                 enc_ctx->sample_rate = dec_ctx->sample_rate;
  176.                 enc_ctx->channel_layout = dec_ctx->channel_layout;
  177.                 enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
  178.                 /* take first format from list of supported formats */
  179.                 enc_ctx->sample_fmt = encoder->sample_fmts[0];
  180.                 enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
  181.             }
  182.  
  183.             if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
  184.                 enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
  185.  
  186.             /* Third parameter can be used to pass settings to encoder */
  187.             ret = avcodec_open2(enc_ctx, encoder, NULL);
  188.             if (ret < 0) {
  189.                 av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
  190.                 return ret;
  191.             }
  192.             ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
  193.             if (ret < 0) {
  194.                 av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
  195.                 return ret;
  196.             }
  197.  
  198.             out_stream->time_base = enc_ctx->time_base;
  199.             stream_ctx[i].enc_ctx = enc_ctx;
  200.         } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
  201.             av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
  202.             return AVERROR_INVALIDDATA;
  203.         } else {
  204.             /* if this stream must be remuxed */
  205.             ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
  206.             if (ret < 0) {
  207.                 av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
  208.                 return ret;
  209.             }
  210.             out_stream->time_base = in_stream->time_base;
  211.         }
  212.  
  213.     }
  214.     av_dump_format(ofmt_ctx, 0, filename, 1);
  215.  
  216.     if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
  217.         ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
  218.         if (ret < 0) {
  219.             av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
  220.             return ret;
  221.         }
  222.     }
  223.  
  224.     /* init muxer, write output file header */
  225.     ret = avformat_write_header(ofmt_ctx, NULL);
  226.     if (ret < 0) {
  227.         av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
  228.         return ret;
  229.     }
  230.  
  231.     return 0;
  232. }
  233.  
  234. static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
  235.         AVCodecContext *enc_ctx, const char *filter_spec)
  236. {
  237.     char args[512];
  238.     int ret = 0;
  239.     const AVFilter *buffersrc = NULL;
  240.     const AVFilter *buffersink = NULL;
  241.     AVFilterContext *buffersrc_ctx = NULL;
  242.     AVFilterContext *buffersink_ctx = NULL;
  243.     AVFilterInOut *outputs = avfilter_inout_alloc();
  244.     AVFilterInOut *inputs  = avfilter_inout_alloc();
  245.     AVFilterGraph *filter_graph = avfilter_graph_alloc();
  246.  
  247.     if (!outputs || !inputs || !filter_graph) {
  248.         ret = AVERROR(ENOMEM);
  249.         goto end;
  250.     }
  251.  
  252.     if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
  253.         buffersrc = avfilter_get_by_name("buffer");
  254.         buffersink = avfilter_get_by_name("buffersink");
  255.         if (!buffersrc || !buffersink) {
  256.             av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
  257.             ret = AVERROR_UNKNOWN;
  258.             goto end;
  259.         }
  260.  
  261.         snprintf(args, sizeof(args),
  262.                 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
  263.                 dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
  264.                 dec_ctx->time_base.num, dec_ctx->time_base.den,
  265.                 dec_ctx->sample_aspect_ratio.num,
  266.                 dec_ctx->sample_aspect_ratio.den);
  267.  
  268.         ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
  269.                 args, NULL, filter_graph);
  270.         if (ret < 0) {
  271.             av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
  272.             goto end;
  273.         }
  274.  
  275.         ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
  276.                 NULL, NULL, filter_graph);
  277.         if (ret < 0) {
  278.             av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
  279.             goto end;
  280.         }
  281.  
  282.         ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
  283.                 (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
  284.                 AV_OPT_SEARCH_CHILDREN);
  285.         if (ret < 0) {
  286.             av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
  287.             goto end;
  288.         }
  289.     } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
  290.         buffersrc = avfilter_get_by_name("abuffer");
  291.         buffersink = avfilter_get_by_name("abuffersink");
  292.         if (!buffersrc || !buffersink) {
  293.             av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
  294.             ret = AVERROR_UNKNOWN;
  295.             goto end;
  296.         }
  297.  
  298.         if (!dec_ctx->channel_layout)
  299.             dec_ctx->channel_layout =
  300.                 av_get_default_channel_layout(dec_ctx->channels);
  301.         snprintf(args, sizeof(args),
  302.                 "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
  303.                 dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
  304.                 av_get_sample_fmt_name(dec_ctx->sample_fmt),
  305.                 dec_ctx->channel_layout);
  306.         ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
  307.                 args, NULL, filter_graph);
  308.         if (ret < 0) {
  309.             av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
  310.             goto end;
  311.         }
  312.  
  313.         ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
  314.                 NULL, NULL, filter_graph);
  315.         if (ret < 0) {
  316.             av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
  317.             goto end;
  318.         }
  319.  
  320.         ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
  321.                 (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
  322.                 AV_OPT_SEARCH_CHILDREN);
  323.         if (ret < 0) {
  324.             av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
  325.             goto end;
  326.         }
  327.  
  328.         ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
  329.                 (uint8_t*)&enc_ctx->channel_layout,
  330.                 sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
  331.         if (ret < 0) {
  332.             av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
  333.             goto end;
  334.         }
  335.  
  336.         ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
  337.                 (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
  338.                 AV_OPT_SEARCH_CHILDREN);
  339.         if (ret < 0) {
  340.             av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
  341.             goto end;
  342.         }
  343.     } else {
  344.         ret = AVERROR_UNKNOWN;
  345.         goto end;
  346.     }
  347.  
  348.     /* Endpoints for the filter graph. */
  349.     outputs->name       = av_strdup("in");
  350.     outputs->filter_ctx = buffersrc_ctx;
  351.     outputs->pad_idx    = 0;
  352.     outputs->next       = NULL;
  353.  
  354.     inputs->name       = av_strdup("out");
  355.     inputs->filter_ctx = buffersink_ctx;
  356.     inputs->pad_idx    = 0;
  357.     inputs->next       = NULL;
  358.  
  359.     if (!outputs->name || !inputs->name) {
  360.         ret = AVERROR(ENOMEM);
  361.         goto end;
  362.     }
  363.  
  364.     if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
  365.                     &inputs, &outputs, NULL)) < 0)
  366.         goto end;
  367.  
  368.     if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
  369.         goto end;
  370.  
  371.     /* Fill FilteringContext */
  372.     fctx->buffersrc_ctx = buffersrc_ctx;
  373.     fctx->buffersink_ctx = buffersink_ctx;
  374.     fctx->filter_graph = filter_graph;
  375.  
  376. end:
  377.     avfilter_inout_free(&inputs);
  378.     avfilter_inout_free(&outputs);
  379.  
  380.     return ret;
  381. }
  382.  
  383. static int init_filters(void)
  384. {
  385.     const char *filter_spec;
  386.     unsigned int i;
  387.     int ret;
  388.     filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
  389.     if (!filter_ctx)
  390.         return AVERROR(ENOMEM);
  391.  
  392.     for (i = 0; i < ifmt_ctx->nb_streams; i++) {
  393.         filter_ctx[i].buffersrc_ctx  = NULL;
  394.         filter_ctx[i].buffersink_ctx = NULL;
  395.         filter_ctx[i].filter_graph   = NULL;
  396.         if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO
  397.                 || ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
  398.             continue;
  399.  
  400.  
  401.         if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
  402.             filter_spec = "null"; /* passthrough (dummy) filter for video */
  403.         else
  404.             filter_spec = "anull"; /* passthrough (dummy) filter for audio */
  405.         ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx,
  406.                 stream_ctx[i].enc_ctx, filter_spec);
  407.         if (ret)
  408.             return ret;
  409.  
  410.         filter_ctx[i].filtered_frame = av_frame_alloc();
  411.         if (!filter_ctx[i].filtered_frame)
  412.             return AVERROR(ENOMEM);
  413.     }
  414.     return 0;
  415. }
  416.  
/*
 * Send one filtered frame (or NULL to enter draining/flush mode) to the
 * stream's encoder, then mux every packet the encoder produces.
 *
 * Returns 0 on success (including EAGAIN/EOF from the encoder, which are
 * normal), a negative AVERROR otherwise.
 */
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index)
{
    StreamContext *stream = &stream_ctx[stream_index];
    int ret;
    AVPacket enc_pkt;

    av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
    /* encode filtered frame */
    enc_pkt.data = NULL;
    enc_pkt.size = 0;
    av_init_packet(&enc_pkt);

    /* A NULL frame switches the encoder into draining mode. */
    ret = avcodec_send_frame(stream->enc_ctx, filt_frame);

    if (ret < 0)
        return ret;

    while (ret >= 0) {
        ret = avcodec_receive_packet(stream->enc_ctx, &enc_pkt);

        /* EAGAIN: encoder needs more input; EOF: fully drained after a
         * flush. Both mean "nothing more to mux now" — report success. */
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;

        /* prepare packet for muxing */
        enc_pkt.stream_index = stream_index;
        /* Rescale timestamps from the encoder time base to the output
         * stream's time base expected by the muxer. */
        av_packet_rescale_ts(&enc_pkt,
                             stream->enc_ctx->time_base,
                             ofmt_ctx->streams[stream_index]->time_base);

        av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
        /* mux encoded frame; av_interleaved_write_frame takes ownership
         * of the packet's payload reference */
        ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
    }

    return ret;
}
  453.  
  454. static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
  455. {
  456.     FilteringContext *filter = &filter_ctx[stream_index];
  457.     int ret;
  458.  
  459.     av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
  460.     /* push the decoded frame into the filtergraph */
  461.     ret = av_buffersrc_add_frame_flags(filter->buffersrc_ctx,
  462.             frame, 0);
  463.     if (ret < 0) {
  464.         av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
  465.         return ret;
  466.     }
  467.  
  468.     /* pull filtered frames from the filtergraph */
  469.     while (1) {
  470.         av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
  471.         ret = av_buffersink_get_frame(filter->buffersink_ctx,
  472.                                       filter->filtered_frame);
  473.         if (ret < 0) {
  474.             /* if no more frames for output - returns AVERROR(EAGAIN)
  475.              * if flushed and no more frames for output - returns AVERROR_EOF
  476.              * rewrite retcode to 0 to show it as normal procedure completion
  477.              */
  478.             if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
  479.                 ret = 0;
  480.             break;
  481.         }
  482.  
  483.         filter->filtered_frame->pict_type = AV_PICTURE_TYPE_NONE;
  484.         ret = encode_write_frame(filter->filtered_frame, stream_index);
  485.         av_frame_unref(filter->filtered_frame);
  486.         if (ret < 0)
  487.             break;
  488.     }
  489.  
  490.     return ret;
  491. }
  492.  
  493. static int flush_encoder(unsigned int stream_index)
  494. {
  495.     if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
  496.                 AV_CODEC_CAP_DELAY))
  497.         return 0;
  498.  
  499.     av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
  500.     return encode_write_frame(NULL, stream_index);
  501. }
  502.  
  503. int main(int argc, char **argv)
  504. {
  505.     int ret;
  506.     AVPacket packet = { .data = NULL, .size = 0 };
  507.     unsigned int stream_index;
  508.     unsigned int i;
  509.  
  510.     if (argc != 3) {
  511.         av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
  512.         return 1;
  513.     }
  514.  
  515.     if ((ret = open_input_file(argv[1])) < 0)
  516.         goto end;
  517.     if ((ret = open_output_file(argv[2])) < 0)
  518.         goto end;
  519.     if ((ret = init_filters()) < 0)
  520.         goto end;
  521.  
  522.     /* read all packets */
  523.     while (1) {
  524.         if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
  525.             break;
  526.         stream_index = packet.stream_index;
  527.         av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
  528.                 stream_index);
  529.  
  530.         if (filter_ctx[stream_index].filter_graph) {
  531.             StreamContext *stream = &stream_ctx[stream_index];
  532.  
  533.             av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
  534.  
  535.             av_packet_rescale_ts(&packet,
  536.                                  ifmt_ctx->streams[stream_index]->time_base,
  537.                                  stream->dec_ctx->time_base);
  538.             ret = avcodec_send_packet(stream->dec_ctx, &packet);
  539.             if (ret < 0) {
  540.                 av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
  541.                 break;
  542.             }
  543.  
  544.             while (ret >= 0) {
  545.                 ret = avcodec_receive_frame(stream->dec_ctx, stream->dec_frame);
  546.                 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
  547.                     break;
  548.                 else if (ret < 0)
  549.                     goto end;
  550.  
  551.                 stream->dec_frame->pts = stream->dec_frame->best_effort_timestamp;
  552.                 ret = filter_encode_write_frame(stream->dec_frame, stream_index);
  553.                 if (ret < 0)
  554.                     goto end;
  555.             }
  556.         } else {
  557.             /* remux this frame without reencoding */
  558.             av_packet_rescale_ts(&packet,
  559.                                  ifmt_ctx->streams[stream_index]->time_base,
  560.                                  ofmt_ctx->streams[stream_index]->time_base);
  561.  
  562.             ret = av_interleaved_write_frame(ofmt_ctx, &packet);
  563.             if (ret < 0)
  564.                 goto end;
  565.         }
  566.         av_packet_unref(&packet);
  567.     }
  568.  
  569.     /* flush filters and encoders */
  570.     for (i = 0; i < ifmt_ctx->nb_streams; i++) {
  571.         /* flush filter */
  572.         if (!filter_ctx[i].filter_graph)
  573.             continue;
  574.         ret = filter_encode_write_frame(NULL, i);
  575.         if (ret < 0) {
  576.             av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
  577.             goto end;
  578.         }
  579.  
  580.         /* flush encoder */
  581.         ret = flush_encoder(i);
  582.         if (ret < 0) {
  583.             av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
  584.             goto end;
  585.         }
  586.     }
  587.  
  588.     av_write_trailer(ofmt_ctx);
  589. end:
  590.     av_packet_unref(&packet);
  591.     for (i = 0; i < ifmt_ctx->nb_streams; i++) {
  592.         avcodec_free_context(&stream_ctx[i].dec_ctx);
  593.         if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
  594.             avcodec_free_context(&stream_ctx[i].enc_ctx);
  595.         if (filter_ctx && filter_ctx[i].filter_graph) {
  596.             avfilter_graph_free(&filter_ctx[i].filter_graph);
  597.             av_frame_free(&filter_ctx[i].filtered_frame);
  598.         }
  599.  
  600.         av_frame_free(&stream_ctx[i].dec_frame);
  601.     }
  602.     av_free(filter_ctx);
  603.     av_free(stream_ctx);
  604.     avformat_close_input(&ifmt_ctx);
  605.     if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
  606.         avio_closep(&ofmt_ctx->pb);
  607.     avformat_free_context(ofmt_ctx);
  608.  
  609.     if (ret < 0)
  610.         av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));
  611.  
  612.     return ret ? 1 : 0;
  613. }
RAW Paste Data