FFmpeg MPEG-TS muxrate example
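FFmpeg's libavformat muxing example (doc/examples/muxing.c), adapted to compile as C++ and modified to pass a "muxrate" option to the MPEG muxer through avformat_write_header().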

/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libavformat API example.
 *
 * Output a media file in any supported libavformat format.
 * The default codecs are used.
 * @example doc/examples/muxing.c
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#define __STDC_CONSTANT_MACROS
extern "C"
{
    #include <libavutil/mathematics.h>
    #include <libavformat/avformat.h>
    #include <libswscale/swscale.h>
}

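/* The av_err2str() macro from libavutil uses a C99 compound literal, which is
 * not valid C++, so for C++ builds it is replaced below with a small wrapper
 * around av_strerror(). */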
#ifdef  __cplusplus

    #include <string>

    static const std::string av_make_error_string(int errnum)
    {
        char errbuf[AV_ERROR_MAX_STRING_SIZE];
        av_strerror(errnum, errbuf, AV_ERROR_MAX_STRING_SIZE);
        return (std::string)errbuf;
    }

    #undef av_err2str
    #define av_err2str(errnum) av_make_error_string(errnum).c_str()

#endif // __cplusplus

/* 200 seconds stream duration */
#define STREAM_DURATION   200.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

static int sws_flags = SWS_BICUBIC;

/**************************************************************/
/* audio output */

static float t, tincr, tincr2;
static int16_t *samples;
static int audio_input_frame_size;

/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
                            enum AVCodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    st = avformat_new_stream(oc, *codec);
    if (!st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    st->id = oc->nb_streams-1;
    c = st->codec;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        c->sample_fmt  = AV_SAMPLE_FMT_S16;
        c->bit_rate    = 64000;
        c->sample_rate = 44100;
        c->channels    = 2;
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;

        c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        c->width    = 352;
        c->height   = 288;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        c->time_base.den = STREAM_FRAME_RATE;
        c->time_base.num = 1;
        c->gop_size      = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt       = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

/**************************************************************/
/* audio output */

static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    AVCodecContext *c;
    int ret;

    c = st->codec;

    /* open it */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* init signal generator */
    t     = 0;
    tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

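    /* Codecs with CODEC_CAP_VARIABLE_FRAME_SIZE (e.g. PCM) accept any number of
     * samples per call, so an arbitrary buffer size is used; otherwise the
     * encoder dictates the frame size via c->frame_size. */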
    if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
        audio_input_frame_size = 10000;
    else
        audio_input_frame_size = c->frame_size;
    samples = (int16_t*)av_malloc(audio_input_frame_size *
                        av_get_bytes_per_sample(c->sample_fmt) *
                        c->channels);
    if (!samples) {
        fprintf(stderr, "Could not allocate audio samples buffer\n");
        exit(1);
    }
}

/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels. */
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
{
    int j, i, v;
    int16_t *q;

    q = samples;
    for (j = 0; j < frame_size; j++) {
        v = (int)(sin(t) * 10000);
        for (i = 0; i < nb_channels; i++)
            *q++ = v;
        t     += tincr;
        tincr += tincr2;
    }
}

static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    AVFrame *frame = avcodec_alloc_frame();
    int got_packet, ret;

    av_init_packet(&pkt);
    c = st->codec;

    get_audio_frame(samples, audio_input_frame_size, c->channels);
    frame->nb_samples = audio_input_frame_size;
    avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
                             (uint8_t *)samples,
                             audio_input_frame_size *
                             av_get_bytes_per_sample(c->sample_fmt) *
                             c->channels, 1);

    ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (!got_packet)
        return;

    pkt.stream_index = st->index;

    /* Write the compressed frame to the media file. */
    ret = av_interleaved_write_frame(oc, &pkt);
    if (ret != 0) {
        fprintf(stderr, "Error while writing audio frame: %s\n",
                av_err2str(ret));
        exit(1);
    }
    avcodec_free_frame(&frame);
}

static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);

    av_free(samples);
}

/**************************************************************/
/* video output */

static AVFrame *frame;
static AVPicture src_picture, dst_picture;
static int frame_count;

static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    int ret;
    AVCodecContext *c = st->codec;

    /* open the codec */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    /* Allocate the encoded raw picture. */
    ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate temporary picture: %s\n",
                    av_err2str(ret));
            exit(1);
        }
    }

    /* copy data and linesize picture pointers to frame */
    *((AVPicture *)frame) = dst_picture;
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVPicture *pict, int frame_index,
                           int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int ret;
    static struct SwsContext *sws_ctx;
    AVCodecContext *c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* No more frames to compress. The codec has a latency of a few
         * frames if using B-frames, so we get the last frames by
         * passing the same picture again. */
    } else {
        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (!sws_ctx) {
                sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
                                         c->width, c->height, c->pix_fmt,
                                         sws_flags, NULL, NULL, NULL);
                if (!sws_ctx) {
                    fprintf(stderr,
                            "Could not initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(&src_picture, frame_count, c->width, c->height);
            sws_scale(sws_ctx,
                      (const uint8_t * const *)src_picture.data, src_picture.linesize,
                      0, c->height, dst_picture.data, dst_picture.linesize);
        } else {
            fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* Raw video case - directly store the picture in the packet */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags        |= AV_PKT_FLAG_KEY;
        pkt.stream_index  = st->index;
        pkt.data          = dst_picture.data[0];
        pkt.size          = sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        AVPacket pkt = { 0 };
        int got_packet;
        av_init_packet(&pkt);

        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
        if (ret < 0) {
            fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
            exit(1);
        }
        /* If size is zero, it means the image was buffered. */

        if (!ret && got_packet && pkt.size) {
            pkt.stream_index = st->index;

            /* Write the compressed frame to the media file. */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }
    frame_count++;
}

static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    av_free(src_picture.data[0]);
    av_free(dst_picture.data[0]);
    av_free(frame);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    AVCodec *audio_codec = NULL, *video_codec = NULL;
    double audio_time, video_time;
    int ret;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc) {
        return 1;
    }
    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;

    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        video_st = add_stream(oc, &video_codec, fmt->video_codec);
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_codec, video_st);
    if (audio_st)
        open_audio(oc, audio_codec, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

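    /* Pass the "muxrate" private option of the MPEG-PS/MPEG-TS muxers to
     * avformat_write_header(). It sets the target multiplex bitrate in bits/s
     * (here 2 Mbit/s); entries the muxer does not recognise are left in the
     * dictionary. */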
    /* Write the stream header, if any. */
    AVDictionary *dict = NULL;
    av_dict_set(&dict, "muxrate", "2000000", 0);
    ret = avformat_write_header(oc, &dict);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }
    av_dict_free(&dict); /* free any options that were not consumed */

    if (frame)
        frame->pts = 0;
    for (;;) {
        /* Compute current audio and video time. */
        if (audio_st)
            audio_time = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        else
            audio_time = 0.0;

        if (video_st)
            video_time = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
        else
            video_time = 0.0;

        if ((!audio_st || audio_time >= STREAM_DURATION) &&
            (!video_st || video_time >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames */
        if (!video_st || (video_st && audio_st && audio_time < video_time)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
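            /* The codec time_base is 1/STREAM_FRAME_RATE, while the muxer may
             * use a different stream time_base (e.g. 1/90000 for MPEG-TS), so
             * rescale the one-frame PTS increment between the two. */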
            frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
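
/*
 * Build notes: this example appears to target a 2013-era FFmpeg; several calls
 * used here (avcodec_alloc_frame(), avcodec_free_frame(), the AVStream::codec
 * field) have been deprecated in later releases. Compile the file as C++ and
 * link against libavformat, libavcodec, libavutil and libswscale. Choosing a
 * .ts or .mpg output filename selects the MPEG-TS or MPEG-PS muxer, which is
 * where the 2 Mbit/s "muxrate" option set above takes effect.
 */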