FFmpeg Sender
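A Windows C++ sample that grabs video from a DirectShow webcam, encodes it to H.264 with libx264, and streams the result as FLV over UDP to udp://127.0.0.1:8888. Building with USEFILTER set to 1 adds an interactive libavfilter chain (mirror, watermark, negate, edge detect, 4-way split, vintage) that can be switched from the keyboard while streaming; pressing Enter stops the stream. The code is written against the FFmpeg 2.x-era API (avcodec_decode_video2(), avcodec_encode_video2(), AVStream::codec).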
#define USEFILTER 0

#include <stdio.h>
#include <conio.h>
#include <windows.h>
#define snprintf _snprintf
extern "C"
{
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavutil/mathematics.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#if USEFILTER
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#endif
}

int flush_encoder(AVFormatContext *ifmt_ctx, AVFormatContext *ofmt_ctx, unsigned int stream_index, int framecnt);

int exit_thread = 0;
#if USEFILTER
int filter_change = 1;
const char *filter_descr = "null";
const char *filter_mirror = "crop=iw/2:ih:0:0,split[left][tmp];[tmp]hflip[right];"
                            "[left]pad=iw*2[a];[a][right]overlay=w";
const char *filter_watermark = "movie=test.jpg[wm];[in][wm]overlay=5:5[out]";
const char *filter_negate = "negate[out]";
const char *filter_edge = "edgedetect[out]";
const char *filter_split4 = "scale=iw/2:ih/2[in_tmp];[in_tmp]split=4[in_1][in_2][in_3][in_4];[in_1]pad=iw*2:ih*2[a];[a][in_2]overlay=w[b];[b][in_3]overlay=0:h[d];[d][in_4]overlay=w:h[out]";
const char *filter_vintage = "curves=vintage";
typedef enum {
    FILTER_NULL = 48,
    FILTER_MIRROR,
    FILTER_WATERMARK,
    FILTER_NEGATE,
    FILTER_EDGE,
    FILTER_SPLIT4,
    FILTER_VINTAGE
} FILTERS;
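// FILTER_NULL is 48, the ASCII code of '0', so the key codes read by getchar()
// in MyThreadFunction() below can be compared against the enum directly:
// '0' selects FILTER_NULL, '1' FILTER_MIRROR, ... '6' FILTER_VINTAGE.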

AVFilterContext *buffersink_ctx;
AVFilterContext *buffersrc_ctx;
AVFilterGraph *filter_graph;
AVFilter *buffersrc;
AVFilter *buffersink;
AVFrame *picref;
#endif

DWORD WINAPI MyThreadFunction(LPVOID lpParam)
{
#if USEFILTER
    int ch = getchar();
    while (ch != '\n')
    {
        switch (ch) {
        case FILTER_NULL:
        {
            printf("\nnow using null filter\nPress other numbers for other filters:");
            filter_change = 1;
            filter_descr = "null";
            getchar();
            ch = getchar();
            break;
        }
        case FILTER_MIRROR:
        {
            printf("\nnow using mirror filter\nPress other numbers for other filters:");
            filter_change = 1;
            filter_descr = filter_mirror;
            getchar();
            ch = getchar();
            break;
        }
        case FILTER_WATERMARK:
        {
            printf("\nnow using watermark filter\nPress other numbers for other filters:");
            filter_change = 1;
            filter_descr = filter_watermark;
            getchar();
            ch = getchar();
            break;
        }
        case FILTER_NEGATE:
        {
            printf("\nnow using negate filter\nPress other numbers for other filters:");
            filter_change = 1;
            filter_descr = filter_negate;
            getchar();
            ch = getchar();
            break;
        }
        case FILTER_EDGE:
        {
            printf("\nnow using edge filter\nPress other numbers for other filters:");
            filter_change = 1;
            filter_descr = filter_edge;
            getchar();
            ch = getchar();
            break;
        }
        case FILTER_SPLIT4:
        {
            printf("\nnow using split4 filter\nPress other numbers for other filters:");
            filter_change = 1;
            filter_descr = filter_split4;
            getchar();
            ch = getchar();
            break;
        }
        case FILTER_VINTAGE:
        {
            printf("\nnow using vintage filter\nPress other numbers for other filters:");
            filter_change = 1;
            filter_descr = filter_vintage;
            getchar();
            ch = getchar();
            break;
        }
        default:
        {
            getchar();
            ch = getchar();
            break;
        }
        }
#else
    while ((getchar()) != '\n')
    {
        ;
#endif
    }
    exit_thread = 1;
    return 0;
}

//Show DirectShow devices
void show_dshow_device() {
    AVFormatContext *pFmtCtx = avformat_alloc_context();
    AVDictionary *options = NULL;
    av_dict_set(&options, "list_devices", "true", 0);
    AVInputFormat *iformat = av_find_input_format("dshow");
    printf("Device Info=============\n");
    avformat_open_input(&pFmtCtx, "video=dummy", iformat, &options);
    printf("========================\n");
}
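// With list_devices=true the dshow demuxer prints the available devices via
// av_log() (normally to stderr) and then deliberately fails the open, which is
// why the return value of avformat_open_input() is ignored here.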

#if USEFILTER
static int apply_filters(AVFormatContext *ifmt_ctx)
{
    char args[512];
    int ret;
    AVFilterInOut *outputs = avfilter_inout_alloc();
    if (!outputs)
    {
        printf("Cannot alloc output\n");
        return -1;
    }
    AVFilterInOut *inputs = avfilter_inout_alloc();
    if (!inputs)
    {
        printf("Cannot alloc input\n");
        return -1;
    }

    if (filter_graph)
        avfilter_graph_free(&filter_graph);
    filter_graph = avfilter_graph_alloc();
    if (!filter_graph)
    {
        printf("Cannot create filter graph\n");
        return -1;
    }

    /* buffer video source: the decoded frames from the decoder will be inserted here. */
    snprintf(args, sizeof(args),
        "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
        ifmt_ctx->streams[0]->codec->width, ifmt_ctx->streams[0]->codec->height, ifmt_ctx->streams[0]->codec->pix_fmt,
        ifmt_ctx->streams[0]->time_base.num, ifmt_ctx->streams[0]->time_base.den,
        ifmt_ctx->streams[0]->codec->sample_aspect_ratio.num, ifmt_ctx->streams[0]->codec->sample_aspect_ratio.den);

    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
        args, NULL, filter_graph);
    if (ret < 0) {
        printf("Cannot create buffer source\n");
        return ret;
    }

    /* buffer video sink: to terminate the filter chain. */
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
        NULL, NULL, filter_graph);
    if (ret < 0) {
        printf("Cannot create buffer sink\n");
        return ret;
    }

    /* Endpoints for the filter graph. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;

    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_descr,
        &inputs, &outputs, NULL)) < 0)
        return ret;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        return ret;

    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return 0;
}
#endif
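// apply_filters() tears the graph down and rebuilds it whenever the user picks
// a new filter (filter_change). Decoded camera frames enter through the
// "buffer" source labelled "in", pass through the chain described by
// filter_descr, and are pulled back out of the "buffersink" labelled "out"
// in the main loop below.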

int main(int argc, char* argv[])
{
    AVFormatContext *ifmt_ctx = NULL;
    AVFormatContext *ofmt_ctx;
    AVInputFormat *ifmt;
    AVStream *video_st;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVPacket *dec_pkt, enc_pkt;
    AVFrame *pframe, *pFrameYUV;
    struct SwsContext *img_convert_ctx;

    char capture_name[80] = { 0 };
    char device_name[80] = { 0 };
    int framecnt = 0;
    int videoindex;
    int i;
    int ret;
    HANDLE hThread;

    const char* out_path = "udp://127.0.0.1:8888";
    int dec_got_frame, enc_got_frame;

    av_register_all();
    //Register Device
    avdevice_register_all();
    avformat_network_init();
#if USEFILTER
    //Register Filter
    avfilter_register_all();
    buffersrc = avfilter_get_by_name("buffer");
    buffersink = avfilter_get_by_name("buffersink");
#endif

    //Show Dshow Device
    //show_dshow_device();

    /*printf("\nChoose capture device: ");
    if (gets(capture_name) == 0)
    {
        printf("Error in gets()\n");
        return -1;
    }
    sprintf(device_name, "video=%s", capture_name);*/

    ifmt = av_find_input_format("dshow");

    //Set your own capture device name here
    if (avformat_open_input(&ifmt_ctx, "video=Integrated Webcam", ifmt, NULL) != 0) {
        printf("Couldn't open input stream.\n");
        return -1;
    }
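    // A sketch, not in the original: capture parameters such as resolution and
    // frame rate can be requested via an AVDictionary when opening the dshow
    // device (option names are the dshow demuxer's; the values here are just
    // examples):
    //
    //     AVDictionary *in_opts = NULL;
    //     av_dict_set(&in_opts, "video_size", "640x480", 0);
    //     av_dict_set(&in_opts, "framerate", "30", 0);
    //     avformat_open_input(&ifmt_ctx, "video=Integrated Webcam", ifmt, &in_opts);
    //     av_dict_free(&in_opts);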
    //input initialize
    if (avformat_find_stream_info(ifmt_ctx, NULL) < 0)
    {
        printf("Couldn't find stream information.\n");
        return -1;
    }
    videoindex = -1;
    for (i = 0; i < ifmt_ctx->nb_streams; i++)
        if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            videoindex = i;
            break;
        }
    if (videoindex == -1)
    {
        printf("Couldn't find a video stream.\n");
        return -1;
    }
    if (avcodec_open2(ifmt_ctx->streams[videoindex]->codec, avcodec_find_decoder(ifmt_ctx->streams[videoindex]->codec->codec_id), NULL) < 0)
    {
        printf("Could not open codec.\n");
        return -1;
    }
    av_dump_format(ifmt_ctx, 0, "video=Integrated Webcam", 0);

    //output initialize
    avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_path);
    //output encoder initialize
    pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!pCodec) {
        printf("Cannot find encoder!\n");
        return -1;
    }
    pCodecCtx = avcodec_alloc_context3(pCodec);
    pCodecCtx->pix_fmt = PIX_FMT_YUV420P;
    pCodecCtx->width = ifmt_ctx->streams[videoindex]->codec->width;
    pCodecCtx->height = ifmt_ctx->streams[videoindex]->codec->height;
    pCodecCtx->time_base.num = 1;
    pCodecCtx->time_base.den = 25;
    pCodecCtx->bit_rate = 400000;
    pCodecCtx->gop_size = 250;
    /* Some formats want stream headers to be separate. */
    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        pCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;

    //H264 codec parameters
    //pCodecCtx->me_range = 16;
    //pCodecCtx->max_qdiff = 4;
    //pCodecCtx->qcompress = 0.6;
    pCodecCtx->qmin = 10;
    pCodecCtx->qmax = 51;
    //Optional parameters
    pCodecCtx->max_b_frames = 3;
    //Set the x264 preset and tune
    AVDictionary *param = 0;
    av_dict_set(&param, "rtbufsize", "702000k", 0);
    av_dict_set(&param, "preset", "fast", 0);
    av_dict_set(&param, "tune", "zerolatency", 0);
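    // Note: "rtbufsize" is an input/demuxer option (it sizes the dshow
    // real-time buffer), not an x264 option; passed to avcodec_open2() below it
    // is simply left unconsumed in the dictionary. "tune=zerolatency" disables
    // x264's lookahead and B-frames, so the max_b_frames = 3 set above has no
    // effect either.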

    if (avcodec_open2(pCodecCtx, pCodec, &param) < 0) {
        printf("Failed to open encoder!\n");
        return -1;
    }

    //Add a new stream to the output; must be done before avformat_write_header() for muxing
    video_st = avformat_new_stream(ofmt_ctx, pCodec);
    if (video_st == NULL) {
        return -1;
    }
    video_st->time_base.num = 1;
    video_st->time_base.den = 25;
    video_st->codec = pCodecCtx;

    //Open the output URL; must also happen before avformat_write_header()
    if (avio_open(&ofmt_ctx->pb, out_path, AVIO_FLAG_READ_WRITE) < 0) {
        printf("Failed to open output URL!\n");
        return -1;
    }

    //Show some information
    av_dump_format(ofmt_ctx, 0, out_path, 1);

    //Write the file header
    avformat_write_header(ofmt_ctx, NULL);

    //prepare before decode and encode
    dec_pkt = (AVPacket *)av_malloc(sizeof(AVPacket));
    //enc_pkt = (AVPacket *)av_malloc(sizeof(AVPacket));
    //camera frames arrive in the camera's native pixel format; convert to YUV420P for x264
#if USEFILTER
#else
    img_convert_ctx = sws_getContext(ifmt_ctx->streams[videoindex]->codec->width, ifmt_ctx->streams[videoindex]->codec->height,
        ifmt_ctx->streams[videoindex]->codec->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
#endif
    pFrameYUV = av_frame_alloc();
    uint8_t *out_buffer = (uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
    avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
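    // pFrameYUV does not own its pixels: avpicture_fill() only points its
    // data/linesize fields into out_buffer, a single allocation sized for one
    // YUV420P frame at the encoder's width and height.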

    printf("\n --------call started----------\n");
#if USEFILTER
    printf("\n Press different numbers for different filters:");
    printf("\n 1->Mirror");
    printf("\n 2->Add Watermark");
    printf("\n 3->Negate");
    printf("\n 4->Draw Edge");
    printf("\n 5->Split Into 4");
    printf("\n 6->Vintage");
    printf("\n Press 0 to remove filter\n");
#endif
    printf("\nPress enter to stop...\n");
    hThread = CreateThread(
        NULL,                   // default security attributes
        0,                      // use default stack size
        MyThreadFunction,       // thread function name
        NULL,                   // argument to thread function
        0,                      // use default creation flags
        NULL);                  // returns the thread identifier

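    // Pipeline: read a raw packet from the camera, decode it to a frame,
    // optionally run it through the filter graph (USEFILTER), convert to
    // YUV420P, encode with x264, restamp pts/dts, and pace the UDP push so it
    // proceeds in real time.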
    //start decode and encode
    int64_t start_time = av_gettime();
    while (av_read_frame(ifmt_ctx, dec_pkt) >= 0) {
        if (exit_thread)
            break;
        av_log(NULL, AV_LOG_DEBUG, "Going to reencode the frame\n");
        pframe = av_frame_alloc();
        if (!pframe) {
            return AVERROR(ENOMEM);
        }
        //av_packet_rescale_ts(dec_pkt, ifmt_ctx->streams[dec_pkt->stream_index]->time_base,
        //  ifmt_ctx->streams[dec_pkt->stream_index]->codec->time_base);
        ret = avcodec_decode_video2(ifmt_ctx->streams[dec_pkt->stream_index]->codec, pframe,
            &dec_got_frame, dec_pkt);
        if (ret < 0) {
            av_frame_free(&pframe);
            av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
            break;
        }
        if (dec_got_frame) {
#if USEFILTER
            pframe->pts = av_frame_get_best_effort_timestamp(pframe);

            if (filter_change)
                apply_filters(ifmt_ctx);
            filter_change = 0;
            /* push the decoded frame into the filtergraph */
            if (av_buffersrc_add_frame(buffersrc_ctx, pframe) < 0) {
                printf("Error while feeding the filtergraph\n");
                break;
            }
            picref = av_frame_alloc();

            /* pull filtered pictures from the filtergraph */
            while (1) {
                ret = av_buffersink_get_frame_flags(buffersink_ctx, picref, 0);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                    break;
                if (ret < 0)
                    return ret;

                if (picref) {
                    img_convert_ctx = sws_getContext(picref->width, picref->height, (AVPixelFormat)picref->format, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
                    sws_scale(img_convert_ctx, (const uint8_t* const*)picref->data, picref->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
                    sws_freeContext(img_convert_ctx);
                    pFrameYUV->width = picref->width;
                    pFrameYUV->height = picref->height;
                    pFrameYUV->format = PIX_FMT_YUV420P;
#else
            sws_scale(img_convert_ctx, (const uint8_t* const*)pframe->data, pframe->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
            pFrameYUV->width = pframe->width;
            pFrameYUV->height = pframe->height;
            pFrameYUV->format = PIX_FMT_YUV420P;
#endif
            enc_pkt.data = NULL;
            enc_pkt.size = 0;
            av_init_packet(&enc_pkt);
            ret = avcodec_encode_video2(pCodecCtx, &enc_pkt, pFrameYUV, &enc_got_frame);
            av_frame_free(&pframe);
            if (enc_got_frame == 1) {
                //printf("Succeed to encode frame: %5d\tsize:%5d\n", framecnt, enc_pkt.size);
                framecnt++;
                enc_pkt.stream_index = video_st->index;

                //Write PTS
                AVRational time_base = ofmt_ctx->streams[video_st->index]->time_base; //{ 1, 1000 }
                AVRational r_framerate1 = ifmt_ctx->streams[videoindex]->r_frame_rate; //{ 50, 2 }
                AVRational time_base_q = { 1, AV_TIME_BASE };
                //Duration between two frames, in the internal microsecond time base
                int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));
                //enc_pkt.pts = (double)(framecnt*calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));
                enc_pkt.pts = av_rescale_q(framecnt*calc_duration, time_base_q, time_base);
                enc_pkt.dts = enc_pkt.pts;
                enc_pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base); //(double)(calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));
                enc_pkt.pos = -1;
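                // Worked example of the timestamp math: at 25 fps,
                // calc_duration = 1000000/25 = 40000 us per frame; rescaled to
                // the FLV muxer's 1/1000 time base, frame N gets
                // pts = av_rescale_q(N*40000, {1,1000000}, {1,1000}) = N*40 ms.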

                //Delay
                int64_t pts_time = av_rescale_q(enc_pkt.dts, time_base, time_base_q);
                int64_t now_time = av_gettime() - start_time;
                if (pts_time > now_time)
                    av_usleep(pts_time - now_time);
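                // Pacing: the packet's dts is converted back to microseconds
                // and compared with the wall clock, sleeping if needed, so
                // packets leave at the stream's real-time rate rather than as
                // fast as the encoder can produce them.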

                ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
                av_free_packet(&enc_pkt);
            }
#if USEFILTER
            av_frame_unref(picref);
                }
            }
#endif
        }
        else {
            av_frame_free(&pframe);
        }
        av_free_packet(dec_pkt);
    }
    //Flush the encoder
    ret = flush_encoder(ifmt_ctx, ofmt_ctx, 0, framecnt);
    if (ret < 0) {
        printf("Flushing encoder failed\n");
        return -1;
    }

    //Write the file trailer
    av_write_trailer(ofmt_ctx);

    //Clean up
#if USEFILTER
    if (filter_graph)
        avfilter_graph_free(&filter_graph);
#endif
    if (video_st)
        avcodec_close(video_st->codec);
    av_free(out_buffer);
    avio_close(ofmt_ctx->pb);
    avformat_close_input(&ifmt_ctx);
    avformat_free_context(ofmt_ctx);
    CloseHandle(hThread);
    return 0;
}

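// Drain the encoder. Codecs with CODEC_CAP_DELAY (x264 buffers frames for
// lookahead and B-frame decisions) still hold pictures after the last input
// frame, so avcodec_encode_video2() is called with a NULL frame until
// got_frame returns 0, and each drained packet is timestamped and muxed the
// same way as in the main loop.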
int flush_encoder(AVFormatContext *ifmt_ctx, AVFormatContext *ofmt_ctx, unsigned int stream_index, int framecnt) {
    int ret;
    int got_frame;
    AVPacket enc_pkt;
    if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
        CODEC_CAP_DELAY))
        return 0;
    while (1) {
        enc_pkt.data = NULL;
        enc_pkt.size = 0;
        av_init_packet(&enc_pkt);
        ret = avcodec_encode_video2(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
            NULL, &got_frame);
        if (ret < 0)
            break;
        if (!got_frame) {
            ret = 0;
            break;
        }
        printf("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n", enc_pkt.size);

        //Write PTS
        AVRational time_base = ofmt_ctx->streams[stream_index]->time_base; //{ 1, 1000 }
        AVRational r_framerate1 = ifmt_ctx->streams[stream_index]->r_frame_rate; //{ 50, 2 }
        AVRational time_base_q = { 1, AV_TIME_BASE };
        //Duration between two frames, in the internal microsecond time base
        int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));
        enc_pkt.pts = av_rescale_q(framecnt*calc_duration, time_base_q, time_base);
        enc_pkt.dts = enc_pkt.pts;
        enc_pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base);

        /* copy packet */
        //Convert PTS/DTS
        enc_pkt.pos = -1;
        framecnt++;
        ofmt_ctx->duration = enc_pkt.duration * framecnt;

        /* mux encoded frame */
        ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
        if (ret < 0)
            break;
    }
    return ret;
}