Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
/*
 * Encode one video frame (or flush the encoder when frame == NULL) and
 * send the resulting packet to the muxer.
 *
 * oc    - output (muxing) format context
 * ost   - output stream wrapper holding the target stream / codec state
 * frame - frame to encode; NULL signals end-of-stream (encoder flush)
 *
 * Returns 1 when encoding is finished (no input frame and no pending
 * packet from the encoder), 0 otherwise. Exits the process on encode or
 * mux errors rather than returning them.
 */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost, AVFrame *frame)
{
int ret;
AVCodecContext *c;
int got_packet = 0;
c = ost->st->codec;
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
/* a hack to avoid a data copy with some raw video muxers: the packet
 * carries a pointer to the picture itself instead of encoded bytes */
AVPacket pkt;
av_init_packet(&pkt);
if (!frame)
return 1; /* raw mode has no encoder to flush: nothing left to write */
pkt.flags |= AV_PKT_FLAG_KEY; /* every raw picture is a keyframe */
pkt.stream_index = ost->st->index;
pkt.data = (uint8_t *)frame;
pkt.size = sizeof(AVPicture);
pkt.pts = pkt.dts = frame->pts;
/* convert timestamps from codec time base to stream time base */
av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
ret = av_interleaved_write_frame(oc, &pkt);
} else {
AVPacket pkt = { 0 };
av_init_packet(&pkt);
/* encode the image; got_packet says whether pkt holds output
 * (the encoder may buffer frames and emit nothing this call) */
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
if (ret < 0) {
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
exit(1);
}
if (got_packet) {
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
} else {
ret = 0;
}
}
if (ret < 0) {
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
exit(1);
}
/* finished only when there is no more input and the encoder is drained */
return (frame || got_packet) ? 0 : 1;
}
/*
 * Release all per-stream resources owned by the OutputStream wrapper:
 * close the stream's codec, free the reusable frames, and free the
 * scaler/resampler contexts. The av_frame_free/swr_free/sws_freeContext
 * calls tolerate NULL members, so this is safe for partially
 * initialized streams (e.g. a video stream with no swr_ctx).
 */
static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
avcodec_close(ost->st->codec);
av_frame_free(&ost->frame);
av_frame_free(&ost->tmp_frame);
sws_freeContext(ost->sws_ctx);
swr_free(&ost->swr_ctx);
}
- int renderMovieRequest(movieRequest *movieRequestObj, string outputPath) {
- AVOutputFormat *ofmt = NULL;
- AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
- AVFormatContext *pFormatCtx = NULL;
- AVCodec *audio_codec, *video_codec;
- OutputStream video_st = { 0 }, audio_st = { 0 };
- size_t i;
- int videoStream;
- AVCodecContext *pCodecCtx = NULL;
- AVCodec *pCodec = NULL;
- AVFrame *pFrame = NULL;
- AVFrame *pFrameRGB = NULL;
- AVPacket packet = { 0 };
- int frameFinished;
- int numBytes;
- uint8_t *buffer = NULL;
- AVDictionary *optionsDict = NULL;
- AVDictionary *opt = NULL;
- struct SwsContext *sws_ctx = NULL;
- const char *in_filename, *out_filename;
- int ret;
- int have_audio = 0, have_video = 0;
- int encode_audio = 0, encode_video = 0;
- processProtobuf(movieRequestObj);
- // in_filename = argv[1];
- out_filename = outputPath.c_str();
- av_register_all();
- DLOG("attempting to create context for output file %s", out_filename);
- avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
- if (!ofmt_ctx) {
- ELOG("Could not create output context\n");
- ret = AVERROR_UNKNOWN;
- return ret; //goto end;
- }
- ofmt = ofmt_ctx->oformat;
- /* Add the audio and video streams using the default format codecs
- * and initialize the codecs. */
- if (ofmt->video_codec != AV_CODEC_ID_NONE) {
- add_stream(&video_st, ofmt_ctx, &video_codec, ofmt->video_codec);
- have_video = 1;
- encode_video = 1;
- }
- if (ofmt->audio_codec != AV_CODEC_ID_NONE) {
- add_stream(&audio_st, ofmt_ctx, &audio_codec, ofmt->audio_codec);
- have_audio = 1;
- encode_audio = 1;
- }
- DLOG("allocate encode buffers");
- /* Now that all the parameters are set, we can open the audio and
- * video codecs and allocate the necessary encode buffers. */
- if (have_video)
- open_video(ofmt_ctx, video_codec, &video_st, opt);
- if (have_audio)
- open_audio(ofmt_ctx, audio_codec, &audio_st, opt);
- DLOG("open output file for writing");
- /* open the output file, if needed */
- if (!(ofmt->flags & AVFMT_NOFILE)) {
- ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
- if (ret < 0) {
- ELOG( "Could not open '%s': %s\n", out_filename, av_err2str(ret));
- return 1;
- }
- }
- /* Write the stream header, if any. */
- ret = avformat_write_header(ofmt_ctx, &opt);
- if (ret < 0) {
- ELOG("Error occurred when opening output file: %s\n", av_err2str(ret));
- return 1;
- }
- vector<clipShPtr> * clips = &(movieRequestObj->clips);
- DLOG("ready to process clips: %i", clips->size());
- for (size_t clipIdx = 0; clipIdx < clips->size(); ++clipIdx) {
- shared_ptr<clip> currentClip = clips->at(clipIdx);
- DLOG("Processing clip %i [%s]", clipIdx, in_filename);
- switch (currentClip->getClipType()) {
- case VIDEO_CLIP: {
- DLOG("clip is a video clip...");
- shared_ptr<videoClip> vidClip = dynamic_pointer_cast<videoClip>(clips->at(clipIdx));
- if (vidClip->shouldHaveSegments) {
- DLOG("Found segments... :");
- // add clips to new video
- // open the file for reading and create a temporary file for output
- in_filename = vidClip->vidFileName.c_str();
- if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
- ELOG("Could not open input file '%s'", in_filename);
- return ret; //goto end;
- }
- if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
- ELOG("Failed to retrieve input stream information");
- return ret; //goto end;
- }
- av_dump_format(ifmt_ctx, 0, in_filename, 0);
- videoStream = -1;
- // setup input format context and output format context;
- AVStream *in_stream = NULL;
- for (i = 0; i < ifmt_ctx->nb_streams; i++) {
- if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
- videoStream=i;
- in_stream = ifmt_ctx->streams[i];
- }
- }
- if (videoStream == -1) {
- DLOG("not a video stream.");
- continue;
- }
- // Get a pointer to the codec context for the video stream
- pCodecCtx = ifmt_ctx->streams[videoStream]->codec;
- // Find the decoder for the video stream
- pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
- if(pCodec==NULL) {
- ELOG("Unsupported codec!\n");
- return -1; // Codec not found
- }
- // Open codec
- if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0) {
- ELOG("Unable to open codec");
- return -1; // Could not open codec
- }
- // get the timebase
- timeBase = (int64_t(pCodecCtx->time_base.num) * AV_TIME_BASE) / int64_t(pCodecCtx->time_base.den);
- // Allocate video frame
- pFrame=av_frame_alloc();
- // Allocate an AVFrame structure
- pFrameRGB=av_frame_alloc();
- if(pFrameRGB==NULL)
- return -1;
- // Determine required buffer size and allocate buffer
- numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
- DLOG("Buffer size allocated: %i x %i: %i ", pCodecCtx->width, pCodecCtx->height, numBytes);
- buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
- sws_ctx = sws_getContext
- (
- pCodecCtx->width,
- pCodecCtx->height,
- pCodecCtx->pix_fmt,
- pCodecCtx->width,
- pCodecCtx->height,
- PIX_FMT_RGB24,
- SWS_BILINEAR,
- NULL,
- NULL,
- NULL
- );
- // Assign appropriate parts of buffer to image planes in pFrameRGB
- // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
- // of AVPicture
- avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
- size_t numSegments = vidClip->segments.size();
- DLOG("Found %i segments to process", numSegments);
- for (size_t segmentIdx = 0; segmentIdx < numSegments; ++segmentIdx) {
- // seek to the right position
- DLOG("Processing segment # %i", segmentIdx);
- int frameOffset = vidClip->segments.at(segmentIdx).first;
- int clipDuration = vidClip->segments.at(segmentIdx).second;
- DLOG("Starting Frame Number: %i", frameOffset);
- DLOG("Segment duration: %i", clipDuration);
- seek(ifmt_ctx, frameOffset);
- // loop for X frames where X is < frameOffset + clipDuration
- for (int frameIdx = frameOffset; frameIdx < (frameOffset + clipDuration); ++frameIdx) {
- DLOG("reading in frame %i", frameIdx);
- av_init_packet(&packet);
- int avReadResult = 0;
- int continueRecording = 1;
- while(continueRecording == 1) {
- avReadResult = av_read_frame(ifmt_ctx, &packet);
- if(avReadResult != 0){
- if (avReadResult != AVERROR_EOF) {
- ELOG("av_read_frame error: %i", avReadResult );
- } else {
- ILOG("End of input file");
- }
- continueRecording = 0;
- }
- // Is this a packet from the video stream?
- if(packet.stream_index==videoStream) {
- // Decode video frame
- avcodec_decode_video2(pCodecCtx, pFrameRGB, &frameFinished, &packet);
- // Did we get a video frame?
- if(frameFinished) {
- // // Convert the image from its native format to RGB
- // sws_scale
- // (
- // sws_ctx,
- // (uint8_t const * const *)pFrame->data,
- // pFrame->linesize,
- // 0,
- // pCodecCtx->height,
- // pFrameRGB->data,
- // pFrameRGB->linesize
- // );
- // Save the frame to disk
- // if(frameIdx % 10 == 0) {
- // SaveFrameLocal(pFrameRGB, pCodecCtx->width, pCodecCtx->height, frameIdx, vidClip->vidFileName.c_str());
- // }
- write_video_frame(ofmt_ctx, &video_st, pFrameRGB);
- frameIdx++;
- }
- }
- else {
- // this is an audio frame.
- }
- // Free the packet that was allocated by av_read_frame
- av_free_packet(&packet);
- }
- // Free the RGB image
- DLOG("Cleaning up frame allocations");
- av_free(buffer);
- av_free(pFrameRGB);
- // Free the YUV frame
- av_free(pFrame);
- }
- }
- } // end video clip processing
- }
- break;
- case TITLE_CLIP: {
- }
- break;
- default:
- ELOG("Failed to identify clip");
- break;
- } // end switch statement
- } // end main for loop -> clip iteration
- /* Write the trailer, if any. The trailer must be written before you
- * close the CodecContexts open when you wrote the header; otherwise
- * av_write_trailer() may try to use memory that was freed on
- * av_codec_close(). */
- av_write_trailer(ofmt_ctx);
- /* Close each codec. */
- if (have_video)
- close_stream(ofmt_ctx, &video_st);
- if (have_audio)
- close_stream(ofmt_ctx, &audio_st);
- if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE)) {
- /* Close the output file. */
- avio_close(ofmt_ctx->pb);
- }
- DLOG("Closing input format context");
- avformat_close_input(&ifmt_ctx);
- DLOG("Free ouptut format context");
- avformat_free_context(ofmt_ctx);
- if (ret < 0 && ret != AVERROR_EOF) {
- ELOG( "Error occurred: %s\n", av_err2str(ret));
- return 1;
- }
- return 0;
- }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement