Advanced search

Media (0)

Keyword: - Tags - / users

No media matching your criteria is available on the site.

Other articles (74)

  • Managing creation and editing rights for objects

    8 February 2011, by

    By default, many features are restricted to administrators, but each can be configured independently to change the minimum status required to use it, notably: writing content on the site, adjustable through the form-template management; adding notes to articles; adding captions and annotations to images;

  • Supporting all media types

    13 April 2011, by

    Unlike most software and media-sharing platforms, MediaSPIP aims to manage as many different media types as possible. The following are just a few examples from an ever-expanding list of supported formats: images: png, gif, jpg, bmp and more; audio: MP3, Ogg, Wav and more; video: AVI, MP4, OGV, mpg, mov, wmv and more; text, code and other data: OpenOffice, Microsoft Office (Word, PowerPoint, Excel), web (html, CSS), LaTeX, Google Earth and (...)

  • Uploading media and themes via FTP

    31 May 2013, by

    MediaSPIP also handles media uploaded via FTP. If you prefer to upload this way, retrieve the access credentials for your MediaSPIP site and use your favourite FTP client.
    From the outset you will find the following folders in your FTP space: config/: the site's configuration folder; IMG/: media that have already been processed and are online on the site; local/: the website's cache directory; themes/: custom themes or stylesheets; tmp/: working folder (...)

On other sites (6375)

  • What am I doing wrong with my audio writing in ffmpeg? [on hold]

    12 September 2014, by Michael Nguyen

    I'm trying to splice multiple video sources into one, and I'm having trouble with the audio portion; the audio part of my code doesn't seem to work, and I can't see why. Could somebody help me understand what I am doing wrong? The method that does all the work is called renderMovieRequest.

    Thanks in advance.

    My entire code can be found here: http://pastebin.com/rAZkU3XZ

    Any help would be appreciated.
    Below is a snippet of the code (it is too long to post in full); a short sketch of a dedicated audio decode path follows the snippet.

    int64_t timeBase;
    bool seek(AVFormatContext *pFormatCtx, int frameIndex){

       if(!pFormatCtx)
           return false;

       int64_t seekTarget = int64_t(frameIndex) * timeBase;

       if(av_seek_frame(pFormatCtx, -1, seekTarget, AVSEEK_FLAG_ANY) < 0) {
           ELOG("av_seek_frame failed.");
           return false;
       }

       return true;

    }

    typedef struct OutputStream {
       AVStream *st;
       /* pts of the next frame that will be generated */
       int64_t next_pts;
       int samples_count;
       AVFrame *frame;
       AVFrame *tmp_frame;
       float t, tincr, tincr2;
       struct SwsContext *sws_ctx;
       struct SwrContext *swr_ctx;
    } OutputStream;


    static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
    {
       /* rescale output packet timestamp values from codec to stream timebase */
       av_packet_rescale_ts(pkt, *time_base, st->time_base);
       pkt->stream_index = st->index;
       /* Write the compressed frame to the media file. */
       log_packet(fmt_ctx, pkt);
       return av_interleaved_write_frame(fmt_ctx, pkt);
    }
    /* Add an output stream. */
    static void add_stream(OutputStream *ost, AVFormatContext *oc,
                          AVCodec **codec,
                          enum AVCodecID codec_id) {
       AVCodecContext *c;
       int i;
       /* find the encoder */
       *codec = avcodec_find_encoder(codec_id);
       if (!(*codec)) {
           ELOG("Could not find encoder for '%s'\n", avcodec_get_name(codec_id));
           return;
       }
       ost->st = avformat_new_stream(oc, *codec);
       if (!ost->st) {
           ELOG("Could not allocate stream\n");
           return;
       }
       ost->st->id = oc->nb_streams-1;
       c = ost->st->codec;
       switch ((*codec)->type) {
       case AVMEDIA_TYPE_AUDIO:
           c->sample_fmt  = (*codec)->sample_fmts ?
               (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
           c->bit_rate    = 64000;
           c->sample_rate = 44100;
           if ((*codec)->supported_samplerates) {
               c->sample_rate = (*codec)->supported_samplerates[0];
               for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                   if ((*codec)->supported_samplerates[i] == 44100)
                       c->sample_rate = 44100;
               }
           }
           c->channels        = av_get_channel_layout_nb_channels(c->channel_layout);
           c->channel_layout = AV_CH_LAYOUT_STEREO;
           if ((*codec)->channel_layouts) {
               c->channel_layout = (*codec)->channel_layouts[0];
               for (i = 0; (*codec)->channel_layouts[i]; i++) {
                   if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
                       c->channel_layout = AV_CH_LAYOUT_STEREO;
               }
           }
           c->channels        = av_get_channel_layout_nb_channels(c->channel_layout);
           ost->st->time_base = (AVRational){ 1, c->sample_rate };
           break;
       case AVMEDIA_TYPE_VIDEO:
           c->codec_id = codec_id;
           c->bit_rate = 400000;
           /* Resolution must be a multiple of two. */
    //        c->width    = 352;
    //        c->height   = 288;
           c->width    = 1280;
           c->height   = 720;

           /* timebase: This is the fundamental unit of time (in seconds) in terms
            * of which frame timestamps are represented. For fixed-fps content,
            * timebase should be 1/framerate and timestamp increments should be
            * identical to 1. */
           ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
           c->time_base       = ost->st->time_base;
           c->gop_size      = 12; /* emit one intra frame every twelve frames at most */
           c->pix_fmt       = STREAM_PIX_FMT;
           if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
               /* just for testing, we also add B frames */
               c->max_b_frames = 2;
           }
           if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
               /* Needed to avoid using macroblocks in which some coeffs overflow.
                * This does not happen with normal video, it just happens here as
                * the motion of the chroma plane does not match the luma plane. */
               c->mb_decision = 2;
           }
       break;
       default:
           break;
       }
       /* Some formats want stream headers to be separate. */
       if (oc->oformat->flags & AVFMT_GLOBALHEADER)
           c->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    /**************************************************************/
    /* audio output */
    static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                     uint64_t channel_layout,
                                     int sample_rate, int nb_samples)
    {
       AVFrame *frame = av_frame_alloc();
       int ret;
       if (!frame) {
           fprintf(stderr, "Error allocating an audio frame\n");
           exit(1);
       }
       frame->format = sample_fmt;
       frame->channel_layout = channel_layout;
       frame->sample_rate = sample_rate;
       frame->nb_samples = nb_samples;
       if (nb_samples) {
           ret = av_frame_get_buffer(frame, 0);
           if (ret < 0) {
               fprintf(stderr, "Error allocating an audio buffer\n");
               exit(1);
           }
       }
       return frame;
    }
    static int open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
    {
       AVCodecContext *c;
       int nb_samples;
       int ret;
       AVDictionary *opt = NULL;
       c = ost->st->codec;
       /* open it */
       av_dict_copy(&opt, opt_arg, 0);
       ret = avcodec_open2(c, codec, &opt);
       av_dict_free(&opt);
       if (ret < 0) {
           ELOG("Could not open audio codec: %s\n", av_err2str(ret));
           return ret;
       }
       /* init signal generator */
       ost->t     = 0;
       ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
       /* increment frequency by 110 Hz per second */
       ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
       if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
           nb_samples = 10000;
       else
           nb_samples = c->frame_size;
       ost->frame     = alloc_audio_frame(c->sample_fmt, c->channel_layout,
                                          c->sample_rate, nb_samples);
       ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
                                          c->sample_rate, nb_samples);
       /* create resampler context */
           ost->swr_ctx = swr_alloc();
           if (!ost->swr_ctx) {
               ELOG("Could not allocate resampler context\n");
               return -300;
           }
           /* set options */
           av_opt_set_int       (ost->swr_ctx, "in_channel_count",   c->channels,       0);
           av_opt_set_int       (ost->swr_ctx, "in_sample_rate",     c->sample_rate,    0);
           av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt",      AV_SAMPLE_FMT_S16, 0);
           av_opt_set_int       (ost->swr_ctx, "out_channel_count",  c->channels,       0);
           av_opt_set_int       (ost->swr_ctx, "out_sample_rate",    c->sample_rate,    0);
           av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt",     c->sample_fmt,     0);
           /* initialize the resampling context */
           if ((ret = swr_init(ost->swr_ctx)) < 0) {
               ELOG("Failed to initialize the resampling context: %i\n", ret);
               return ret;
           }

           return 0;
    }

    /*
    * encode one audio frame and send it to the muxer
    * return 1 when encoding is finished, 0 otherwise
    */
    static int write_audio_frame(AVFormatContext *oc, OutputStream *ost, AVFrame *frame)
    {
       AVCodecContext *c;
       AVPacket pkt = { 0 }; // data and size must be 0;
    //    AVFrame *frame;
       int ret;
       int got_packet;
       int dst_nb_samples;
       av_init_packet(&pkt);
       c = ost->st->codec;
    //    frame = get_audio_frame(ost);
       if (frame) {
           /* convert samples from native format to destination codec format, using the resampler */
               /* compute destination number of samples */
               dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
                                               c->sample_rate, c->sample_rate, AV_ROUND_UP);
               av_assert0(dst_nb_samples == frame->nb_samples);
           /* when we pass a frame to the encoder, it may keep a reference to it
            * internally;
            * make sure we do not overwrite it here
            */
           ret = av_frame_make_writable(ost->frame);
           if (ret < 0) {
               ELOG("Unable to prepare frame for writing: Error code: %s", av_err2str(ret));
               return ret;
           }
               /* convert to destination format */
               ret = swr_convert(ost->swr_ctx,
                                 ost->frame->data, dst_nb_samples,
                                 (const uint8_t **)frame->data, frame->nb_samples);
               if (ret < 0) {
                   ELOG("Error while converting: %s\n", av_err2str(ret));
                   return -1;
               }
               frame = ost->frame;
           frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
           ost->samples_count += dst_nb_samples;
       }
       ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
       if (ret < 0) {
           ELOG("Error encoding audio frame: %s\n", av_err2str(ret));
           return -1;
       }
       if (got_packet) {
           ret = write_frame(oc, &c->time_base, ost->st, &pkt);
           if (ret < 0) {
               ELOG( "Error while writing audio frame: %s\n", av_err2str(ret));
               return -1;
           }
       }
       return (frame || got_packet) ? 0 : 1;
    }


    /**************************************************************/
    /* video output */
    static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
    {
       AVFrame *picture;
       int ret;
       picture = av_frame_alloc();
       if (!picture)
           return NULL;
       picture->format = pix_fmt;
       picture->width  = width;
       picture->height = height;
       /* allocate the buffers for the frame data */
       ret = av_frame_get_buffer(picture, 32);
       if (ret < 0) {
           fprintf(stderr, "Could not allocate frame data.\n");
           exit(1);
       }
       return picture;
    }


    static int open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
    {
       int ret;
       AVCodecContext *c = ost->st->codec;
       AVDictionary *opt = NULL;
       av_dict_copy(&opt, opt_arg, 0);
       /* open the codec */
       ret = avcodec_open2(c, codec, &opt);
       av_dict_free(&opt);

       if (ret < 0) {
           ELOG("Could not open video codec: %s\n", av_err2str(ret));
           return ret;
       }
       /* allocate and init a re-usable frame */
       DLOG("Allocate and init a are-usable frame: %i x %i Format: %i", c->width, c->height, c->pix_fmt);
       ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
       if (!ost->frame) {
           ELOG("Could not allocate video frame\n");
           return -100;
       }

       /* If the output format is not YUV420P, then a temporary YUV420P
        * picture is needed too. It is then converted to the required
        * output format. */
       ost->tmp_frame = NULL;
       if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
           DLOG("input format is not YUV420P converting to size %i x %i", c->width, c->height);
           ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
           if (!ost->tmp_frame) {
               ELOG("Could not allocate temporary picture\n");
               return -200;
           }
       }

       return 0;
    }

    /*
    * encode one video frame and send it to the muxer
    * return 1 when encoding is finished, 0 otherwise
    */
    static int write_video_frame(AVFormatContext *oc, OutputStream *ost, AVFrame *frame)
    {
       int ret;
       AVCodecContext *c;
       int got_packet = 0;
       c = ost->st->codec;

       if (oc->oformat->flags & AVFMT_RAWPICTURE) {
           /* a hack to avoid data copy with some raw video muxers */
           AVPacket pkt;
           av_init_packet(&pkt);
           if (!frame)
               return 1;
           pkt.flags        |= AV_PKT_FLAG_KEY;
           pkt.stream_index  = ost->st->index;
           pkt.data          = (uint8_t *)frame;
           pkt.size          = sizeof(AVPicture);
           pkt.pts = pkt.dts = frame->pts;
           av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
           ret = av_interleaved_write_frame(oc, &pkt);
       } else {
           AVPacket pkt = { 0 };
           av_init_packet(&pkt);
           /* encode the image */
           ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
           if (ret < 0) {
               fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
               exit(1);
           }
           if (got_packet) {
               ret = write_frame(oc, &c->time_base, ost->st, &pkt);
           } else {
               ret = 0;
           }
       }
       if (ret < 0) {
           fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
           exit(1);
       }
       return (frame || got_packet) ? 0 : 1;
    }
    static void close_stream(AVFormatContext *oc, OutputStream *ost)
    {
       avcodec_close(ost->st->codec);
       av_frame_free(&ost->frame);
       av_frame_free(&ost->tmp_frame);
       sws_freeContext(ost->sws_ctx);
       swr_free(&ost->swr_ctx);
    }



    int renderMovieRequest(movieRequest *movieRequestObj, string outputPath) {
       AVOutputFormat *ofmt = NULL;
       AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
       AVFormatContext *pFormatCtx = NULL;
       AVCodec *audio_codec, *video_codec;

       OutputStream video_st = { 0 }, audio_st = { 0 };
       size_t            i;
       int             videoStream, audioStream;
       AVCodecContext  *pCodecCtx = NULL;
       AVCodec         *pCodec = NULL;
       AVFrame         *pFrame = NULL;
       AVFrame         *pFrameRGB = NULL;
       AVPacket        packet = { 0 };
       int             frameFinished;
       int             audioFrameFinished;
       int             numBytes;
       uint8_t         *buffer = NULL;
       AVDictionary    *optionsDict = NULL;
       AVDictionary *opt = NULL;
       struct SwsContext      *sws_ctx = NULL;

       const char *in_filename, *out_filename;
       int ret;

       int have_audio = 0, have_video = 0;
       int encode_audio = 0, encode_video = 0;

       processProtobuf(movieRequestObj);

       out_filename = outputPath.c_str();

       av_register_all();

       DLOG("attempting to create context for output file %s", out_filename);

       avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
       if (!ofmt_ctx) {
           ELOG("Could not create output context\n");
           ret = AVERROR_UNKNOWN;
           return ret; //goto end;
       }
       ofmt = ofmt_ctx->oformat;

      /* Add the audio and video streams using the default format codecs
          * and initialize the codecs. */
         if (ofmt->video_codec != AV_CODEC_ID_NONE) {
             add_stream(&video_st, ofmt_ctx, &video_codec, ofmt->video_codec);
             have_video = 1;
             encode_video = 1;
         }
         if (ofmt->audio_codec != AV_CODEC_ID_NONE) {
             add_stream(&audio_st, ofmt_ctx, &audio_codec, ofmt->audio_codec);
             have_audio = 1;
             encode_audio = 1;
         }

       DLOG("allocate encode buffers");
    /* Now that all the parameters are set, we can open the audio and
        * video codecs and allocate the necessary encode buffers. */
       if (have_video)
           open_video(ofmt_ctx, video_codec, &video_st, opt);
       if (have_audio) {
           DLOG("Opening audio codec");
           open_audio(ofmt_ctx, audio_codec, &audio_st, opt);
       }

       DLOG("open output file for writing");
      /* open the output file, if needed */
       if (!(ofmt->flags & AVFMT_NOFILE)) {
           ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
           if (ret < 0) {
               ELOG( "Could not open '%s': %s\n", out_filename, av_err2str(ret));
               return 1;
           }
       }

       /* Write the stream header, if any. */
       ret = avformat_write_header(ofmt_ctx, &opt);
       if (ret < 0) {
           ELOG("Error occurred when opening output file: %s\n", av_err2str(ret));
           return 1;
       }

       vector<clipshptr> * clips = &(movieRequestObj->clips);

       DLOG("ready to process clips: %i", clips->size());
       for (size_t clipIdx = 0; clipIdx < clips->size(); ++clipIdx) {

           shared_ptr<clip> currentClip = clips->at(clipIdx);

           switch (currentClip->getClipType()) {
               case VIDEO_CLIP: {
                   DLOG("clip is a video clip...");

                   shared_ptr<videoclip> vidClip = dynamic_pointer_cast<videoclip>(clips->at(clipIdx));

                   if (vidClip->shouldHaveSegments) {
                       // open the file for reading and create a temporary file for output
                       in_filename = vidClip->vidFileName.c_str();
                       DLOG("Opening %s for reading", in_filename);

                       if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
                           ELOG("Could not open input file '%s'", in_filename);
                           return ret; //goto end;
                       }

                       if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
                           ELOG("Failed to retrieve input stream information");
                           return ret; //goto end;
                       }

                       av_dump_format(ifmt_ctx, 0, in_filename, 0);

                       videoStream = -1;
                       audioStream = -1;
                       // setup input format context and output format context;
    //                    AVStream *video_in_stream = NULL;
                       for (i = 0; i < ifmt_ctx->nb_streams; i++) {
                           if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
                               videoStream=i;
    //                            video_in_stream = ifmt_ctx->streams[i];
                           }
                           else if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO) {
                               audioStream=i;
    //                            video_in_stream = ifmt_ctx->streams[i];
                           }
                       }

                       if (videoStream == -1) {
                           DLOG("not a video stream.");
                           continue;
                       }

                       // Get a pointer to the codec context for the video stream
                       pCodecCtx = ifmt_ctx->streams[videoStream]->codec;
                       if (pCodecCtx == NULL) {
                           ELOG("Error in getting pointer to codec for vidstream");
                       }

                       DLOG("Input pixel format: %i ", pCodecCtx->pix_fmt);

                       // Find the decoder for the video stream
                       pCodec=avcodec_find_decoder(pCodecCtx->codec_id);

                       if(pCodec==NULL) {
                           ELOG("Unsupported codec!\n");
                           return -1; // Codec not found
                       }
                       // Open codec
                       if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0) {
                           ELOG("Unable to open codec");
                           return -1; // Could not open codec
                       }

                       // get the timebase
                       timeBase = (int64_t(pCodecCtx->time_base.num) * AV_TIME_BASE) / int64_t(pCodecCtx->time_base.den);

                       // Allocate video frame
                       pFrame=av_frame_alloc();

                       // Allocate an AVFrame structure
                       pFrameRGB=av_frame_alloc();
                       if(pFrameRGB==NULL)
                           return -1;

                       // Determine required buffer size and allocate buffer
    //                    numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
                       numBytes = avpicture_get_size(PIX_FMT_RGB24, movieRequestObj->width, movieRequestObj->height);
                       DLOG("Buffer size allocated: %i x %i: %i ", movieRequestObj->width, movieRequestObj->height, numBytes);
                       buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

                       sws_ctx = sws_getContext
                       (
                           pCodecCtx->width,
                           pCodecCtx->height,
                           pCodecCtx->pix_fmt,
                           movieRequestObj->width,
                           movieRequestObj->height,
                           PIX_FMT_RGB24,
                           SWS_BILINEAR,
                           NULL,
                           NULL,
                           NULL
                       );

                       // Assign appropriate parts of buffer to image planes in pFrameRGB
                       // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
                       // of AVPicture
                       avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, movieRequestObj->width, movieRequestObj->height);
                       size_t numSegments = vidClip->segments.size();

                       DLOG("Found %i segments to process", numSegments);
                       for (size_t segmentIdx = 0; segmentIdx < numSegments; ++segmentIdx) {
                           // seek to the right position
                           int frameOffset = vidClip->segments.at(segmentIdx).first;
                           int clipDuration = vidClip->segments.at(segmentIdx).second;
                           DLOG("Starting Frame Number: %i Duration: %i", frameOffset, clipDuration);

                           seek(ifmt_ctx, frameOffset);
                           // loop for X frames where X is < frameOffset + clipDuration; clipDuration is the length of the clip in terms of frames
                           for (int frameIdx = frameOffset; frameIdx < (frameOffset + clipDuration); ++frameIdx) {
                               av_init_packet(&packet);
                               int avReadResult = 0;
                               int continueRecording = 1;
                               while ((continueRecording == 1) && (frameIdx < (frameOffset + clipDuration) )) {
                                   avReadResult = av_read_frame(ifmt_ctx, &packet);
                                   if(avReadResult != 0){
                                       if (avReadResult != AVERROR_EOF) {
                                           ELOG("av_read_frame error: %i", avReadResult );
                                       } else {
                                           ILOG("End of input file");
                                       }
                                       continueRecording = 0;
                                   }
                                   // Is this a packet from the video stream?
                                   if(packet.stream_index==videoStream) {
                                       // Decode video frame
                                       avcodec_decode_video2(pCodecCtx, pFrameRGB, &frameFinished, &packet);

                                       // Did we get a video frame?
                                       if(frameFinished) {
                                           // Convert the image from its native format to RGB
                                           sws_scale
                                           (
                                              sws_ctx,
                                              (uint8_t const * const *)pFrame->data,
                                              pFrame->linesize,
                                              0,
                                              pCodecCtx->height,
                                              pFrameRGB->data,
                                              pFrameRGB->linesize
                                           );
                                           write_video_frame(ofmt_ctx, &video_st, pFrameRGB);
                                           frameIdx++;
                                       }

                                   }
                                   else if (packet.stream_index == audioStream) {
                                       // Decode audio frame
                                       DLOG("Audio frame found");
                                       avcodec_decode_audio4(pCodecCtx, pFrameRGB, &audioFrameFinished, &packet);

                                       if (audioFrameFinished) {
    //                                        write the audio frame to file
                                           write_audio_frame(ofmt_ctx, &audio_st, pFrameRGB);

                                       }

                                   }
                                   // Free the packet that was allocated by av_read_frame
                                   av_free_packet(&packet);
                               }
                                   // Free the RGB image

                           }
                       }

                       DLOG("Cleaning up frame allocations");
                       av_free(buffer);
                       av_free(pFrameRGB);
                       // Free the YUV frame
                       av_free(pFrame);

                   } // end video clip processing
               }
               break;

               case TITLE_CLIP: {
                 }
               break;

               default:
                   ELOG("Failed to identify clip");
                   break;
           } // end switch statement

           DLOG("Finished processing clip #%i", clipIdx);
           avformat_close_input(&ifmt_ctx);
       } // end main for loop -> clip iteration


    /* Write the trailer, if any. The trailer must be written before you
        * close the CodecContexts open when you wrote the header; otherwise
        * av_write_trailer() may try to use memory that was freed on
        * av_codec_close(). */
       av_write_trailer(ofmt_ctx);

       /* Close each codec. */
       if (have_video)
           close_stream(ofmt_ctx, &video_st);
       if (have_audio)
           close_stream(ofmt_ctx, &audio_st);

       if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE)) {
           /* Close the output file. */
           avio_close(ofmt_ctx->pb);
       }

       DLOG("Closing input format context");
       avformat_close_input(&ifmt_ctx);

       DLOG("Free output format context");
       avformat_free_context(ofmt_ctx);

       if (ret < 0 && ret != AVERROR_EOF) {
           ELOG( "Error occurred: %s\n", av_err2str(ret));
           return 1;
       }

       return 0;
    }


    #ifdef __cplusplus
    }

    #endif
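
     For reference, here is a minimal sketch of a dedicated audio decode path. It rests on an assumption about where the problem lies: in the snippet above, avcodec_decode_audio4 is fed the video stream's codec context (pCodecCtx) and pFrameRGB, whereas the audio stream normally needs its own decoder context and its own frame. The names aCodecCtx, aCodec and pAudioFrame are illustrative only and are not part of the original code; the calls use the same deprecated FFmpeg 2.x API as the question.

     /* Sketch only -- not part of the original code. Decode the input's audio
      * stream with a dedicated codec context and frame, then hand the decoded
      * frame to the muxing path prepared in open_audio(). */
     AVCodecContext *aCodecCtx = ifmt_ctx->streams[audioStream]->codec;
     AVCodec *aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
     if (!aCodec || avcodec_open2(aCodecCtx, aCodec, NULL) < 0)
         return -1;                            /* no usable audio decoder */

     AVFrame *pAudioFrame = av_frame_alloc();  /* separate frame for decoded audio */

     /* inside the packet-reading loop: */
     if (packet.stream_index == audioStream) {
         int gotAudioFrame = 0;
         int len = avcodec_decode_audio4(aCodecCtx, pAudioFrame, &gotAudioFrame, &packet);
         if (len >= 0 && gotAudioFrame) {
             /* resample/encode via the OutputStream set up in open_audio() */
             write_audio_frame(ofmt_ctx, &audio_st, pAudioFrame);
         }
     }

     /* after the loop: */
     av_frame_free(&pAudioFrame);
     avcodec_close(aCodecCtx);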
  • stdout to textarea from ffmpeg command

    21 September 2014, by Brett

    Hi, I'm trying to get the output of an ffmpeg command into a text area. Here is my code; I'm posting the lot as I'm brand new to Java and I'm not sure where I've gone wrong. I want to run the command, have the progress bar update, and show the output in the text area.
    Any help would be appreciated.

    package MyPackage;
    import java.util.*;
    import java.io.BufferedReader;
    import java.io.File;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.util.logging.Level;
    import java.util.logging.Logger;


    /**
    *
    * @author brett
    */
    public class NumberAdditionUI extends javax.swing.JFrame {
       private static final long serialVersionUID = 1L;

       /**
        * Creates new form NumberAdditionUI
        */
       public NumberAdditionUI() {
           initComponents();
       }

       /**
        * This method is called from within the constructor to initialize the form.
        * WARNING: Do NOT modify this code. The content of this method is always
        * regenerated by the Form Editor.
        */
       @SuppressWarnings("unchecked")
       //
       private void initComponents() {

           db = new javax.swing.JFileChooser();
           jButton1 = new javax.swing.JButton();
           jButton2 = new javax.swing.JButton();
           jTextField1 = new javax.swing.JTextField();
           jButton3 = new javax.swing.JButton();
           jLabel1 = new javax.swing.JLabel();
           jProgressBar1 = new javax.swing.JProgressBar();
           jScrollPane1 = new javax.swing.JScrollPane();
           jTextArea1 = new javax.swing.JTextArea();

           db.setBackground(java.awt.Color.white);
           db.setCurrentDirectory(new java.io.File("C:\\Users\\brett\\Documents\\convert"));
           db.setDialogTitle("grabAFile");

           setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE);
           setTitle("Smoke Goobies");
           setMaximumSize(getPreferredSize());

           jButton1.setText("Exit");
           jButton1.setHorizontalAlignment(javax.swing.SwingConstants.RIGHT);
           jButton1.addActionListener(new java.awt.event.ActionListener() {
               public void actionPerformed(java.awt.event.ActionEvent evt) {
                   jButton1ActionPerformed(evt);
               }
           });

           jButton2.setText("Browse");
           jButton2.addActionListener(new java.awt.event.ActionListener() {
               public void actionPerformed(java.awt.event.ActionEvent evt) {
                   jButton2ActionPerformed(evt);
               }
           });

           jTextField1.setText("Select A File To Covert");

           jButton3.setText("Run This Puppy");
           jButton3.addActionListener(new java.awt.event.ActionListener() {
               public void actionPerformed(java.awt.event.ActionEvent evt) {
                   jButton3ActionPerformed(evt);
               }
           });

           jLabel1.setFont(new java.awt.Font("Goudy Old Style", 1, 56)); // NOI18N
           jLabel1.setIcon(jLabel1.getIcon());
           jLabel1.setText("   MASHiTuP");

           jProgressBar1.setValue(50);
           jProgressBar1.setBorder(new javax.swing.border.MatteBorder(null));

           jTextArea1.setColumns(20);
           jTextArea1.setRows(5);
           jTextArea1.setAutoscrolls(false);
           jScrollPane1.setViewportView(jTextArea1);

           javax.swing.GroupLayout layout = new javax.swing.GroupLayout(getContentPane());
           getContentPane().setLayout(layout);
           layout.setHorizontalGroup(
               layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
               .addGroup(layout.createSequentialGroup()
                   .addContainerGap()
                   .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                       .addComponent(jProgressBar1, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
                       .addGroup(layout.createSequentialGroup()
                           .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
                               .addComponent(jLabel1, javax.swing.GroupLayout.PREFERRED_SIZE, 355, javax.swing.GroupLayout.PREFERRED_SIZE)
                               .addGroup(layout.createSequentialGroup()
                                   .addComponent(jTextField1, javax.swing.GroupLayout.PREFERRED_SIZE, 282, javax.swing.GroupLayout.PREFERRED_SIZE)
                                   .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
                                   .addComponent(jButton2, javax.swing.GroupLayout.PREFERRED_SIZE, 77, javax.swing.GroupLayout.PREFERRED_SIZE))
                               .addGroup(layout.createSequentialGroup()
                                   .addComponent(jButton3)
                                   .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
                                   .addComponent(jButton1)))
                           .addGap(0, 0, Short.MAX_VALUE))
                       .addComponent(jScrollPane1))
                   .addContainerGap())
           );
           layout.setVerticalGroup(
               layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
               .addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup()
                   .addContainerGap()
                   .addComponent(jLabel1, javax.swing.GroupLayout.PREFERRED_SIZE, 73, javax.swing.GroupLayout.PREFERRED_SIZE)
                   .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
                   .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                       .addComponent(jButton2)
                       .addComponent(jTextField1, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
                   .addGap(34, 34, 34)
                   .addComponent(jProgressBar1, javax.swing.GroupLayout.PREFERRED_SIZE, 24, javax.swing.GroupLayout.PREFERRED_SIZE)
                   .addGap(18, 18, 18)
                   .addComponent(jScrollPane1, javax.swing.GroupLayout.PREFERRED_SIZE, 84, javax.swing.GroupLayout.PREFERRED_SIZE)
                   .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, 48, Short.MAX_VALUE)
                   .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                       .addComponent(jButton1)
                       .addComponent(jButton3))
                   .addContainerGap())
           );

           jProgressBar1.getAccessibleContext().setAccessibleName("MYsTATUS");

           pack();
       }//

       private void jButton1ActionPerformed(java.awt.event.ActionEvent evt) {                                        
           System.exit(0);
       }                                        

       private void jButton2ActionPerformed(java.awt.event.ActionEvent evt) {                                        
          int returnVal = db.showOpenDialog( this );
          File f = db.getSelectedFile();
           String filename = f.getAbsolutePath();
          jTextField1.setText(filename);

       }                                        

       private void jButton3ActionPerformed(java.awt.event.ActionEvent evt) {                                        
           java.lang.Runtime rt = java.lang.Runtime.getRuntime();
           java.lang.Process proc = null;
           try {
               proc = rt.exec("ipconfig");

               //proc = rt.exec("ffmpeg -i C:\\Users\\brett\\Documents\\MASH_02.avi C:\\Users\\brett\\Documents\\mash09.avi");
           } catch (IOException ex) {
               Logger.getLogger(NumberAdditionUI.class.getName()).log(Level.SEVERE, null, ex);
           }
           BufferedReader stdInput = new BufferedReader(new InputStreamReader(proc.getInputStream()));
           BufferedReader stdError = new BufferedReader(new InputStreamReader(proc.getErrorStream()));

           // read the output from the command
           System.out.println("Here is the standard output of the command:\n");
           String s;
           try {
               while ((s = stdInput.readLine()) != null) {
                   System.out.println(s);

                   jTextArea1.append(s+"\n\n");

               }
           } catch (IOException ex) {
               Logger.getLogger(NumberAdditionUI.class.getName()).log(Level.SEVERE, null, ex);
           }

           // read any errors from the attempted command
           System.out.println("Here is the standard error of the command (if any):\n");
           try {
               while ((s = stdError.readLine()) != null) {
                   System.out.println(s);
               }
           } catch (IOException ex) {
               Logger.getLogger(NumberAdditionUI.class.getName()).log(Level.SEVERE, null, ex);
           }
       }                                        

       /**
        *
        * @param args the command line arguments
        */
       public static void main(String args[])
    {
           /*
            * Set the Nimbus look and feel
            */
           //
           /*
            * If Nimbus (introduced in Java SE 6) is not available, stay with the
            * default look and feel. For details see
            * http://download.oracle.com/javase/tutorial/uiswing/lookandfeel/plaf.html
            */
           try {
               for (javax.swing.UIManager.LookAndFeelInfo info : javax.swing.UIManager.getInstalledLookAndFeels()) {
                   if ("Nimbus".equals(info.getName())) {
                       javax.swing.UIManager.setLookAndFeel(info.getClassName());
                       break;
                   }
               }
           } catch (ClassNotFoundException | InstantiationException | IllegalAccessException | javax.swing.UnsupportedLookAndFeelException ex) {
               java.util.logging.Logger.getLogger(NumberAdditionUI.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
           }
           //

           /*
            * Create and display the form
            */
           java.awt.EventQueue.invokeLater(new Runnable() {

               @Override
               public void run() {
                   new NumberAdditionUI().setVisible(true);
               }
           });
       }
       // Variables declaration - do not modify
       private javax.swing.JFileChooser db;
       private javax.swing.JButton jButton1;
       private javax.swing.JButton jButton2;
       private javax.swing.JButton jButton3;
       private javax.swing.JLabel jLabel1;
       private javax.swing.JProgressBar jProgressBar1;
       private javax.swing.JScrollPane jScrollPane1;
       private javax.swing.JTextArea jTextArea1;
       private javax.swing.JTextField jTextField1;
       // End of variables declaration
    }
  • Convert PRORES PCM S24LE to H.264 AAC

    23 September 2014, by Adam Walker

    I'm a rookie when it comes to media conversions. I'm trying to use ffmpeg to convert raw PCM S24LE audio into something usable by Premiere. I also have raw ProRes video to deal with.

    Here is the code I currently have.

    cd ../ffmpeg/bin
    :again
    REM stop once every file passed on the command line has been processed
    if "%~1" == "" goto done
    REM encode the raw s24le PCM input to AAC; write to an .m4a container, since AAC streams cannot be muxed into .mp3
    REM (if the defaults do not match the source, add -ar and -ac before -i to set the raw input's sample rate and channel count)
    ffmpeg -f s24le -i "%~1" -strict experimental -acodec aac ../../2Ready4Premiere/%~n1.m4a
    shift
    goto again
    :done
    pause
    exit