
Advanced search
Media (1)
-
La conservation du net art au musée. Les stratégies à l’œuvre
26 May 2011
Updated: July 2013
Language: French
Type: Text
Other articles (68)
-
Customising by adding your logo, banner or background image
5 September 2013 — Some themes support three customisation elements: adding a logo; adding a banner; adding a background image.
-
Writing a news item
21 June 2013 — Present changes to your MediaSPIP, or news from your projects, in the news section.
In MediaSPIP's default theme, spipeo, news items are displayed at the bottom of the main page, below the editorials.
You can customise the news creation form.
News creation form: for a document of type news, the default fields are: publication date (customise the publication date) (...)
-
Publishing on MédiaSPIP
13 June 2013 — Can I post content from an iPad tablet?
Yes, if your MédiaSPIP installation is at version 0.2 or higher. If in doubt, ask your MédiaSPIP administrator.
On other sites (13595)
-
FFMPEG filter is not implementing
31 August 2017, by Alok Kumar Verma — I'm using FFMPEG to apply a filter and save the result to a specified path. Since I'm new to FFMPEG, I followed this link to import the FFMPEG libraries into my project.
The FFMPEG libraries work fine inside my project. I then followed the FFMPEG Commands link to apply filters via commands; for now I'm using the vintage filter command to check whether it works.
I followed the FFMPEG Project in Android link to apply the filters and save the output to my storage path.
I take a file from my storage path, apply the filter to it, and save the result to the specified path. The problem: the command appears to run, yet no filtered file is saved, and there is no exception or error in my logcat. I can't tell what is missing to get the desired result.
This is my code, where I use FFMPEG to apply filters and save the output to the specified path.
FilterActivity.java
public class FilterActivity extends AppCompatActivity {
private ArrayList<String> videoReceiveddata = null;
private EPlayerView ePlayerView;
private StringBuilder stringBuilder;
private DataSource.Factory dataSourceFactory;
private ExtractorsFactory extractorsFactory;
private Button play,stop,noneFilter,faded,noir,instant;
private FFmpeg fFmpeg;
private int choice = 0;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_filter);
Bundle videoExtras = getIntent().getExtras();
videoReceiveddata = videoExtras.getStringArrayList("sendData");
Log.e("RECEIVED_VIDEO====", videoReceiveddata.toString());
stringBuilder = new StringBuilder();
for(String path : videoReceiveddata){
stringBuilder.append(path);
}
// loading the ffmpeg binary files
loadFFMPEGBinary();
BandwidthMeter bandwidthMeter = new DefaultBandwidthMeter();
TrackSelection.Factory videoTrackSelectionFactory =
new AdaptiveTrackSelection.Factory(bandwidthMeter);
TrackSelector trackSelector =
new DefaultTrackSelector(videoTrackSelectionFactory);
// Measures bandwidth during playback. Can be null if not required.
DefaultBandwidthMeter defaultBandwidthMeter = new DefaultBandwidthMeter();
// Produces DataSource instances through which media data is loaded.
dataSourceFactory = new DefaultDataSourceFactory(getApplicationContext(),
Util.getUserAgent(getApplicationContext(), "TestApp"), defaultBandwidthMeter);
// Produces Extractor instances for parsing the media data.
extractorsFactory = new DefaultExtractorsFactory();
// This is the MediaSource representing the media to be played.
MediaSource videoSource = new ExtractorMediaSource(Uri.parse(stringBuilder.toString()),
dataSourceFactory, extractorsFactory, null, null);
// 2. Create the player
final SimpleExoPlayer player =
ExoPlayerFactory.newSimpleInstance(getApplicationContext(), trackSelector);
player.prepare(videoSource);
ePlayerView = (EPlayerView) findViewById(R.id.ePlayer);
ePlayerView.setSimpleExoPlayer(player);
ePlayerView.onResume();
play = (Button) findViewById(R.id.playButton);
stop = (Button) findViewById(R.id.stopButton);
faded = (Button) findViewById(R.id.fadedFilter);
noneFilter = (Button) findViewById(R.id.noFilter);
noir = (Button) findViewById(R.id.noirFilter);
instant = (Button) findViewById(R.id.instantFilter);
play.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
MediaSource videoSource = new ExtractorMediaSource(Uri.parse(stringBuilder.toString()),
dataSourceFactory, extractorsFactory, null, null);
player.prepare(videoSource);
player.setPlayWhenReady(true);
}
});
stop.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
player.stop();
}
});
noneFilter.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
choice = 1;
ePlayerView.setGlFilter(new GlFilter());
}
});
faded.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
choice = 2;
ePlayerView.setGlFilter(new GlSepiaFilter());
Log.e("Filter Applied",ePlayerView.toString());
Log.e("Filter Applied====",stringBuilder.toString());
}
});
}
//loading binary of ffmpeg
private void loadFFMPEGBinary() {
try {
if (fFmpeg == null) {
Log.e("TEST=====", "ffmpeg : null");
fFmpeg = FFmpeg.getInstance(this);
}
fFmpeg.loadBinary(new LoadBinaryResponseHandler() {
@Override
public void onFailure() {
showUnsupportedExceptionDialog();
}
@Override
public void onSuccess() {
Log.d("TESTAPP====", "ffmpef : coorect loaded");
}
});
} catch (FFmpegNotSupportedException e) {
showUnsupportedExceptionDialog();
} catch (Exception e) {
Log.d("TESTAPP=====", "Exception not supported" + e);
}
}
private void showUnsupportedExceptionDialog() {
new AlertDialog.Builder(FilterActivity.this)
.setIcon(android.R.drawable.ic_dialog_alert)
.setTitle("Not Supported")
.setMessage("Device Not Supported")
.setCancelable(false)
.setPositiveButton(android.R.string.ok, new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
FilterActivity.this.finish();
}
})
.create()
.show();
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
MenuInflater menuInflater = getMenuInflater();
menuInflater.inflate(R.menu.filter_menu,menu);
return true;
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
switch (item.getItemId()) {
case R.id.save:
if(choice == 1){
addThatFilter();
}else if(choice == 2){
Toast.makeText(getApplicationContext(),"No filter",
Toast.LENGTH_SHORT).show();
}
default:
return super.onOptionsItemSelected(item);
}
}
private void addThatFilter() {
String savingPath = Environment.getExternalStorageDirectory().getAbsolutePath().toString()
+ "/FilterVideo.mp4";
String complexCommand = "ffmpeg -y -i"+ stringBuilder.toString() +"-strict experimental -vf " +
"curves=vintage -s 640x480 -r 30 -aspect 4:3 -ab 48000 -ac 2 -ar 22050 -b 2097k -vcodec " +
savingPath;
execFFMPEGBinary(complexCommand);
}
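// NOTE: as written, the command above almost certainly never runs:
// there are no spaces around "-i" or around the input path, the
// trailing "-vcodec" has no codec name (so the output path is parsed
// as the codec), and the leading "ffmpeg" must not be part of the
// argument list handed to the library. A corrected command (the codec
// name "mpeg4" here is an assumption) would look like:
//
//   String complexCommand = "-y -i " + stringBuilder.toString()
//           + " -strict experimental -vf curves=vintage -s 640x480 -r 30"
//           + " -aspect 4:3 -ab 48000 -ac 2 -ar 22050 -b 2097k"
//           + " -vcodec mpeg4 " + savingPath;
//
// and execFFMPEGBinary() below would need to pass the command split
// into separate arguments, e.g. fFmpeg.execute(complexCommand.split(" "),
// handler), instead of wrapping the whole string in a one-element array.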
private void execFFMPEGBinary( final String complexCommand) {
try{
fFmpeg.execute(new String[]{complexCommand}, new ExecuteBinaryResponseHandler(){
@Override
public void onFailure(String message) {
Log.e("Failed with output", message);
}
@Override
public void onSuccess(String message) {
Toast.makeText(getApplicationContext(),"Success!",Toast.LENGTH_SHORT)
.show();
}
});
}catch (FFmpegCommandAlreadyRunningException e){
//do nothing
}
} }
</string> -
ffmpeg; opus encoded sound in webm does not work with ffplay or YouTube, only VLC [on hold]
2 August 2017, by Mockarutan — I'm having trouble getting Opus-encoded sound in the webm container to work. I'm using libopus in ffmpeg.
The file works in VLC, but not in ffplay or on YouTube. If I take the same raw wav data and convert it to Opus/webm with the pre-compiled ffmpeg.exe, the result works in VLC, ffplay and YouTube.
So ffmpeg can obviously do it correctly; I must be doing something wrong in my code.
The file my code produces: https://drive.google.com/file/d/0B16rIXjPXJCqcU5HVllIYW1iODg/view?usp=sharing
Edit, more details I forgot in my frustration: the file can be opened by ffplay and uploaded to YouTube (when I interleave it with VP9 video), but the sound is just "ticks"; example: https://www.youtube.com/watch?v=j_ShBbuizeo&feature=youtu.be
I have read through all the ffmpeg example code I know of, but all of it uses the old API, not the send/receive API, so a big part of it no longer applies. My code works with every other codec I've tested, including H.264+AAC in mp4, VP8+Opus in ogg and raw PCM F32LE in wav. I would have gone with VP8+Opus in ogg if the license were as straightforward as the webm license.
I've looked through the source of the ffmpeg.exe command-line tool and copied everything applicable into my code base.
(Edit 3: reduced the code as much as I can.)
Here is my code: https://pastebin.com/HTuc0g8K
Setup:
int initialize(int sample_rate, int per_frame_audio_samples, int audio_bitrate, const char *filename)
{
int ret;
avcodec_register_all();
av_register_all();
ret = avformat_alloc_output_context2(&outctx, NULL, "webm", filename);
if (ret < 0)
return ret;
aud_codec = avcodec_find_encoder(aud_codec_id);
avcodec_register(aud_codec);
if (!aud_codec)
return -1;
// Setup Audio Stream
aud_codec_context = avcodec_alloc_context3(aud_codec);
if (!aud_codec_context)
return -1;
/* select other audio parameters supported by the encoder */
aud_codec_context->bit_rate = audio_bitrate;
aud_codec_context->sample_rate = sample_rate;
aud_codec_context->sample_fmt = sample_fmt;
aud_codec_context->channel_layout = AV_CH_LAYOUT_STEREO;
aud_codec_context->channels = av_get_channel_layout_nb_channels(aud_codec_context->channel_layout);
aud_codec_context->codec = aud_codec;
aud_codec_context->codec_id = aud_codec_id;
AVRational time_base;
time_base.num = per_frame_audio_samples;
time_base.den = aud_codec_context->sample_rate;
aud_codec_context->time_base = time_base;
ret = avcodec_open2(aud_codec_context, aud_codec, NULL);
if (ret < 0)
return ret;
outctx->audio_codec = aud_codec;
outctx->audio_codec_id = aud_codec_id;
audio_st = avformat_new_stream(outctx, aud_codec);
avcodec_parameters_from_context(audio_st->codecpar, aud_codec_context);
conv_time_base.num = aud_codec_context->frame_size;
conv_time_base.den = aud_codec_context->sample_rate;
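/* With libopus the encoder's frame_size is only known after
 * avcodec_open2() — typically 960 samples (20 ms) at 48 kHz — so
 * conv_time_base counts whole Opus frames; the pts counter in
 * write_audio_frame() below is in these per-frame units. */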
// Setup audio frame
aud_frame = av_frame_alloc();
aud_frame->nb_samples = aud_codec_context->frame_size;
aud_frame->format = aud_codec_context->sample_fmt;
aud_frame->channel_layout = aud_codec_context->channel_layout;
aud_frame->sample_rate = aud_codec_context->sample_rate;
int buffer_size;
if (aud_codec_context->frame_size == 0)
{
buffer_size = per_frame_audio_samples * 2 * 4;
aud_frame->nb_samples = per_frame_audio_samples;
}
else
{
buffer_size = av_samples_get_buffer_size(NULL, aud_codec_context->channels, aud_codec_context->frame_size,
aud_codec_context->sample_fmt, 0);
}
if (av_sample_fmt_is_planar(sample_fmt))
ret = av_frame_get_buffer(aud_frame, buffer_size / 2);
else
ret = av_frame_get_buffer(aud_frame, buffer_size);
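/* Side note: the second argument of av_frame_get_buffer() is the
 * buffer *alignment*, not a size — 0 (auto) or 32 are the usual
 * values; passing a byte count here is almost certainly not what
 * was intended. */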
if (!aud_frame || ret < 0)
return ret;
// Setup audio resampler
audio_swr_ctx = swr_alloc();
if (!audio_swr_ctx)
return -1;
/* set options */
av_opt_set_int(audio_swr_ctx, "in_channel_layout", aud_codec_context->channel_layout, 0);
av_opt_set_int(audio_swr_ctx, "in_sample_rate", sample_rate, 0);
av_opt_set_int(audio_swr_ctx, "in_frame_size", per_frame_audio_samples, 0);
av_opt_set_sample_fmt(audio_swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_FLT, 0);
av_opt_set_int(audio_swr_ctx, "out_channel_layout", aud_codec_context->channel_layout, 0);
av_opt_set_int(audio_swr_ctx, "out_sample_rate", aud_codec_context->sample_rate, 0);
av_opt_set_int(audio_swr_ctx, "out_frame_size", aud_codec_context->frame_size, 0);
av_opt_set_sample_fmt(audio_swr_ctx, "out_sample_fmt", aud_codec_context->sample_fmt, 0);
/* initialize the resampling context */
if ((ret = swr_init(audio_swr_ctx)) < 0)
{
return ret;
}
dst_rate = aud_codec_context->sample_rate;
src_rate = sample_rate;
src_nb_samples = per_frame_audio_samples;
dst_nb_samples = aud_codec_context->frame_size;
max_dst_nb_samples = av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
dst_nb_channels = av_get_channel_layout_nb_channels(aud_codec_context->channel_layout);
ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels, dst_nb_samples, sample_fmt, 0);
aud_frame_counter = 0;
if (ret < 0)
return ret;
av_dump_format(outctx, 0, filename, 1);
if (!(outctx->oformat->flags & AVFMT_NOFILE))
{
ret = avio_open(&outctx->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0)
{
return ret;
}
}
ret = avformat_write_header(outctx, NULL);
if (ret < 0)
return ret;
return 0;
}
Encoding and ending:
int process_encode_loop(AVFormatContext *local_outctx, AVCodecContext *codec_context, AVStream *stream, AVRational time_base, bool flush)
{
int ret;
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
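/* Drain loop for the send/receive API: keep calling
 * avcodec_receive_packet() until the encoder returns EAGAIN (needs
 * more input) or AVERROR_EOF (fully flushed); each successful call
 * yields one packet to rescale and mux. */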
while (true)
{
ret = avcodec_receive_packet(codec_context, &pkt);
if (!ret)
{
pkt.stream_index = stream->index;
av_packet_rescale_ts(&pkt, time_base, stream->time_base);
av_interleaved_write_frame(local_outctx, &pkt);
av_packet_unref(&pkt);
}
if (ret == AVERROR(EAGAIN))
break;
else if (ret == AVERROR_EOF)
break;
else if (ret < 0)
return ret;
else if (flush == false)
break;
}
return 0;
}
int write_audio_frame(float_t *aud_sample)
{
int ret;
if (dst_nb_samples > max_dst_nb_samples)
{
av_free(&aud_frame->data[0]);
ret = av_samples_alloc(aud_frame->data, &dst_linesize, dst_nb_channels, dst_nb_samples, sample_fmt, 1);
if (ret < 0)
return ret;
max_dst_nb_samples = dst_nb_samples;
}
ret = swr_convert(audio_swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)&aud_sample, src_nb_samples);
if (ret < 0)
{
return ret;
}
aud_frame->data[0] = (uint8_t*)dst_data[0];
aud_frame->extended_data[0] = (uint8_t*)dst_data[0];
aud_frame->pts = aud_frame_counter++;
ret = avcodec_send_frame(aud_codec_context, aud_frame);
ret = process_encode_loop(outctx, aud_codec_context, audio_st, conv_time_base, false);
if (ret < 0)
return ret;
return 0;
}
int finish_audio_encoding()
{
int ret = avcodec_send_frame(aud_codec_context, NULL);
if (ret < 0)
return ret;
ret = process_encode_loop(outctx, aud_codec_context, audio_st, conv_time_base, true);
if (ret < 0)
return ret;
av_write_trailer(outctx);
return ret;
}
Main:
void fill_samples(float_t *dst, int nb_samples, int nb_channels, int sample_rate, float_t *t)
{
int i, j;
float_t tincr = 1.0 / sample_rate;
const float_t c = 2 * M_PI * 440.0;
/* generate sin tone with 440Hz frequency and duplicated channels */
for (i = 0; i < nb_samples; i++) {
*dst = sin(c * *t);
for (j = 1; j < nb_channels; j++)
dst[j] = dst[0];
dst += nb_channels;
*t += tincr;
}
}
int main()
{
int frame_rate = 30;
int sec = 12;
int bit_rate = 192000;
float t = 0;
int src_samples_linesize;
int src_nb_samples = 1024;
int src_channels = 2;
int sample_rate = 48000;
uint8_t **src_data = NULL;
int ret;
initialize(sample_rate, src_nb_samples, bit_rate, "sound_test.webm");
ret = av_samples_alloc_array_and_samples(&src_data, &src_samples_linesize, src_channels,
src_nb_samples, AV_SAMPLE_FMT_FLT, 0);
for (size_t i = 0; i < frame_rate * sec; i++)
{
fill_samples((float *)src_data[0], src_nb_samples, src_channels, sample_rate, &t);
write_audio_frame((float *)src_data[0]);
}
finish_audio_encoding();
cleanup();
return 0;
}
Edit 2: this code reproduces the issue exactly and is fully self-contained, if you have the ffmpeg 3.3.x libraries. I've tried 3.3.1 and 3.3.2, with the same results.
So what could I be missing? I don't think anything is wrong with the sample rates or the other specifications, or it would not work in VLC or in an ogg file. I do think the audio stream itself is correct; it's some part of the header, or how the file is formatted, that is wrong (see the EBML inspection further down).
As explained earlier, the licensing of VP9+Opus in webm is why I have these specific requirements. And the exact problem is that I want the produced audio stream to work when I upload it to YouTube.
Any suggestion is appreciated; thanks in advance!
Some other things I've tried:
I've looked at the header with the "MediaInfo" app built into MKVToolNix:
https://i.gyazo.com/3b29b41629a28bd526bf7637ce3f2601.png
It all looks fine to me. I've also inspected the raw EBML with EBML-Viewer (https://code.google.com/archive/p/ebml-viewer/), and there I can see some differences between the files:
My file: https://i.gyazo.com/6fa8c540a2698a8a4d3421d363aede0a.png
File produced with ffmpeg.exe: https://i.gyazo.com/04d60e64ff3c3040ea83e98cdf507530.png
In my file it's "Cluster" -> "BlockGroup" -> "Block", "?"; in the other it's just "Cluster" -> "SimpleBlock".
The webm spec says both are supported (https://www.webmproject.org/docs/container/), but I don't know much about these specifics; I'm just looking for anything.
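Since the symptom is "ticks" rather than silence, one cheap sanity check is to dump the packet timing of the broken file and of a known-good ffmpeg.exe output and compare the pts/duration progression. A minimal probe, assuming the same FFmpeg 3.3.x libraries as above (the default filename is a placeholder):
/* probe_pkts.c - print stream index, pts, dts and duration for every
 * packet in a file, so a broken .webm can be diffed against a good one. */
#include <stdio.h>
#include <inttypes.h>
#include <libavformat/avformat.h>

int main(int argc, char **argv)
{
    const char *name = argc > 1 ? argv[1] : "sound_test.webm";
    AVFormatContext *fmt = NULL;
    AVPacket pkt;

    av_register_all();
    if (avformat_open_input(&fmt, name, NULL, NULL) < 0)
        return 1;
    if (avformat_find_stream_info(fmt, NULL) < 0)
        return 1;

    av_init_packet(&pkt);
    while (av_read_frame(fmt, &pkt) >= 0) {
        printf("stream %d  pts %" PRId64 "  dts %" PRId64 "  dur %" PRId64 "\n",
               pkt.stream_index, pkt.pts, pkt.dts, pkt.duration);
        av_packet_unref(&pkt);
    }
    avformat_close_input(&fmt);
    return 0;
}
If the good file shows steadily increasing pts with a constant duration and the broken one does not, the muxing side (timestamps and time bases) is the place to dig, not the encoder itself.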
-
Scale filter crashes with error when used from transcoding example
27 June 2017, by Vali — I've modified this code example a bit (just so it compiles as C++): https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/transcoding.c
What works: as is (null filter), and a number of other filters like framerate, drawtext, ...
What doesn't work: the scale filter, when scaling down.
I use the following syntax for scale (I've tried many others too, same effect): "scale=w=iw/2:-1"
The error is: "Input picture width (240) is greater than stride (128)", where the values for width and stride depend on the input.
Misc environment info: Windows, VS 2017, input example: rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov
Any clue as to what I'm doing wrong?
Thanks!
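One likely culprit, going by the error text: the unmodified transcoding example opens the encoder with the decoder's width/height (see open_output_file() below), while a downscaling filter shrinks the frames afterwards, so the encoder is fed frames smaller than what it was configured for. A hedged sketch of the usual rearrangement — configure and open the encoder only after the filter graph is built, taking the geometry from the buffersink's input link (public AVFilterLink fields); this is a sketch against the code below, not a drop-in patch:
/* After avfilter_graph_config() has succeeded in init_filter(): */
AVFilterLink *sink_link = buffersink_ctx->inputs[0];

enc_ctx->width   = sink_link->w;       /* post-scale width  */
enc_ctx->height  = sink_link->h;       /* post-scale height */
enc_ctx->pix_fmt = (enum AVPixelFormat)sink_link->format;

/* ...then avcodec_open2(enc_ctx, encoder, NULL) and
 * avcodec_parameters_from_context(out_stream->codecpar, enc_ctx)
 * move here, i.e. after init_filters() instead of inside
 * open_output_file(). */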
EDITED to add working code sample
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avfilter.lib")
/*
* Copyright (c) 2010 Nicolas George
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2014 Andrey Utkin
*
**** EDITED 2017 for testing (see original here: https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/transcoding.c)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* API example for demuxing, decoding, filtering, encoding and muxing
* @example transcoding.c
*/
extern "C"
{
#include <libavcodec></libavcodec>avcodec.h>
#include <libavformat></libavformat>avformat.h>
#include <libavfilter></libavfilter>avfiltergraph.h>
#include <libavfilter></libavfilter>buffersink.h>
#include <libavfilter></libavfilter>buffersrc.h>
#include <libavutil></libavutil>opt.h>
#include <libavutil></libavutil>pixdesc.h>
}
static AVFormatContext *ifmt_ctx;
static AVFormatContext *ofmt_ctx;
typedef struct FilteringContext {
AVFilterContext *buffersink_ctx;
AVFilterContext *buffersrc_ctx;
AVFilterGraph *filter_graph;
} FilteringContext;
static FilteringContext *filter_ctx;
typedef struct StreamContext {
AVCodecContext *dec_ctx;
AVCodecContext *enc_ctx;
} StreamContext;
static StreamContext *stream_ctx;
static int open_input_file(const char *filename, int& videoStreamIndex)
{
int ret;
unsigned int i;
ifmt_ctx = NULL;
if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
return ret;
}
if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
return ret;
}
// Just need video
videoStreamIndex = -1;
for (unsigned int i = 0; i < ifmt_ctx->nb_streams; i++)
{
if (ifmt_ctx->streams[i]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
continue;
videoStreamIndex = i;
break;
}
if (videoStreamIndex < 0)
{
av_log(NULL, AV_LOG_ERROR, "Cannot find video stream\n");
return videoStreamIndex;
}
stream_ctx = (StreamContext*)av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
if (!stream_ctx)
return AVERROR(ENOMEM);
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
// Just need video
if (i != videoStreamIndex)
continue;
AVStream *stream = ifmt_ctx->streams[i];
AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
AVCodecContext *codec_ctx;
if (!dec) {
av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
return AVERROR_DECODER_NOT_FOUND;
}
codec_ctx = avcodec_alloc_context3(dec);
if (!codec_ctx) {
av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
return AVERROR(ENOMEM);
}
ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
"for stream #%u\n", i);
return ret;
}
/* Reencode video & audio and remux subtitles etc. */
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|| codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
/* Open decoder */
ret = avcodec_open2(codec_ctx, dec, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
return ret;
}
}
stream_ctx[i].dec_ctx = codec_ctx;
}
av_dump_format(ifmt_ctx, 0, filename, 0);
return 0;
}
static int open_output_file(const char *filename, const int videoStreamIndex)
{
AVStream *out_stream;
AVStream *in_stream;
AVCodecContext *dec_ctx, *enc_ctx;
AVCodec *encoder;
int ret;
unsigned int i;
ofmt_ctx = NULL;
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
if (!ofmt_ctx) {
av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
return AVERROR_UNKNOWN;
}
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
// Just need video
if (i != videoStreamIndex)
continue;
out_stream = avformat_new_stream(ofmt_ctx, NULL);
if (!out_stream) {
av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
return AVERROR_UNKNOWN;
}
in_stream = ifmt_ctx->streams[i];
dec_ctx = stream_ctx[i].dec_ctx;
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
/* in this example, we choose transcoding to same codec */
encoder = avcodec_find_encoder(dec_ctx->codec_id);
if (!encoder) {
av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
return AVERROR_INVALIDDATA;
}
enc_ctx = avcodec_alloc_context3(encoder);
if (!enc_ctx) {
av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
return AVERROR(ENOMEM);
}
/* In this example, we transcode to same properties (picture size,
* sample rate etc.). These properties can be changed for output
* streams easily using filters */
enc_ctx->height = dec_ctx->height;
enc_ctx->width = dec_ctx->width;
enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
/* take first format from list of supported formats */
if (encoder->pix_fmts)
enc_ctx->pix_fmt = encoder->pix_fmts[0];
else
enc_ctx->pix_fmt = dec_ctx->pix_fmt;
/* video time_base can be set to whatever is handy and supported by encoder */
//enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
enc_ctx->time_base = dec_ctx->time_base;
/* Third parameter can be used to pass settings to encoder */
ret = avcodec_open2(enc_ctx, encoder, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
return ret;
}
ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
return ret;
}
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
out_stream->time_base = enc_ctx->time_base;
stream_ctx[i].enc_ctx = enc_ctx;
}
else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
return AVERROR_INVALIDDATA;
}
else {
/* if this stream must be remuxed */
ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
return ret;
}
out_stream->time_base = in_stream->time_base;
}
}
av_dump_format(ofmt_ctx, 0, filename, 1);
if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
return ret;
}
}
/* init muxer, write output file header */
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
return ret;
}
return 0;
}
static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
AVCodecContext *enc_ctx, const char *filter_spec)
{
char args[512];
int ret = 0;
AVFilter *buffersrc = NULL;
AVFilter *buffersink = NULL;
AVFilterContext *buffersrc_ctx = NULL;
AVFilterContext *buffersink_ctx = NULL;
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
AVFilterGraph *filter_graph = avfilter_graph_alloc();
if (!outputs || !inputs || !filter_graph) {
ret = AVERROR(ENOMEM);
goto end;
}
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
buffersrc = avfilter_get_by_name("buffer");
buffersink = avfilter_get_by_name("buffersink");
if (!buffersrc || !buffersink) {
av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
ret = AVERROR_UNKNOWN;
goto end;
}
snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
dec_ctx->time_base.num, dec_ctx->time_base.den,
dec_ctx->sample_aspect_ratio.num,
dec_ctx->sample_aspect_ratio.den);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
args, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
goto end;
}
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
NULL, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
goto end;
}
ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
(uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
goto end;
}
}
else {
ret = AVERROR_UNKNOWN;
goto end;
}
/* Endpoints for the filter graph. */
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = NULL;
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
inputs->next = NULL;
if (!outputs->name || !inputs->name) {
ret = AVERROR(ENOMEM);
goto end;
}
if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
&inputs, &outputs, NULL)) < 0)
goto end;
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
goto end;
/* Fill FilteringContext */
fctx->buffersrc_ctx = buffersrc_ctx;
fctx->buffersink_ctx = buffersink_ctx;
fctx->filter_graph = filter_graph;
end:
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
return ret;
}
static int init_filters(const int videoStreamIndex)
{
const char *filter_spec;
unsigned int i;
int ret;
filter_ctx = (FilteringContext*)av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
if (!filter_ctx)
return AVERROR(ENOMEM);
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
// Just video
if (i != videoStreamIndex)
continue;
filter_ctx[i].buffersrc_ctx = NULL;
filter_ctx[i].buffersink_ctx = NULL;
filter_ctx[i].filter_graph = NULL;
if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO
|| ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
continue;
filter_spec = "null"; /* passthrough (dummy) filter for video */
//filter_spec = "scale=w=iw/2:-1";
// filter_spec = "drawtext=fontfile=FreeSerif.ttf: text='%{localtime}': x=w-text_w: y=0: fontsize=24: fontcolor=yellow@1.0: box=1: boxcolor=red@1.0";
// filter_spec = "drawtext=fontfile=FreeSerif.ttf :text='test': x=w-text_w: y=text_h: fontsize=24: fontcolor=yellow@1.0: box=1: boxcolor=red@1.0";
ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx,
stream_ctx[i].enc_ctx, filter_spec);
if (ret)
return ret;
}
return 0;
}
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame, const int videoStreamIndex) {
// Just video
if (stream_index != videoStreamIndex)
return 0;
int ret;
int got_frame_local;
AVPacket enc_pkt;
int(*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
(ifmt_ctx->streams[stream_index]->codecpar->codec_type ==
AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
if (!got_frame)
got_frame = &got_frame_local;
// av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
/* encode filtered frame */
enc_pkt.data = NULL;
enc_pkt.size = 0;
av_init_packet(&enc_pkt);
ret = enc_func(stream_ctx[stream_index].enc_ctx, &enc_pkt,
filt_frame, got_frame);
av_frame_free(&filt_frame);
if (ret < 0)
return ret;
if (!(*got_frame))
return 0;
/* prepare packet for muxing */
/*enc_pkt.stream_index = stream_index;
av_packet_rescale_ts(&enc_pkt, stream_ctx[stream_index].enc_ctx->time_base, ofmt_ctx->streams[stream_index]->time_base);*/
enc_pkt.stream_index = 0;
av_packet_rescale_ts(&enc_pkt, stream_ctx[stream_index].enc_ctx->time_base, ofmt_ctx->streams[0]->time_base);
av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
/* mux encoded frame */
ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
return ret;
}
static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index, const int videoStreamIndex)
{
// Just video, all else crashes
if (stream_index != videoStreamIndex)
return 0;
int ret;
AVFrame *filt_frame;
// av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
/* push the decoded frame into the filtergraph */
ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
frame, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
return ret;
}
/* pull filtered frames from the filtergraph */
while (1) {
filt_frame = av_frame_alloc();
if (!filt_frame) {
ret = AVERROR(ENOMEM);
break;
}
// av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
filt_frame);
if (ret < 0) {
/* if no more frames for output - returns AVERROR(EAGAIN)
* if flushed and no more frames for output - returns AVERROR_EOF
* rewrite retcode to 0 to show it as normal procedure completion
*/
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
ret = 0;
av_frame_free(&filt_frame);
break;
}
filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
ret = encode_write_frame(filt_frame, stream_index, NULL, videoStreamIndex);
if (ret < 0)
break;
}
return ret;
}
static int flush_encoder(unsigned int stream_index, const int videoStreamIndex)
{
int ret;
int got_frame;
// Just video
if (stream_index != videoStreamIndex)
return 0;
if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
AV_CODEC_CAP_DELAY))
return 0;
while (1) {
av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
ret = encode_write_frame(NULL, stream_index, &got_frame, videoStreamIndex);
if (ret < 0)
break;
if (!got_frame)
return 0;
}
return ret;
}
#include <vector>
int main(int argc, char **argv)
{
int ret;
AVPacket packet;
packet.data = NULL;
packet.size = 0;
AVFrame *frame = NULL;
enum AVMediaType type;
unsigned int stream_index;
unsigned int i;
int got_frame;
int(*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
#ifdef _DEBUG
// Hardcoded arguments
std::vector<char*> varguments;
{
varguments.push_back(argv[0]);
// Source
varguments.push_back("./big_buck_bunny_short.mp4 ");
// Destination
varguments.push_back("./big_buck_bunny_short-processed.mp4");
}
char** arguments = new char*[varguments.size()];
for (unsigned int i = 0; i < varguments.size(); i++)
{
arguments[i] = varguments[i];
}
argc = varguments.size();
argv = arguments;
#endif // _DEBUG
if (argc != 3) {
av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file="file" /> <output file="file">\n", argv[0]);
return 1;
}
av_register_all();
avfilter_register_all();
int videoStreamIndex = -1;
if ((ret = open_input_file(argv[1], videoStreamIndex)) < 0)
goto end;
if ((ret = open_output_file(argv[2], videoStreamIndex)) < 0)
goto end;
if ((ret = init_filters(videoStreamIndex)) < 0)
goto end;
// Stop after a couple of frames
int framesToGet = 100;
/* read all packets */
//while (framesToGet--)
while(1)
{
if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
break;
stream_index = packet.stream_index;
// I just need video
if (stream_index != videoStreamIndex) {
av_packet_unref(&packet);
continue;
}
type = ifmt_ctx->streams[packet.stream_index]->codecpar->codec_type;
av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
stream_index);
if (filter_ctx[stream_index].filter_graph) {
av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
frame = av_frame_alloc();
if (!frame) {
ret = AVERROR(ENOMEM);
break;
}
av_packet_rescale_ts(&packet,
ifmt_ctx->streams[stream_index]->time_base,
stream_ctx[stream_index].dec_ctx->time_base);
dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
avcodec_decode_audio4;
ret = dec_func(stream_ctx[stream_index].dec_ctx, frame,
&got_frame, &packet);
if (ret < 0) {
av_frame_free(&frame);
av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
break;
}
if (got_frame) {
frame->pts = frame->best_effort_timestamp;
ret = filter_encode_write_frame(frame, stream_index, videoStreamIndex);
av_frame_free(&frame);
if (ret < 0)
goto end;
}
else {
av_frame_free(&frame);
}
}
else {
/* remux this frame without reencoding */
av_packet_rescale_ts(&packet,
ifmt_ctx->streams[stream_index]->time_base,
ofmt_ctx->streams[stream_index]->time_base);
ret = av_interleaved_write_frame(ofmt_ctx, &packet);
if (ret < 0)
goto end;
}
av_packet_unref(&packet);
}
/* flush filters and encoders */
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
/* flush filter */
if (!filter_ctx[i].filter_graph)
continue;
ret = filter_encode_write_frame(NULL, i, videoStreamIndex);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
goto end;
}
/* flush encoder */
ret = flush_encoder(i, videoStreamIndex);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
goto end;
}
}
av_write_trailer(ofmt_ctx);
end:
av_packet_unref(&packet);
av_frame_free(&frame);
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
// Just video
if (i != videoStreamIndex)
continue;
avcodec_free_context(&stream_ctx[i].dec_ctx);
if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
avcodec_free_context(&stream_ctx[i].enc_ctx);
if (filter_ctx && filter_ctx[i].filter_graph)
avfilter_graph_free(&filter_ctx[i].filter_graph);
}
av_free(filter_ctx);
av_free(stream_ctx);
avformat_close_input(&ifmt_ctx);
if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
avio_closep(&ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);
/*if (ret < 0)
av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));*/
return ret ? 1 : 0;
}
</output></vector>