
Media (91)
-
Spitfire Parade - Crisis
15 May 2011
Updated: September 2011
Language: English
Type: Audio
-
Wired NextMusic
14 May 2011
Updated: February 2012
Language: English
Type: Video
-
Video of a bee in portrait
14 May 2011
Updated: February 2012
Language: French
Type: Video
-
Sintel MP4 Surround 5.1 Full
13 May 2011
Updated: February 2012
Language: English
Type: Video
-
Map of Schillerkiez
13 May 2011
Updated: September 2011
Language: English
Type: Text
-
Publishing an image simply
13 April 2011
Updated: February 2012
Language: French
Type: Video
Other articles (57)
-
Publishing on MediaSPIP
13 June 2013
Can I post content from an iPad tablet?
Yes, if your MediaSPIP installation is at version 0.2 or higher. If needed, contact the administrator of your MediaSPIP to find out.
-
Adding notes and captions to images
7 February 2011
To add notes and captions to images, the first step is to install the "Légendes" plugin.
Once the plugin is activated, you can configure it in the configuration area to change the rights for creating, modifying and deleting notes. By default, only site administrators can add notes to images.
Changes when adding a media item
When a media item of type "image" is added, a new button appears above the preview (...)
-
Sites built with MediaSPIP
2 May 2011
This page presents some of the sites running MediaSPIP. You can of course add your own using the form at the bottom of the page.
On other sites (11090)
-
How can I use FFmpeg to convert videos saved in the gallery to mp4, no matter what extension is chosen?
7 June 2014, by user3587194
Intent intent = new Intent(Intent.ACTION_PICK, android.provider.MediaStore.Video.Media.EXTERNAL_CONTENT_URI);
intent.setType("video/*");
startActivityForResult(intent, VIDEO_SELECTED_GALLERY);
}
}, expires);
}
});
sendBroadcast(new Intent(Intent.ACTION_MEDIA_SCANNER_SCAN_FILE, Uri.parse("file://" + Environment.getExternalStorageDirectory())));
Here is my onActivityResult; it runs after the intent returns, when I have chosen a video from the gallery. I am trying to run an FFmpeg command inside onActivityResult so that any video file chosen from the gallery can be converted to mp4 (a sketch of the kind of command this would need follows the code below).
else if (requestCode == VIDEO_SELECTED_GALLERY) {
if (resultCode == RESULT_OK) {
try {
selectedVideo = intent.getData();
if (selectedVideo != null) {
try {
String[] filePathColumn = {
MediaStore.Video.VideoColumns.DATA,
MediaStore.Video.VideoColumns.SIZE,
MediaStore.Video.VideoColumns.DURATION,
MediaStore.Video.VideoColumns.MIME_TYPE
};
Cursor cursor = getContentResolver().query(selectedVideo, filePathColumn, null, null, null);
cursor.moveToFirst();
int columnIndex0 = cursor.getColumnIndex(filePathColumn[0]);
int columnIndex1 = cursor.getColumnIndex(filePathColumn[1]);
int columnIndex2 = cursor.getColumnIndex(filePathColumn[2]);
int columnIndex3 = cursor.getColumnIndex(filePathColumn[3]);
if (cursor.getString(columnIndex3) != null && cursor.getString(columnIndex3).compareTo("")!=0 && cursor.getString(columnIndex3).contains("wmv")) {
displayErrorAlert("Upload Alert", "You cannot upload .wmv format videos. Sorry, that file format is not supported at this time");
}
else {
float lduration = 0.0f;
String lpath = "";
if(cursor.getString(columnIndex1) != null && cursor.getString(columnIndex1).compareTo("")!=0) {
lsize = Long.parseLong(cursor.getString(columnIndex1));
size_of_file = lsize;
}
if (cursor.getString(columnIndex2) != null && cursor.getString(columnIndex2).compareTo("")!=0) {
lduration = Float.parseFloat(cursor.getString(columnIndex2));
}
if(cursor.getString(columnIndex0) != null && cursor.getString(columnIndex0).compareTo("")!= 0) {
lpath = cursor.getString(columnIndex0);
lduration = lduration / 1000;
if (lduration > 31.00) {
displayErrorAlert("Time Limit", "Duration of video is more than 30 seconds");
}
else {
path = lpath;
getVideoFileSize();
if (lsize > 26214400 && lsize < FILE_SIZE) {
AlertDialog.Builder confirm = new AlertDialog.Builder(this);
confirm.setTitle("Warning: Long Upload Time");
confirm.setMessage("This video is larger than 25MB. Uploading may take more than 5 minutes, depending on your internet speed.");
confirm.setPositiveButton("Yes", new DialogInterface.OnClickListener() {
@Override
public void onClick (DialogInterface dialog, int which) {
if (mConnectionDetector.isConnectedToInternet()) {
// TODO Auto-generated method
new S3PutObjectTask(VideoPlay.this, s3Client, code, path, size_of_file, securityCode).execute();
}
else {
mTosty.showToast(getResources().getString(R.string.internetIssue));
}
}
});
confirm.setNegativeButton("No", new DialogInterface.OnClickListener() {
public void onClick (DialogInterface dialog, int which) {
dialog.dismiss();
}
});
confirm.show();
}
else {
if (mConnectionDetector.isConnectedToInternet()) {
new S3PutObjectTask(VideoPlay.this, s3Client, code, path, size_of_file, securityCode).execute();
}
else {
mTosty.showToast(getResources().getString(R.string.internetIssue));
}
}
}
}
else {
if (lsize > FILE_SIZE) {
displayErrorAlert("File Size Too Big", "You can not download greater than 50mb.");
}
else if (lsize > 26214400 && lsize < FILE_SIZE) {
AlertDialog.Builder confirm = new AlertDialog.Builder(this);
confirm.setTitle("Warning: Long Download Time");
confirm.setMessage("This video is larger than 25MB. Downloading may take more than 5 minutes, depending on your internet speed.");
confirm.setPositiveButton("Yes", new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
VideoDownloaderTaskForAlbum mTask = new VideoDownloaderTaskForAlbum(VideoPlay.this, VideoPlay.this, lsize, selectedVideo);
mTask.execute(selectedVideo);
dialog.dismiss();
}
});
confirm.setNegativeButton("No", new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int which) {
dialog.dismiss();
}
});
confirm.show();
}
else {
VideoDownloaderTaskForAlbum mTask = new VideoDownloaderTaskForAlbum(VideoPlay.this, VideoPlay.this, lsize, selectedVideo);
mTask.execute(selectedVideo);
}
}
}
}
catch(Exception e) {
e.printStackTrace();
}
}
else {
displayErrorAlert("Wrong File", "Wrong file Path");
}
}
catch (Exception e) {
Log.i("tag", e.getMessage());
}
} else if (resultCode == RESULT_CANCELED) {
mTosty.showToast("Result Canceled");
}
}
}
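For reference, a minimal sketch of the kind of conversion command that step would need to run once the gallery file path is known, assuming an FFmpeg binary or wrapper that accepts standard ffmpeg arguments (both paths below are placeholders):
ffmpeg -i /path/from/gallery/input.3gp -c:v libx264 -c:a aac /path/to/output.mp4
The input can carry any extension or container ffmpeg recognizes; the output codecs and the .mp4 name determine the converted result.
-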
Revision 19897: In a HEAD request, return the real header. To do that we really have to ...
11 September 2012, by cedric
We therefore lose the speed of HEAD request processing in favour of its accuracy. Since HEAD generally accounts for less than 1% of requests (more like 2 in 1,000 on a sample of production sites), the loss in server performance is negligible (all the more so since a (...)
-
How to stream H.264 video with MP3 audio using libavcodec?
18 September 2012, by dasg
I read H.264 frames from a webcam and capture audio from a microphone. I need to stream live video to ffserver. During debugging I read the video back from ffserver using ffmpeg with the following command:
ffmpeg -i http://127.0.0.1:12345/robot.avi -vcodec copy -acodec copy out.avi
The video in the output file is slightly sped up. If I add an audio stream, it is sped up several times over. Sometimes there is no audio in the output file at all.
Here is my code for encoding audio:
#include "v_audio_encoder.h"
extern "C" {
#include <libavcodec/avcodec.h>
}
#include <cassert>
struct VAudioEncoder::Private
{
AVCodec *m_codec;
AVCodecContext *m_context;
std::vector<unsigned char> m_outBuffer;
};
VAudioEncoder::VAudioEncoder( int sampleRate, int bitRate )
{
d = new Private( );
d->m_codec = avcodec_find_encoder( CODEC_ID_MP3 );
assert( d->m_codec );
d->m_context = avcodec_alloc_context3( d->m_codec );
// put sample parameters
d->m_context->channels = 2;
d->m_context->bit_rate = bitRate;
d->m_context->sample_rate = sampleRate;
d->m_context->sample_fmt = AV_SAMPLE_FMT_S16;
strcpy( d->m_context->codec_name, "libmp3lame" );
// open it
int res = avcodec_open2( d->m_context, d->m_codec, 0 );
assert( res >= 0 );
d->m_outBuffer.resize( d->m_context->frame_size );
}
VAudioEncoder::~VAudioEncoder( )
{
avcodec_close( d->m_context );
av_free( d->m_context );
delete d;
}
void VAudioEncoder::encode( const std::vector<unsigned char>& samples, std::vector<unsigned char>& outbuf )
{
assert( (int)samples.size( ) == d->m_context->frame_size );
int outSize = avcodec_encode_audio( d->m_context, d->m_outBuffer.data( ),
d->m_outBuffer.size( ), reinterpret_cast<const short*>( samples.data( ) ) );
if( outSize ) {
outbuf.resize( outSize );
memcpy( outbuf.data( ), d->m_outBuffer.data( ), outSize );
}
else
outbuf.clear( );
}
int VAudioEncoder::getFrameSize( ) const
{
return d->m_context->frame_size;
}
Here is my code for streaming video:
#include "v_out_video_stream.h"
extern "C" {
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/avstring.h>
#include <libavformat/avio.h>
}
#include <stdexcept>
#include <cassert>
struct VStatticRegistrar
{
VStatticRegistrar( )
{
av_register_all( );
avformat_network_init( );
}
};
VStatticRegistrar __registrar;
struct VOutVideoStream::Private
{
AVFormatContext * m_context;
int m_videoStreamIndex;
int m_audioStreamIndex;
int m_videoBitrate;
int m_width;
int m_height;
int m_fps;
int m_bitrate;
bool m_waitKeyFrame;
};
VOutVideoStream::VOutVideoStream( int width, int height, int fps, int bitrate )
{
d = new Private( );
d->m_width = width;
d->m_height = height;
d->m_fps = fps;
d->m_context = 0;
d->m_videoStreamIndex = -1;
d->m_audioStreamIndex = -1;
d->m_bitrate = bitrate;
d->m_waitKeyFrame = true;
}
bool VOutVideoStream::connectToServer( const std::string& uri )
{
assert( ! d->m_context );
// initialize the AV context
d->m_context = avformat_alloc_context();
if( !d->m_context )
return false;
// get the output format
d->m_context->oformat = av_guess_format( "ffm", NULL, NULL );
if( ! d->m_context->oformat )
return false;
strcpy( d->m_context->filename, uri.c_str( ) );
// add an H.264 stream
AVStream *stream = avformat_new_stream( d->m_context, NULL );
if ( ! stream )
return false;
// initialize codec
AVCodecContext* codec = stream->codec;
if( d->m_context->oformat->flags & AVFMT_GLOBALHEADER )
codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
codec->codec_id = CODEC_ID_H264;
codec->codec_type = AVMEDIA_TYPE_VIDEO;
strcpy( codec->codec_name, "libx264" );
// codec->codec_tag = ( unsigned('4') << 24 ) + (unsigned('6') << 16 ) + ( unsigned('2') << 8 ) + 'H';
codec->width = d->m_width;
codec->height = d->m_height;
codec->time_base.den = d->m_fps;
codec->time_base.num = 1;
codec->bit_rate = d->m_bitrate;
d->m_videoStreamIndex = stream->index;
// add an MP3 stream
stream = avformat_new_stream( d->m_context, NULL );
if ( ! stream )
return false;
// initialize codec
codec = stream->codec;
if( d->m_context->oformat->flags & AVFMT_GLOBALHEADER )
codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
codec->codec_id = CODEC_ID_MP3;
codec->codec_type = AVMEDIA_TYPE_AUDIO;
strcpy( codec->codec_name, "libmp3lame" );
codec->sample_fmt = AV_SAMPLE_FMT_S16;
codec->channels = 2;
codec->bit_rate = 64000;
codec->sample_rate = 44100;
d->m_audioStreamIndex = stream->index;
// try to open the stream
if( avio_open( &d->m_context->pb, d->m_context->filename, AVIO_FLAG_WRITE ) < 0 )
return false;
// write the header
return avformat_write_header( d->m_context, NULL ) == 0;
}
void VOutVideoStream::disconnect( )
{
assert( d->m_context );
avio_close( d->m_context->pb );
avformat_free_context( d->m_context );
d->m_context = 0;
}
VOutVideoStream::~VOutVideoStream( )
{
if( d->m_context )
disconnect( );
delete d;
}
int VOutVideoStream::getVopType( const std::vector<unsigned char>& image )
{
if( image.size( ) < 6 )
return -1;
unsigned char *b = (unsigned char*)image.data( );
// Verify NAL marker
if( b[ 0 ] || b[ 1 ] || 0x01 != b[ 2 ] ) {
++b;
if ( b[ 0 ] || b[ 1 ] || 0x01 != b[ 2 ] )
return -1;
}
b += 3;
// Verify VOP id
if( 0xb6 == *b ) {
++b;
return ( *b & 0xc0 ) >> 6;
}
switch( *b ) {
case 0x65: return 0;
case 0x61: return 1;
case 0x01: return 2;
}
return -1;
}
bool VOutVideoStream::sendVideoFrame( std::vector<unsigned char>& image )
{
// Init packet
AVPacket pkt;
av_init_packet( &pkt );
pkt.flags |= ( 0 >= getVopType( image ) ) ? AV_PKT_FLAG_KEY : 0;
// Wait for key frame
if ( d->m_waitKeyFrame ) {
if( pkt.flags & AV_PKT_FLAG_KEY )
d->m_waitKeyFrame = false;
else
return true;
}
pkt.stream_index = d->m_videoStreamIndex;
pkt.data = image.data( );
pkt.size = image.size( );
pkt.pts = pkt.dts = AV_NOPTS_VALUE;
return av_write_frame( d->m_context, &pkt ) >= 0;
}
bool VOutVideoStream::sendAudioFrame( std::vector<unsigned char>& audio )
{
// Init packet
AVPacket pkt;
av_init_packet( &pkt );
pkt.stream_index = d->m_audioStreamIndex;
pkt.data = audio.data( );
pkt.size = audio.size( );
pkt.pts = pkt.dts = AV_NOPTS_VALUE;
return av_write_frame( d->m_context, &pkt ) >= 0;
}
Here is how I use it:
BOOST_AUTO_TEST_CASE(testSendingVideo)
{
const int framesToGrab = 90000;
VOutVideoStream stream( VIDEO_WIDTH, VIDEO_HEIGHT, FPS, VIDEO_BITRATE );
if( stream.connectToServer( URI ) ) {
VAudioEncoder audioEncoder( AUDIO_SAMPLE_RATE, AUDIO_BIT_RATE );
VAudioCapture microphone( MICROPHONE_NAME, AUDIO_SAMPLE_RATE, audioEncoder.getFrameSize( ) );
VLogitecCamera camera( VIDEO_WIDTH, VIDEO_HEIGHT );
BOOST_REQUIRE( camera.open( CAMERA_PORT ) );
BOOST_REQUIRE( camera.startCapturing( ) );
std::vector<unsigned char> image, encodedAudio;
std::vector<unsigned char> voice;
boost::system_time startTime;
int delta;
for( int i = 0; i < framesToGrab; ++i ) {
startTime = boost::posix_time::microsec_clock::universal_time( );
BOOST_REQUIRE( camera.read( image ) );
BOOST_REQUIRE( microphone.read( voice ) );
audioEncoder.encode( voice, encodedAudio );
BOOST_REQUIRE( stream.sendVideoFrame( image ) );
BOOST_REQUIRE( stream.sendAudioFrame( encodedAudio ) );
delta = ( boost::posix_time::microsec_clock::universal_time( ) - startTime ).total_milliseconds( );
if( delta < 1000 / FPS )
boost::thread::sleep( startTime + boost::posix_time::milliseconds( 1000 / FPS - delta ) );
}
BOOST_REQUIRE( camera.stopCapturing( ) );
BOOST_REQUIRE( camera.close( ) );
}
else
std::cout << "failed to connect to server" << std::endl;
}
I think my problem is in the PTS and DTS. Can anyone help me?
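One thing worth checking, not a definitive answer: both sendVideoFrame and sendAudioFrame write their packets with pkt.pts = pkt.dts = AV_NOPTS_VALUE, so the muxer never receives real timestamps. A minimal sketch of explicit video timestamping, assuming a hypothetical frame counter member m_videoFrames (not present in the Private struct above) incremented once per sent frame, and the 1/fps time base already set on the codec in connectToServer:
// inside sendVideoFrame, replacing pkt.pts = pkt.dts = AV_NOPTS_VALUE;
AVStream *st = d->m_context->streams[ d->m_videoStreamIndex ];
pkt.pts = av_rescale_q( d->m_videoFrames++,   // hypothetical frame counter, in 1/fps units
                        st->codec->time_base, // {1, m_fps} as set in connectToServer
                        st->time_base );      // time base chosen by the muxer
pkt.dts = pkt.pts;                            // assumes no B-frame reordering
Audio packets would be stamped analogously from a running sample count in a {1, sample_rate} time base, so that both streams advance on the same clock. This is only a sketch of the usual libavformat timestamping pattern, not something tested against ffserver.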