Recherche avancée
Médias (1)
-
Rennes Emotion Map 2010-11
19 octobre 2011, par
Mis à jour : Juillet 2013
Langue : français
Type : Texte
Autres articles (16)
-
Les formats acceptés
28 janvier 2010, par — Les commandes suivantes permettent d’avoir des informations sur les formats et codecs gérés par l’installation locale de ffmpeg :
ffmpeg -codecs ffmpeg -formats
Les formats vidéo acceptés en entrée
Cette liste est non exhaustive, elle met en exergue les principaux formats utilisés : h264 : H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 m4v : raw MPEG-4 video format flv : Flash Video (FLV) / Sorenson Spark / Sorenson H.263 Theora wmv :
Les formats vidéos de sortie possibles
Dans un premier temps on (...) -
Ajouter notes et légendes aux images
7 février 2011, par — Pour pouvoir ajouter notes et légendes aux images, la première étape est d’installer le plugin "Légendes".
Une fois le plugin activé, vous pouvez le configurer dans l’espace de configuration afin de modifier les droits de création / modification et de suppression des notes. Par défaut seuls les administrateurs du site peuvent ajouter des notes aux images.
Modification lors de l’ajout d’un média
Lors de l’ajout d’un média de type "image" un nouveau bouton apparait au dessus de la prévisualisation (...) -
Gestion générale des documents
13 mai 2011, par — MédiaSPIP ne modifie jamais le document original mis en ligne.
Pour chaque document mis en ligne il effectue deux opérations successives : la création d’une version supplémentaire qui peut être facilement consultée en ligne tout en laissant l’original téléchargeable dans le cas où le document original ne peut être lu dans un navigateur Internet ; la récupération des métadonnées du document original pour illustrer textuellement le fichier ;
Les tableaux ci-dessous expliquent ce que peut faire MédiaSPIP (...)
Sur d’autres sites (4942)
-
MOOV atom is not being written to the output.
18 juillet 2014, par AnilJ — I am facing a problem where a MOOV atom is not written to the end of the file, and the file is not playable by the VLC player. Also, the FFmpeg command gives me the following error.
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x2334ba0] moov atom not found
queueout/1000_wrecord.mp4: Invalid data found when processing inputIn my design, I am using an object of DataChunkQueue class to initialize the IContainer object, where it writes into this queue when it calls IContainer.WritePacket(packet) method. Finally when the recording is complete, I am flushing out this queue into a file. However, when I try to play the file, above error is thrown. When I test with the FLV file type however, I am able to playout the file correctly.
I am not sure what the issue is with the MP4 file and why it does not write the MOOV atom to the end. Can anyone offer me any suggestions as to how this can be resolved?
I am pasting below some of the code snippets for reference.
/**
 * In-memory FIFO of encoded media data, exposed to Xuggler as a ByteChannel.
 * IContainer writes packets into it via write(); a consumer drains it via
 * read(). (The generic parameter appeared as "&lt;datachunk&gt;" in the paste —
 * restored to the DataChunk type used throughout.)
 *
 * NOTE(review): a FIFO channel is inherently non-seekable; the MP4 (mov)
 * muxer must seek back over the output to write the moov atom, which is
 * consistent with FLV working and MP4 failing — TODO confirm.
 */
public class DataChunkQueue implements ByteChannel {

    private ConcurrentLinkedQueue<DataChunk> mChunkQueue = null;
    private static String BASE_PATH = null;
    private static String mOutputFileName = null;
    private FileChannel mOutputFileChannel = null;

    // constructor
    public DataChunkQueue() {
        mChunkQueue = new ConcurrentLinkedQueue<DataChunk>();
    }

    @Override
    public void close() throws IOException {
        // Nothing to release: the queue lives purely in memory.
    }

    @Override
    public boolean isOpen() {
        return true;
    }

    /**
     * Queues one chunk of encoded data.
     * BUG FIX: the original returned 0; the WritableByteChannel contract
     * requires the number of bytes consumed, and callers may check it.
     */
    @Override
    public int write(ByteBuffer buffer) throws IOException {
        int written = buffer.remaining();
        DataChunk vChunk = new DataChunk(buffer);
        mChunkQueue.add(vChunk);
        return written;
    }

    /**
     * Drains one chunk into the caller's buffer.
     * BUG FIX (three defects in the original):
     *  1. it reassigned the local parameter reference instead of copying
     *     data into the caller's buffer, so the caller never received bytes;
     *  2. poll() can return null on an empty queue, causing an NPE before
     *     the null check ran;
     *  3. it returned 0 on success / 1 on failure; the ReadableByteChannel
     *     contract requires bytes-read, or -1 at end of stream.
     */
    @Override
    public int read(ByteBuffer buffer) throws IOException {
        DataChunk chunk = mChunkQueue.poll();
        if (chunk == null) {
            return -1; // queue empty: signal end of stream
        }
        ByteBuffer data = chunk.GetBuffer();
        int count = Math.min(data.remaining(), buffer.remaining());
        for (int i = 0; i < count; i++) {
            buffer.put(data.get());
        }
        return count;
    }
}
/**
 * Creates the output container on a queue-backed channel, guesses the codec
 * for the configured record format, configures the stream coder, and writes
 * the container header.
 *
 * @return true on success; false if the coder or header cannot be opened.
 * @throws RuntimeException if the container cannot be opened or no codec is found.
 */
private boolean InitStreamEncoder() {
    // BUG FIX: the original declared "DataChunkQueue mOutQueue = null;" and
    // passed the null reference straight into IContainer.open() — the
    // container must be given a live ByteChannel to write into.
    // NOTE(review): this local is lost when the method returns; it most
    // likely needs to be a field so the drain/consumer side can read it.
    DataChunkQueue mOutQueue = new DataChunkQueue();

    // Change this to change the frame rate you record at.
    mFrameRate = IRational.make(Constants.FRAME_RATE, 1);

    // Try opening a container format of the requested type (e.g. "mp4", "flv").
    mOutFormat = IContainerFormat.make();
    mOutFormat.setOutputFormat(mRecordFormat, null, null);

    // Initialize the output container on top of the channel.
    // NOTE(review): the MP4 muxer writes the moov atom by seeking back over
    // the output when the trailer is written; a pure FIFO channel is not
    // seekable, which would explain "moov atom not found" while FLV (which
    // needs no seek) plays fine. Fragmented MP4 avoids the seek — confirm
    // whether the Xuggler version in use can pass muxer options.
    mOutputContainer = IContainer.make();
    int retval = mOutputContainer.open(mOutQueue, IContainer.Type.WRITE, mOutFormat);
    if (retval < 0)
        throw new RuntimeException("could not open data output stream buffer");

    // Guess the encoding CODEC based on the type of output file.
    ICodec videoCodec = ICodec.guessEncodingCodec(null, null, ("out." + mRecordFormat), null, ICodec.Type.CODEC_TYPE_VIDEO);
    if (videoCodec == null)
        throw new RuntimeException("could not guess a codec");

    // Initialize the encoding parameters.
    mOutStream = mOutputContainer.addNewStream(videoCodec);
    mOutStreamCoder = mOutStream.getStreamCoder();
    mOutStreamCoder.setNumPicturesInGroupOfPictures(Constants.GOP);
    mOutStreamCoder.setCodec(videoCodec);
    //mOutStreamCoder.setBitRate(Constants.BITRATE);
    //mOutStreamCoder.setBitRateTolerance(Constants.TOLERANCE);
    mOutStreamCoder.setPixelType(IPixelFormat.Type.YUV420P);
    mOutStreamCoder.setWidth(Constants.MAIN_SCREEN_WIDTH);
    mOutStreamCoder.setHeight(Constants.MAIN_SCREEN_HEIGHT);
    //mOutStreamCoder.setFlag(IStreamCoder.Flags.FLAG_QSCALE, true);
    //mOutStreamCoder.setGlobalQuality(0);
    mOutStreamCoder.setFrameRate(mFrameRate);
    // Time base is the inverse of the frame rate.
    mOutStreamCoder.setTimeBase(IRational.make(mFrameRate.getDenominator(), mFrameRate.getNumerator()));

    retval = mOutStreamCoder.open(null, null);
    if (retval < 0) {
        System.out.println("could not open input decoder");
        return false;
    }

    // Write the container header (for MP4: the ftyp box and mdat preamble).
    retval = mOutputContainer.writeHeader();
    if (retval < 0) {
        System.out.println("could not write file header");
        return false;
    }
    return true;
}
This function is called at the very end to write the trailer.
// Finalizes the recording: flushes buffered packets, writes the container
// trailer, and closes the container and stream coder.
// NOTE(review): for MP4 the trailer step is where the moov atom is produced,
// and the mov muxer needs a *seekable* output to patch it in; the FIFO
// DataChunkQueue channel is not seekable, which matches the reported
// "moov atom not found" while FLV works — TODO confirm against the muxer docs.
public void Cleanup() {
if (mOutputContainer != null) {
// Push any packets still buffered in the muxer to the output channel.
mOutputContainer.flushPackets();
// Write the format trailer (moov atom for MP4).
mOutputContainer.writeTrailer();
mOutputContainer.close();
}
// Close the coder after the container so trailing packets can be written.
if (mOutStreamCoder != null) {
mOutStreamCoder.close();
mOutStreamCoder = null;
}
} -
FFmpeg - capture a rtsp stream and re-stream it to another rtsp-server
15 mars 2021, par moster67 — I want to capture a RTSP stream from a live cam which I then want to re-stream to another RTSP server.
Basically, my computer will work as a relay-server using FFMpeg.


I have tried this temporary command but I cannot get it working i.e.


ffmpeg.exe -i rtsp://InputIPAddress:554/mystream -preset medium -vcodec libx264 -tune zerolatency -f rtsp -rtsp_transport tcp rtsp://localhost:8554/mysecondstream


I have then tried, for testing purposes, using FFplay to watch the stream from localhost as follows :


ffplay rtsp://localhost:8554/mysecondstream


but no luck.


Anyone who can help me out ? Thanks.


-
Encoding .png images with h264 to a file on disk
21 février 2021, par xyfix — Can somebody help me to find out why I end up with a file on disk that is only 24 kb and not readable by VLC or so, while I send valid YUV images to the codec. I have added the .h and .cpp file. Up till "avcodec_receive_packet" everything seems to be OK. The function call "avcodec_send_frame" returns 0, so that must be OK, but "avcodec_receive_packet" returns -11. If I flush the encoder (currently commented) then "avcodec_receive_packet" returns 0 and I can see encoded data if I store it on disk. Also the input image to the encoder is correct (currently commented) and checked. I'm aiming for an intra-frame encoding, so I should get the encoded frame data back, but I don't get anything back even if I send 24 images to it.


.h file


#ifndef MOVIECODEC_H
#define MOVIECODEC_H

// BUG FIX: the original had a bare "#include" whose header name was lost in
// the paste; cv::Mat is used below, so an OpenCV header is required — confirm
// the exact header path against the project.
#include <opencv2/opencv.hpp>

extern "C"
{
 #include "Codec/include/libavcodec/avcodec.h"
 #include "Codec/include/libavdevice/avdevice.h"
 #include "Codec/include/libavformat/avformat.h"
 #include "Codec/include/libavutil/avutil.h"
 #include "Codec/include/libavutil/imgutils.h"
 #include "Codec/include/libswscale/swscale.h"
}


/// Encodes cv::Mat images as intra-only H.264 into the file named at
/// construction time. Construction opens the container and writes the
/// header; encodeImage() appends one frame; close() (also run by the
/// destructor) finalizes the file.
class MovieCodec
{
public:

 /// Opens the output file, adds the H.264 stream, opens the encoder and
 /// writes the container header.
 MovieCodec(const char *filename);

 /// Calls close().
 ~MovieCodec();

 /// Converts one image to YUV420P, encodes it and writes the packet(s).
 void encodeImage( const cv::Mat& image );

 /// Flushes the encoder, writes the trailer and releases all resources.
 void close();

private :

 /// Creates the output stream and configures the codec context.
 void add_stream();

 /// Opens the H.264 encoder (avcodec_open2).
 void openVideoCodec();

 /// Encodes one converted frame and writes it to the container.
 void write_video_frame(const cv::Mat& image);

 /// Converts the input image into the YUV420P frame m_frame.
 void createFrame( const cv::Mat& image );

private:

 static int s_frameCount;              // frames submitted to the encoder

 int m_timeVideo = 0;                  // NOTE(review): assigned a double in encodeImage() — consider double

 std::string m_filename;               // output path

 FILE* m_file;                         // NOTE(review): second handle on the output path; see constructor

 AVCodec* m_encoder = NULL;            // H.264 encoder

 AVOutputFormat* m_outputFormat = NULL;

 AVFormatContext* m_formatCtx = NULL;  // muxer context

 AVCodecContext* m_codecCtx = NULL;    // encoder context

 AVStream* m_streamOut = NULL;         // the single video stream

 AVFrame* m_frame = NULL;              // reusable YUV frame (created lazily)

 AVPacket* m_packet = NULL;            // NOTE(review): unused in the visible code

};

// BUG FIX: the include guard opened above was never closed in the paste.
#endif // MOVIECODEC_H


.cpp file


// NOTE(review): include guards in a .cpp file are unusual — harmless, but
// they suggest this translation unit may be #included somewhere instead of
// being compiled on its own.
#ifndef MOVIECODEC_CPP
#define MOVIECODEC_CPP

#include "moviecodec.h"


// Stream parameters: 5 seconds at 24 fps, intra-friendly YUV 4:2:0 output.
#define STREAM_DURATION 5.0
#define STREAM_FRAME_RATE 24
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */


// NOTE(review): sws_flags is never referenced in the visible code — the
// sws_getContext() call in createFrame() passes 0 instead; presumably it was
// meant to be used there.
static int sws_flags = SWS_BICUBIC;
// Running count of frames handed to write_video_frame().
int MovieCodec::s_frameCount = 0;

// Opens the output container for `filename`, adds the H.264 stream, opens
// the encoder, opens the output file and writes the container header.
MovieCodec::MovieCodec( const char* filename ) :
 m_filename( filename ),
 m_encoder( avcodec_find_encoder( AV_CODEC_ID_H264 ))
{
 av_log_set_level(AV_LOG_VERBOSE);

 int ret(0);

 // NOTE(review): this FILE* targets the same path that avio_open() opens
 // below, so the file is opened twice; m_file is never written to and never
 // fclose()d in the visible code — it looks removable. TODO confirm.
 m_file = fopen( m_filename.c_str(), "wb");

 // allocate the output media context (format deduced from the file name)
 ret = avformat_alloc_output_context2( &m_formatCtx, m_outputFormat, NULL, m_filename.c_str());

 if (!m_formatCtx)
 return;

 m_outputFormat = m_formatCtx->oformat;

 // Add the video stream using H264 codec
 add_stream();

 // Open video codec and allocate the necessary encode buffers
 if (m_streamOut)
 openVideoCodec();

 av_dump_format( m_formatCtx, 0, m_filename.c_str(), 1);

 // Open the output media file, if needed
 if (!( m_outputFormat->flags & AVFMT_NOFILE))
 {
 ret = avio_open( &m_formatCtx->pb, m_filename.c_str(), AVIO_FLAG_WRITE);

 if (ret < 0)
 {
 char error[255];
 ret = av_strerror( ret, error, 255);
 fprintf(stderr, "Could not open '%s': %s\n", m_filename.c_str(), error);
 return ;
 }
 }
 else
 {
 return;
 }

 // Ask the muxer to hand packets to the AVIOContext as soon as written.
 m_formatCtx->flush_packets = 1;

 ret = avformat_write_header( m_formatCtx, NULL );

 if (ret < 0)
 {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "Error occurred when opening output file: %s\n", error);
 return;
 }


 // NOTE(review): dead branch — m_frame is still NULL here, because it is
 // first allocated in createFrame() (called from write_video_frame()), so
 // this pts reset never executes.
 if ( m_frame )
 m_frame->pts = 0;
}



// Releases all encoder/muxer resources by delegating to close(); see
// close() for the teardown order.
MovieCodec::~MovieCodec()
{
 close();
}



// Appends one image to the movie: updates the running video time, encodes
// the frame, then advances the frame's pts by one frame duration.
void MovieCodec::encodeImage(const cv::Mat &image)
{
    // Stop media if the stream was never created (or enough time elapsed).
    if (!m_streamOut /*|| m_timeVideo >= STREAM_DURATION*/)
        return;

    // BUG FIX: the original line had an unbalanced parenthesis
    // ("= (double)m_frame->pts) * ...") — it cannot compile — and it
    // dereferenced m_frame before the first write_video_frame() call
    // allocates it (NULL dereference on the first frame).
    if (m_frame)
        m_timeVideo = (double)m_frame->pts * av_q2d(m_streamOut->time_base);

    // Add a video frame.
    write_video_frame( image );

    // Increase frame pts according to time base.
    if (m_frame)
        m_frame->pts += av_rescale_q(1, m_codecCtx->time_base, m_streamOut->time_base);
}


void MovieCodec::close()
{
 int ret( 0 );

 // Write media trailer
// if( m_formatCtx )
// ret = av_write_trailer( m_formatCtx );

 /* flush the encoder */
 ret = avcodec_send_frame(m_codecCtx, NULL);

 /* Close each codec. */
 if ( m_streamOut )
 {
 av_free( m_frame->data[0]);
 av_free( m_frame );
 }

 if (!( m_outputFormat->flags & AVFMT_NOFILE))
 /* Close the output file. */
 ret = avio_close( m_formatCtx->pb);


 /* free the stream */
 avformat_free_context( m_formatCtx );

 fflush( m_file );
}


// Converts `image` (packed 24-bit RGB) into the YUV420P frame m_frame.
// BUG FIX (two leaks + lost timestamps): the original allocated a fresh
// AVFrame and image buffer on every call without freeing the previous ones
// (one frame + one full plane set leaked per encoded picture), reset the
// running pts each time, and never freed the SwsContext.
void MovieCodec::createFrame( const cv::Mat& image )
{
    int64_t pts = 0;
    if ( m_frame )
    {
        pts = m_frame->pts;            // preserve the running timestamp
        av_freep( &m_frame->data[0] ); // buffer came from av_image_alloc
        av_frame_free( &m_frame );
    }

    /* Allocate the YUV frame. */
    m_frame = av_frame_alloc();
    m_frame->format = STREAM_PIX_FMT;
    m_frame->width = image.cols();
    m_frame->height = image.rows();
    m_frame->pict_type = AV_PICTURE_TYPE_I; // intra-only output is intended
    m_frame->pts = pts;

    int ret = av_image_alloc(m_frame->data, m_frame->linesize, m_frame->width, m_frame->height, STREAM_PIX_FMT, 1);
    if (ret < 0)
    {
        return;
    }

    // RGB24 -> YUV420P conversion.
    // NOTE(review): cv::Mat is conventionally BGR; if colors come out
    // swapped, use AV_PIX_FMT_BGR24 here — confirm with a test image.
    struct SwsContext* sws_ctx = sws_getContext((int)image.cols(), (int)image.rows(), AV_PIX_FMT_RGB24,
        (int)image.cols(), (int)image.rows(), STREAM_PIX_FMT, 0, NULL, NULL, NULL);
    if ( !sws_ctx )
        return;

    const uint8_t* rgbData[1] = { (uint8_t* )image.getData() };
    int rgbLineSize[1] = { 3 * image.cols() };

    sws_scale(sws_ctx, rgbData, rgbLineSize, 0, image.rows(), m_frame->data, m_frame->linesize);

    sws_freeContext( sws_ctx ); // BUG FIX: context was leaked every frame
}


/* Add an output stream. */
void MovieCodec::add_stream()
{
 AVCodecID codecId = AV_CODEC_ID_H264;

 if (!( m_encoder ))
 {
 fprintf(stderr, "Could not find encoder for '%s'\n",
 avcodec_get_name(codecId));
 return;
 }

 // Get the stream for codec
 m_streamOut = avformat_new_stream(m_formatCtx, m_encoder);

 if (!m_streamOut) {
 fprintf(stderr, "Could not allocate stream\n");
 return;
 }

 m_streamOut->id = m_formatCtx->nb_streams - 1;

 m_codecCtx = avcodec_alloc_context3( m_encoder);

 m_streamOut->codecpar->codec_id = codecId;
 m_streamOut->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 m_streamOut->codecpar->bit_rate = 400000;
 m_streamOut->codecpar->width = 800;
 m_streamOut->codecpar->height = 640;
 m_streamOut->codecpar->format = STREAM_PIX_FMT;
 m_streamOut->codecpar->codec_tag = 0x31637661;
 m_streamOut->codecpar->video_delay = 0;
 m_streamOut->time_base = { 1, STREAM_FRAME_RATE };


 avcodec_parameters_to_context( m_codecCtx, m_streamOut->codecpar);
 
 m_codecCtx->gop_size = 0; /* emit one intra frame every twelve frames at most */
 m_codecCtx->max_b_frames = 0;
 m_codecCtx->time_base = { 1, STREAM_FRAME_RATE };
 m_codecCtx->framerate = { STREAM_FRAME_RATE, 1 };
 m_codecCtx->pix_fmt = STREAM_PIX_FMT;



 if (m_streamOut->codecpar->codec_id == AV_CODEC_ID_H264)
 {
 av_opt_set( m_codecCtx, "preset", "ultrafast", 0 );
 av_opt_set( m_codecCtx, "vprofile", "baseline", 0 );
 av_opt_set( m_codecCtx, "tune", "zerolatency", 0 );
 }

// /* Some formats want stream headers to be separate. */
// if (m_formatCtx->oformat->flags & AVFMT_GLOBALHEADER)
// m_codecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;


}


// Opens the H.264 encoder on the prepared codec context. On failure the
// human-readable FFmpeg error is logged and the context is left unopened.
void MovieCodec::openVideoCodec()
{
    /* open the codec */
    const int status = avcodec_open2(m_codecCtx, m_encoder, NULL);
    if (status >= 0)
        return;

    char message[255];
    av_strerror(status, message, 255);
    fprintf(stderr, "Could not open video codec: %s\n", message);
}



// Converts `image` into m_frame, feeds it to the encoder and writes every
// packet the encoder is ready to emit. AVERROR(EAGAIN) from
// avcodec_receive_packet (-11, the value reported in the question) is NOT an
// error: it means the encoder is still buffering input (x264 look-ahead) —
// those packets come out when close() drains the encoder with a NULL frame.
void MovieCodec::write_video_frame( const cv::Mat& image )
{
    int ret = 0;

    createFrame( image );

    if (m_formatCtx->oformat->flags & 0x0020 /* AVFMT_RAWPICTURE (legacy) */)
    {
        /* Raw video case - directly store the picture in the packet. */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index = m_streamOut->index;
        pkt.data = m_frame->data[0];
        pkt.size = sizeof(AVPicture);

        ret = av_write_frame( m_formatCtx, &pkt );
    }
    else
    {
        /* Feed the frame to the encoder. */
        ret = avcodec_send_frame(m_codecCtx, m_frame);

        if (ret < 0)
        {
            char error[255];
            av_strerror(ret, error, 255);
            fprintf(stderr, "Error encoding video frame: %s\n", error);
            return;
        }

        /* Drain every packet currently available.
         * BUG FIX: the original's post-loop block tried to write the packet
         * again after av_packet_unref() (dead code, pkt.size was already 0)
         * and its do/while + break structure only ever fetched one packet. */
        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.data = NULL;
        pkt.size = 0;

        while ((ret = avcodec_receive_packet(m_codecCtx, &pkt)) == 0)
        {
            pkt.stream_index = m_streamOut->index;

            /* Write the compressed frame to the media file. */
            ret = av_write_frame( m_formatCtx, &pkt );
            av_packet_unref(&pkt);

            if (ret < 0)
                break;
        }

        /* Buffering is the normal case for the first frames — not an error. */
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            ret = 0;
    }

    if (ret != 0)
    {
        char error[255];
        av_strerror(ret, error, 255);
        fprintf(stderr, "Error while writing video frame: %s\n", error);
        return;
    }

    s_frameCount++;
}