
Other articles (86)
-
Updating from version 0.1 to 0.2
24 June 2013, by
Explanation of the various notable changes when moving from MediaSPIP version 0.1 to version 0.3. What's new
Regarding software dependencies: the latest versions of FFMpeg (>= v1.2.1) are used; the dependencies for Smush are installed; MediaInfo and FFprobe are installed for metadata retrieval; ffmpeg2theora is no longer used; flvtool2 is no longer installed, in favour of flvtool++; ffmpeg-php, which is no longer maintained, is no longer installed (...)
-
Customizing by adding your logo, banner or background image
5 September 2013, by
Some themes support three customization elements: adding a logo; adding a banner; adding a background image.
-
Writing a news item
21 June 2013, by
Present changes to your MediaSPIP, or news about your projects, on your MediaSPIP using the news section.
In MediaSPIP's default spipeo theme, news items are displayed at the bottom of the main page, below the editorials.
You can customize the news item creation form.
News item creation form: for a document of the news type, the default fields are: publication date (customize the publication date) (...)
On other sites (11995)
-
H264 - how to interpret encoded video data [closed]
30 December 2024, by Ragdoll Car
I would like to know how to interpret video data encoded by the H.264 codec, e.g. FFmpeg's AVPacket.data.

Are there any good resources on this that I could study?


I've been searching the Internet, but I didn't find any documentation about the H.264 format.
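For what it is worth, the normative reference is the ITU-T H.264 specification (ISO/IEC 14496-10), which defines the NAL unit syntax that AVPacket.data carries. The snippet below is only a minimal, hedged sketch of one way to peek at such data: it assumes the packets are in Annex B form (start-code delimited, e.g. after FFmpeg's h264_mp4toannexb bitstream filter), whereas packets demuxed from MP4/MKV are usually length-prefixed instead, and dumpNalTypes is a made-up name for illustration.


// Hypothetical helper (not from the question): scan an Annex B buffer,
// e.g. AVPacket.data after the h264_mp4toannexb bitstream filter, and
// print each NAL unit type (5 = IDR slice, 7 = SPS, 8 = PPS, ...).
#include <cstdint>
#include <cstdio>

void dumpNalTypes(const uint8_t* data, int size)
{
 for (int i = 0; i + 3 < size; ++i) {
 // A NAL unit starts after a 00 00 01 start code (sometimes 00 00 00 01).
 if (data[i] == 0 && data[i + 1] == 0 && data[i + 2] == 1) {
 int nalType = data[i + 3] & 0x1F; // lower 5 bits of the NAL header byte
 std::printf("NAL unit type: %d\n", nalType);
 i += 3; // skip past the start code
 }
 }
}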


-
Decoding an MKA audio file into raw data (MKA Audio to raw audio data)
9 October 2020, by bbdd
My task is to open an existing audio file with the mka extension (Matroska container) and extract the raw audio data. This example only shows how to extract raw data from an mp2 file. I do not know how to do this with the mka container. I would like to have something like this:

UPD


I found a way to save the audio data in the format in which it was recorded in the audio file. An example is shown below.


PS. This is only a test version and most likely there are memory leaks and other problems.



#include <QFile>
#include <QDebug>
#include "audiodecoder.h"

int main(int argc, char* argv[])
{
 AudioDecoder decoder("/home/test/test.mka");
 bool started = decoder.start();
 if (!started) {
 return EXIT_FAILURE;
 }

 QFile file("/home/test/rawData.bin");
 file.open(QIODevice::WriteOnly);

 while (true) {
 auto data = decoder.getData(255);
 if (data.isEmpty()) {
 break;
 }
 file.write(data.data(), data.size());
 }
 file.close();
 return EXIT_SUCCESS;
}



audiodecoder.h


#include <QString>
#include <QByteArray>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

class AudioDecoder {
public:
 AudioDecoder(const QString& fileName);
 AudioDecoder& operator=(const AudioDecoder& rhs) = delete;
 AudioDecoder& operator=(AudioDecoder&& rhs) = delete;
 AudioDecoder(const AudioDecoder& rhs) = delete;
 AudioDecoder(AudioDecoder&& rhs) = delete;
 virtual ~AudioDecoder(void);

 virtual bool start(void) noexcept;
 virtual QByteArray getData(const quint16& size) noexcept;
 virtual bool stop(void) noexcept;

protected:
 bool m_initialized;
 QString m_fileName;

 AVFrame* p_frame = nullptr;
 AVPacket* p_packet = nullptr;
 AVCodecContext* p_cdcCtx = nullptr;
 AVFormatContext* p_frmCtx = nullptr;
};



audiodecoder.cpp



#include "audiodecoder.h"

#include <QDebug>

static void logging(const char* message)
{
 qDebug() << message;
}

AudioDecoder::AudioDecoder(const QString& fileName)
 : m_initialized(false)
 , m_fileName(fileName)
 , p_cdcCtx(nullptr)
 , p_frmCtx(nullptr)
{
 // Note: av_register_all() is deprecated since FFmpeg 4.0 and removed in 5.0;
 // it is kept here for older FFmpeg versions.
 av_register_all();
}

QByteArray AudioDecoder::getData(const quint16& dataSize) noexcept
{
 QByteArray data;
 qint32 response = 0;
 if (av_read_frame(p_frmCtx, p_packet) >= 0) {
 //logging(QString("AVPacket->pts %1").arg(p_packet->pts).toStdString().c_str());
 //response = decode_packet(p_packet, p_cdcCtx, p_frame);
 response = avcodec_send_packet(p_cdcCtx, p_packet);
 if (response < 0) {
 logging("Error while sending a packet to the decoder");
 return {};
 }
 while (response >= 0) {
 response = avcodec_receive_frame(p_cdcCtx, p_frame);
 if (response == AVERROR(EAGAIN) || response == AVERROR_EOF) {
 break;
 }
 else if (response < 0) {
 logging("Error while receiving a frame from the decoder");
 return {};
 }
 if (response >= 0) {
 logging(QString("Frame %1 (type=%2, size=%3 bytes) pts %4 key_frame %5 [DTS %6], duration[%7]")
 .arg(p_cdcCtx->frame_number)
 .arg(av_get_picture_type_char(p_frame->pict_type))
 .arg(p_frame->pkt_size)
 .arg(p_frame->pts)
 .arg(p_frame->key_frame)
 .arg(p_frame->coded_picture_number)
 .arg(p_frame->pkt_duration)
 .toStdString()
 .c_str());

 // Caveat: this copies only data[0]; for planar sample formats
 // (e.g. AV_SAMPLE_FMT_FLTP) the remaining channel planes in
 // data[1..] are not copied.
 for (int i = 0; i < p_frame->linesize[0]; ++i) {
 data.push_back(p_frame->data[0][i]);
 }
 }
 }
 av_packet_unref(p_packet);
 return data;
 }
 return {};
}

bool AudioDecoder::start(void) noexcept
{
 if (m_initialized) {
 return true;
 }

 int error;
 // Open the input file to read from it.
 if ((error = avformat_open_input(&p_frmCtx,
 m_fileName.toStdString().c_str(), nullptr, nullptr))
 < 0) {
 qDebug() << "Could not open input file: " << m_fileName;
 p_frmCtx = nullptr;
 return false;
 }
 // Get information on the input file (number of streams etc.).
 if ((error = avformat_find_stream_info(p_frmCtx, nullptr)) < 0) {
 avformat_close_input(&p_frmCtx);
 qDebug() << __LINE__;
 return false;
 }
 // Make sure that there is only one stream in the input file.
 if ((p_frmCtx)->nb_streams != 1) {
 avformat_close_input(&p_frmCtx);
 qDebug() << __LINE__;
 return false;
 }

 if (p_frmCtx->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_AUDIO) {
 avformat_close_input(&p_frmCtx);
 qDebug() << __LINE__;
 return false;
 }

 // Find a decoder for the audio stream.
 AVCodec* input_codec = nullptr;
 if (!(input_codec = avcodec_find_decoder((p_frmCtx)->streams[0]->codecpar->codec_id))) {
 avformat_close_input(&p_frmCtx);
 qDebug() << __LINE__;
 return false;
 }
 // Allocate a new decoding context.
 AVCodecContext* avctx = avcodec_alloc_context3(input_codec);
 if (!avctx) {
 avformat_close_input(&p_frmCtx);
 qDebug() << __LINE__;
 return false;
 }
 // Initialize the stream parameters with demuxer information.
 error = avcodec_parameters_to_context(avctx, (p_frmCtx)->streams[0]->codecpar);
 if (error < 0) {
 avformat_close_input(&p_frmCtx);
 avcodec_free_context(&avctx);
 qDebug() << __LINE__;
 return false;
 }
 /* Open the decoder for the audio stream to use it later. */
 if ((error = avcodec_open2(avctx, input_codec, NULL)) < 0) {
 avcodec_free_context(&avctx);
 avformat_close_input(&p_frmCtx);
 qDebug() << __LINE__;
 return false;
 }
 /* Save the decoder context for easier access later. */
 p_cdcCtx = avctx;
 av_dump_format(p_frmCtx, 0, m_fileName.toStdString().c_str(), 0);

 p_frame = av_frame_alloc();
 if (!p_frame) {
 logging("failed to allocated memory for AVFrame");
 return false;
 }
 p_packet = av_packet_alloc();
 if (!p_packet) {
 logging("failed to allocated memory for AVPacket");
 return false;
 }
 return m_initialized = true;
}

bool AudioDecoder::stop(void) noexcept
{
 // Release everything allocated in start() to avoid leaks.
 if (p_frame != nullptr) {
 av_frame_free(&p_frame);
 }
 if (p_packet != nullptr) {
 av_packet_free(&p_packet);
 }
 if (p_cdcCtx != nullptr) {
 avcodec_free_context(&p_cdcCtx);
 }
 if (p_frmCtx != nullptr) {
 avformat_close_input(&p_frmCtx);
 }
 m_initialized = false;
 return true;
}

AudioDecoder::~AudioDecoder(void)
{
 stop();
}



But the problem with this example is that I did not implement the ability to get exactly the requested size of audio data; the size argument is simply ignored.
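One way to honour the requested size, sketched below, is to keep decoded bytes in an internal buffer between calls and only read more packets when it runs low. This is only an illustration of the buffering idea, not a drop-in replacement: it assumes a hypothetical QByteArray member m_buffer added to AudioDecoder, which does not exist in the code above.


// Hypothetical variant of getData(): m_buffer is an assumed extra
// QByteArray member that carries decoded bytes over between calls.
QByteArray AudioDecoder::getData(const quint16& dataSize) noexcept
{
 // Decode packets until enough bytes are buffered or the file ends.
 while (m_buffer.size() < dataSize) {
 if (av_read_frame(p_frmCtx, p_packet) < 0) {
 break; // end of file or read error
 }
 if (avcodec_send_packet(p_cdcCtx, p_packet) < 0) {
 av_packet_unref(p_packet);
 break;
 }
 while (avcodec_receive_frame(p_cdcCtx, p_frame) >= 0) {
 // Same caveat as above: only plane 0 is copied (packed formats).
 m_buffer.append(reinterpret_cast<const char*>(p_frame->data[0]),
 p_frame->linesize[0]);
 av_frame_unref(p_frame);
 }
 av_packet_unref(p_packet);
 }
 // Hand back exactly dataSize bytes, or whatever is left at the end.
 QByteArray out = m_buffer.left(dataSize);
 m_buffer.remove(0, out.size());
 return out;
}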


-
Is there any way to extract RTP extension header data using ffmpeg functions?
8 May 2016, by kostyl
I want to extract RTP extension header data while reading ffmpeg packets using int av_read_frame(AVFormatContext *s, AVPacket *pkt);. But it seems that ffmpeg skips the RTP extension header data while creating the AVPacket data (link to code). ffmpeg makes AVPackets from RTPPacket data. So is there perhaps a way to get the current RTPPacket after or before calling av_read_frame? ... or does somebody know another way?
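One alternative that avoids av_read_frame() entirely is to receive the RTP datagrams yourself (for example with a QUdpSocket) and parse the header extension by hand, since its layout is fixed by RFC 3550 section 5.3.1. The sketch below only illustrates that layout; RtpExtension and parseRtpExtension are made-up names, and no FFmpeg API is involved.


// Hypothetical parser for the RTP header-extension block (RFC 3550, section 5.3.1).
// Assumes 'pkt' points at a complete RTP datagram received by the caller.
#include <cstdint>
#include <cstddef>

struct RtpExtension {
 uint16_t profileId = 0; // "defined by profile" field
 const uint8_t* data = nullptr; // extension payload, network byte order
 size_t lengthWords = 0; // payload length in 32-bit words
};

bool parseRtpExtension(const uint8_t* pkt, size_t size, RtpExtension& out)
{
 if (size < 12 || (pkt[0] >> 6) != 2) return false; // need RTP version 2
 if (((pkt[0] >> 4) & 0x1) == 0) return false; // X bit not set: no extension
 size_t csrcCount = pkt[0] & 0x0F;
 size_t offset = 12 + 4 * csrcCount; // skip fixed header + CSRC list
 if (size < offset + 4) return false;
 out.profileId = (pkt[offset] << 8) | pkt[offset + 1];
 out.lengthWords = (pkt[offset + 2] << 8) | pkt[offset + 3];
 if (size < offset + 4 + 4 * out.lengthWords) return false;
 out.data = pkt + offset + 4;
 return true;
}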