
Other articles (53)
-
Websites made with MediaSPIP
2 May 2011
This page lists some websites based on MediaSPIP.
-
Use, discuss, criticize
13 April 2011
Talk to people directly involved in MediaSPIP’s development, or to people around you who could use MediaSPIP to share, enhance or develop their creative projects.
The bigger the community, the more MediaSPIP’s potential will be explored and the faster the software will evolve.
A discussion list is available for all exchanges between users.
-
Creating farms of unique websites
13 April 2011
MediaSPIP platforms can be installed as a farm, with a single "core" hosted on a dedicated server and used by multiple websites.
This allows (among other things): implementation costs to be shared between several different projects/individuals; rapid deployment of multiple unique sites; creation of groups of like-minded sites, making it possible to browse media in a more controlled and selective environment than the major "open" (...)
On other sites (7460)
-
C++ ffmpeg - export to wav error: Invalid PCM packet, data has size 2 but at least a size of 4 was expected
9 September 2024, by Chris P

C++ code:


AudioSegment AudioSegment::from_file(const std::string& file_path, const std::string& format, const std::string& codec,
 const std::map<std::string, std::string>& parameters, int start_second, int duration) {

 avformat_network_init();
 av_log_set_level(AV_LOG_ERROR); // Adjust logging level as needed

 AVFormatContext* format_ctx = nullptr;
 if (avformat_open_input(&format_ctx, file_path.c_str(), nullptr, nullptr) != 0) {
 std::cerr << "Error: Could not open audio file." << std::endl;
 return AudioSegment(); // Return an empty AudioSegment on failure
 }

 if (avformat_find_stream_info(format_ctx, nullptr) < 0) {
 std::cerr << "Error: Could not find stream information." << std::endl;
 avformat_close_input(&format_ctx);
 return AudioSegment();
 }

 int audio_stream_index = -1;
 for (unsigned int i = 0; i < format_ctx->nb_streams; i++) {
 if (format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
 audio_stream_index = i;
 break;
 }
 }

 if (audio_stream_index == -1) {
 std::cerr << "Error: Could not find audio stream." << std::endl;
 avformat_close_input(&format_ctx);
 return AudioSegment();
 }

 AVCodecParameters* codec_par = format_ctx->streams[audio_stream_index]->codecpar;
 const AVCodec* my_codec = avcodec_find_decoder(codec_par->codec_id);
 AVCodecContext* codec_ctx = avcodec_alloc_context3(my_codec);

 if (!codec_ctx) {
 std::cerr << "Error: Could not allocate codec context." << std::endl;
 avformat_close_input(&format_ctx);
 return AudioSegment();
 }

 if (avcodec_parameters_to_context(codec_ctx, codec_par) < 0) {
 std::cerr << "Error: Could not initialize codec context." << std::endl;
 avcodec_free_context(&codec_ctx);
 avformat_close_input(&format_ctx);
 return AudioSegment();
 }

 if (avcodec_open2(codec_ctx, my_codec, nullptr) < 0) {
 std::cerr << "Error: Could not open codec." << std::endl;
 avcodec_free_context(&codec_ctx);
 avformat_close_input(&format_ctx);
 return AudioSegment();
 }

 SwrContext* swr_ctx = swr_alloc();
 if (!swr_ctx) {
 std::cerr << "Error: Could not allocate SwrContext." << std::endl;
 avcodec_free_context(&codec_ctx);
 avformat_close_input(&format_ctx);
 return AudioSegment();
 }
 codec_ctx->sample_rate = 44100;
 // Set up resampling context to convert to S16 format with 2 bytes per sample
 av_opt_set_chlayout(swr_ctx, "in_chlayout", &codec_ctx->ch_layout, 0);
 av_opt_set_int(swr_ctx, "in_sample_rate", codec_ctx->sample_rate, 0);
 av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", codec_ctx->sample_fmt, 0);

 AVChannelLayout dst_ch_layout;
 av_channel_layout_copy(&dst_ch_layout, &codec_ctx->ch_layout);
 av_channel_layout_uninit(&dst_ch_layout);
 av_channel_layout_default(&dst_ch_layout, 2);

 av_opt_set_chlayout(swr_ctx, "out_chlayout", &dst_ch_layout, 0);
 av_opt_set_int(swr_ctx, "out_sample_rate", codec_ctx->sample_rate, 0); // Match input sample rate
 av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0); // Force S16 format

 if (swr_init(swr_ctx) < 0) {
 std::cerr << "Error: Failed to initialize the resampling context" << std::endl;
 swr_free(&swr_ctx);
 avcodec_free_context(&codec_ctx);
 avformat_close_input(&format_ctx);
 return AudioSegment();
 }

 AVPacket packet;
 AVFrame* frame = av_frame_alloc();
 if (!frame) {
 std::cerr << "Error: Could not allocate frame." << std::endl;
 swr_free(&swr_ctx);
 avcodec_free_context(&codec_ctx);
 avformat_close_input(&format_ctx);
 return AudioSegment();
 }

 std::vector<char> output;
 while (av_read_frame(format_ctx, &packet) >= 0) {
 if (packet.stream_index == audio_stream_index) {
 if (avcodec_send_packet(codec_ctx, &packet) == 0) {
 while (avcodec_receive_frame(codec_ctx, frame) == 0) {
 if (frame->pts != AV_NOPTS_VALUE) {
 frame->pts = av_rescale_q(frame->pts, codec_ctx->time_base, format_ctx->streams[audio_stream_index]->time_base);
 }

 uint8_t* output_buffer;
 int output_samples = av_rescale_rnd(
 swr_get_delay(swr_ctx, codec_ctx->sample_rate) + frame->nb_samples,
 codec_ctx->sample_rate, codec_ctx->sample_rate, AV_ROUND_UP);

 int output_buffer_size = av_samples_get_buffer_size(
 nullptr, 2, output_samples, AV_SAMPLE_FMT_S16, 1);

 output_buffer = (uint8_t*)av_malloc(output_buffer_size);

 if (output_buffer) {
 memset(output_buffer, 0, output_buffer_size); // Zero padding to avoid random noise
 int converted_samples = swr_convert(swr_ctx, &output_buffer, output_samples,
 (const uint8_t**)frame->extended_data, frame->nb_samples);

 if (converted_samples >= 0) {
 // Append only the bytes swr_convert actually produced, not the whole buffer
 int converted_bytes = av_samples_get_buffer_size(nullptr, 2, converted_samples, AV_SAMPLE_FMT_S16, 1);
 output.insert(output.end(), output_buffer, output_buffer + converted_bytes);
 }
 else {
 std::cerr << "Error: Failed to convert audio samples." << std::endl;
 }
 // Make sure output_buffer is valid before freeing
 if (output_buffer != nullptr) {
 av_free(output_buffer);
 output_buffer = nullptr; // Prevent double-free
 }
 }
 else {
 std::cerr << "Error: Could not allocate output buffer." << std::endl;
 }
 }
 }
 else {
 std::cerr << "Error: Failed to send packet to codec context." << std::endl;
 }
 }
 av_packet_unref(&packet);
 }

 int frame_width = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * 2; // Use 2 bytes per sample and 2 channels

 std::map<std::string, int> metadata = {
 {"sample_width", 2}, // S16 format has 2 bytes per sample
 {"frame_rate", codec_ctx->sample_rate}, // Use the input sample rate
 {"channels", 2}, // Assuming stereo output
 {"frame_width", frame_width}
 };

 av_frame_free(&frame);
 swr_free(&swr_ctx);
 avcodec_free_context(&codec_ctx);
 avformat_close_input(&format_ctx);

 return AudioSegment(static_cast<const char*>(output.data()), output.size(), metadata);
}

std::ofstream AudioSegment::export_segment_to_wav_file(const std::string& out_f) {
 std::cout << this->get_channels() << std::endl;
 av_log_set_level(AV_LOG_ERROR);
 AVCodecContext* codec_ctx = nullptr;
 AVFormatContext* format_ctx = nullptr;
 AVStream* stream = nullptr;
 AVFrame* frame = nullptr;
 AVPacket* pkt = nullptr;
 int ret;

 // Initialize format context for WAV
 if (avformat_alloc_output_context2(&format_ctx, nullptr, "wav", out_f.c_str()) < 0) {
 throw std::runtime_error("Could not allocate format context.");
 }

 // Find encoder for PCM
 const AVCodec* codec_ptr = avcodec_find_encoder(AV_CODEC_ID_PCM_S16LE);
 if (!codec_ptr) {
 throw std::runtime_error("PCM encoder not found.");
 }

 // Add stream
 stream = avformat_new_stream(format_ctx, codec_ptr);
 if (!stream) {
 throw std::runtime_error("Failed to create new stream.");
 }

 // Allocate codec context
 codec_ctx = avcodec_alloc_context3(codec_ptr);
 if (!codec_ctx) {
 throw std::runtime_error("Could not allocate audio codec context.");
 }

 // Set codec parameters for PCM
 codec_ctx->bit_rate = 128000; // Bitrate
 codec_ctx->sample_rate = this->get_frame_rate(); // Use correct sample rate
 codec_ctx->ch_layout.nb_channels = this->get_channels(); // Set the correct channel count

 // Set the channel layout: stereo or mono
 if (this->get_channels() == 2) {
 av_channel_layout_default(&codec_ctx->ch_layout, 2); // Stereo layout
 }
 else {
 av_channel_layout_default(&codec_ctx->ch_layout, 1); // Mono layout
 }

 codec_ctx->sample_fmt = AV_SAMPLE_FMT_S16; // PCM 16-bit format

 // Open codec
 if (avcodec_open2(codec_ctx, codec_ptr, nullptr) < 0) {
 throw std::runtime_error("Could not open codec.");
 }

 // Set codec parameters to the stream
 if (avcodec_parameters_from_context(stream->codecpar, codec_ctx) < 0) {
 throw std::runtime_error("Could not initialize stream codec parameters.");
 }

 // Open output file
 std::ofstream out_file(out_f, std::ios::binary);
 if (!out_file) {
 throw std::runtime_error("Failed to open output file.");
 }

 if (!(format_ctx->oformat->flags & AVFMT_NOFILE)) {
 if (avio_open(&format_ctx->pb, out_f.c_str(), AVIO_FLAG_WRITE) < 0) {
 throw std::runtime_error("Could not open output file.");
 }
 }

 // Write file header
 if (avformat_write_header(format_ctx, nullptr) < 0) {
 throw std::runtime_error("Error occurred when writing file header.");
 }

 // Initialize packet
 pkt = av_packet_alloc();
 if (!pkt) {
 throw std::runtime_error("Could not allocate AVPacket.");
 }

 // Initialize frame
 frame = av_frame_alloc();
 if (!frame) {
 throw std::runtime_error("Could not allocate AVFrame.");
 }

 // Set the frame properties
 frame->format = codec_ctx->sample_fmt;
 frame->ch_layout = codec_ctx->ch_layout;

 // Number of audio samples available in the data buffer
 int total_samples = data_.size() / (av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * codec_ctx->ch_layout.nb_channels);
 int samples_read = 0;

 // Set the number of samples per frame dynamically based on the input data
 while (samples_read < total_samples) {
 // Determine how many samples to read in this iteration (don't exceed the total sample count)
 int num_samples = std::min(codec_ctx->frame_size, total_samples - samples_read);
 if (num_samples == 0) {
 num_samples = 1024;
 codec_ctx->frame_size = 1024;
 }
 // Ensure num_samples is not zero
 if (num_samples <= 0) {
 throw std::runtime_error("Invalid number of samples in frame.");
 }

 // Set the number of samples in the frame
 frame->nb_samples = num_samples;

 // Allocate the frame buffer based on the number of samples
 ret = av_frame_get_buffer(frame, 0);
 if (ret < 0) {
 std::cerr << "Error allocating frame buffer: " << ret << std::endl;
 throw std::runtime_error("Could not allocate audio data buffers.");
 }

 // Copy the audio data into the frame's buffer (interleaving if necessary)
 /*if (codec_ctx->ch_layout.nb_channels == 2) {
 // If stereo, interleave planar data into packed format
 for (int i = 0; i < num_samples; ++i) {
 ((int16_t*)frame->data[0])[2 * i] = ((int16_t*)data_.data())[i]; // Left channel
 ((int16_t*)frame->data[0])[2 * i + 1] = ((int16_t*)data_.data())[total_samples + i]; // Right channel
 }
 }
 else {
 // For mono or packed data, directly copy the samples
 std::memcpy(frame->data[0], data_.data() + samples_read * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * codec_ctx->ch_layout.nb_channels,
 num_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * codec_ctx->ch_layout.nb_channels);
 }
 */
 std::memcpy(frame->data[0], data_.data() + samples_read * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * codec_ctx->ch_layout.nb_channels,
 num_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * codec_ctx->ch_layout.nb_channels);

 // Send the frame for encoding
 ret = avcodec_send_frame(codec_ctx, frame);
 if (ret < 0) {
 std::cerr << "Error sending frame for encoding: " << ret << std::endl;
 throw std::runtime_error("Error sending frame for encoding.");
 }

 // Receive and write encoded packets
 while (ret >= 0) {
 ret = avcodec_receive_packet(codec_ctx, pkt);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
 break;
 }
 else if (ret < 0) {
 throw std::runtime_error("Error during encoding.");
 }

 out_file.write(reinterpret_cast<const char*>(pkt->data), pkt->size);
 av_packet_unref(pkt);
 }

 samples_read += num_samples;
 }

 // Flush the encoder
 if (avcodec_send_frame(codec_ctx, nullptr) < 0) {
 throw std::runtime_error("Error flushing the encoder.");
 }

 while (avcodec_receive_packet(codec_ctx, pkt) >= 0) {
 out_file.write(reinterpret_cast<const char*>(pkt->data), pkt->size);
 av_packet_unref(pkt);
 }

 // Write file trailer
 av_write_trailer(format_ctx);

 // Cleanup
 av_frame_free(&frame);
 av_packet_free(&pkt);
 avcodec_free_context(&codec_ctx);

 if (!(format_ctx->oformat->flags & AVFMT_NOFILE)) {
 avio_closep(&format_ctx->pb);
 }
 avformat_free_context(format_ctx);

 out_file.close();
 return out_file;
}



Run code:


#include "audio_segment.h"
#include "effects.h"
#include "playback.h"
#include "cppaudioop.h"
#include "exceptions.h"
#include "generators.h"
#include "silence.h"
#include "utils.h"

#include <iostream>
#include <filesystem>

using namespace cppdub;

int main() {
 try {
 // Load the source audio file
 AudioSegment seg_1 = AudioSegment::from_file("../data/test10.mp3");
 std::string out_file_name = "ah-ah-ah.wav";

 // Export the audio segment to a new file with specified settings
 //seg_1.export_segment(out_file_name, "mp3");
 seg_1.export_segment_to_wav_file(out_file_name);


 // Optionally play the audio segment to verify
 // play(seg_1);

 // Load the exported audio file
 AudioSegment seg_2 = AudioSegment::from_file(out_file_name);

 // Play segments
 //play(seg_1);
 play(seg_2);
 }
 catch (const std::exception& e) {
 std::cerr << "An error occurred: " << e.what() << std::endl;
 }

 return 0;
}


Error in the second call of the from_file function:


[pcm_s16le @ 000002d82ca5bfc0] Invalid PCM packet, data has size 2 but at least a size of 4 was expected


The process continues, and I can hear seg_2 through the play(seg_2) call, but I can't play the exported WAV file directly (from Windows Explorer).
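
One detail visible in export_segment_to_wav_file above: the WAV header and trailer are written through the muxer's avio handle (opened with avio_open), while the encoded packets go through a separate std::ofstream on the same path, so the two handles can overwrite each other's bytes. A minimal sketch of the packet loop routed entirely through the muxer instead, assuming format_ctx, codec_ctx, stream and pkt as set up above:

// Hedged sketch: let the WAV muxer do all file I/O so the header, packets
// and trailer end up in one consistent file.
while (ret >= 0) {
 ret = avcodec_receive_packet(codec_ctx, pkt);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 break;
 else if (ret < 0)
 throw std::runtime_error("Error during encoding.");
 pkt->stream_index = stream->index; // tie the packet to the single WAV stream
 if (av_interleaved_write_frame(format_ctx, pkt) < 0) // takes ownership and unrefs pkt
 throw std::runtime_error("Error writing packet.");
}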


My guess was that the error may come from a packed vs. planar format mismatch, but I am not quite sure. Maybe a swr_convert is necessary.
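
If the packed vs. planar guess is right, the conversion would look something like the following. A minimal sketch with libswresample, assuming frame holds decoded planar float (AV_SAMPLE_FMT_FLTP) audio and the encoder expects packed S16 at the same rate and channel layout:

// Hedged sketch: convert one planar float frame to packed 16-bit PCM.
SwrContext* swr = swr_alloc();
av_opt_set_chlayout(swr, "in_chlayout", &frame->ch_layout, 0);
av_opt_set_int(swr, "in_sample_rate", frame->sample_rate, 0);
av_opt_set_sample_fmt(swr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0);
av_opt_set_chlayout(swr, "out_chlayout", &frame->ch_layout, 0);
av_opt_set_int(swr, "out_sample_rate", frame->sample_rate, 0);
av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
if (swr_init(swr) >= 0) {
 uint8_t* packed = nullptr;
 av_samples_alloc(&packed, nullptr, frame->ch_layout.nb_channels,
 frame->nb_samples, AV_SAMPLE_FMT_S16, 0);
 int n = swr_convert(swr, &packed, frame->nb_samples,
 (const uint8_t**)frame->extended_data, frame->nb_samples);
 // packed now holds n interleaved S16 sample frames ready for a PCM encoder
 av_freep(&packed);
}
swr_free(&swr);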


-
Stack AVFrame side by side (libav/ffmpeg)
22 February 2018, by dronemastersaga
So I am trying to combine two 1920x1080 H264 livestreams side by side into a single 3840x1080 livestream.
For this, I can decode the streams to AVFrames in libav/FFmpeg, which I would like to combine into a bigger frame. The input AVFrames: two 1920x1080 frames in NV12 format (description: planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)).
The way I have figured out is a colorspace conversion (YUV to BGR) in libav, wrapping the result in an OpenCV Mat, using hconcat in OpenCV to stack the two images together, then a colorspace conversion (BGR to YUV) back in libav.
Below is the method currently being used:
//Prior code is too long: Basically it decodes 2 streams to AVFrames frame1 and frame2 in a loop
sws_scale(swsContext, (const uint8_t *const *) frame1->data, frame1->linesize, 0, 1080, (uint8_t *const *) frameBGR1->data, frameBGR1->linesize);
sws_scale(swsContext, (const uint8_t *const *) frame2->data, frame2->linesize, 0, 1080, (uint8_t *const *) frameBGR2->data, frameBGR2->linesize);
Mat matFrame1(1080, 1920, CV_8UC3, frameBGR1->data[0], (size_t) frameBGR1->linesize[0]);
Mat matFrame2(1080, 1920, CV_8UC3, frameBGR2->data[0], (size_t) frameBGR2->linesize[0]);
Mat fullFrame;
hconcat(matFrame1, matFrame2, fullFrame);
const int stride[] = { static_cast<int>(fullFrame.step[0]) };
sws_scale(modifyContext, (const uint8_t * const *)&fullFrame.data, stride, 0, fullFrame.rows, newFrame->data, newFrame->linesize);
//From here, newFrame is sent to the encoder
The resulting image is satisfactory, but it does lose quality in the colorspace conversion. However, this method is too slow to use (I'm at 15 fps and I need 30). Is there a way to stack AVFrames directly without colorspace conversion? Or is there any better way to do this? I searched a lot and couldn't find any solution to this. Please advise.
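
For what it's worth, NV12 allows side-by-side stacking with plain row copies, since the Y plane and the interleaved UV plane can each be copied line by line. A rough sketch, assuming frame1 and frame2 are 1920x1080 NV12 and out is a writable 3840x1080 NV12 AVFrame (e.g. allocated with av_frame_get_buffer); this avoids the YUV/BGR round trip entirely, though its speed is untested here:

// Hedged sketch: place frame1 in the left half and frame2 in the right half.
const int w = 1920, h = 1080;
for (int y = 0; y < h; ++y) { // Y plane: one byte per pixel, full height
 memcpy(out->data[0] + y * out->linesize[0], frame1->data[0] + y * frame1->linesize[0], w);
 memcpy(out->data[0] + y * out->linesize[0] + w, frame2->data[0] + y * frame2->linesize[0], w);
}
for (int y = 0; y < h / 2; ++y) { // UV plane: half height, w bytes per row (U and V interleaved)
 memcpy(out->data[1] + y * out->linesize[1], frame1->data[1] + y * frame1->linesize[1], w);
 memcpy(out->data[1] + y * out->linesize[1] + w, frame2->data[1] + y * frame2->linesize[1], w);
}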
-
libavcodec audio decoding is producing garbled samples
14 November 2024, by user28288805
I'm trying to write a function that extracts the raw sample data from audio files. But when debugging with a test file, I found that the samples in floating-point planar format were not in the range of -1.0f to 1.0f as specified in the documentation.


Here is the function:


AudioResource::ReturnCode AudioResource::LoadFromFile(std::string FilePath)
{
 std::string FileURL = "file:" + FilePath;
 AVFormatContext* FormatContext = nullptr;
 int Error = avformat_open_input(&FormatContext, FileURL.c_str(), nullptr, nullptr);

 if (Error < 0)
 {
 return ERROR_OPENING_FILE;
 }

 Error = avformat_find_stream_info(FormatContext, nullptr);
 if (Error < 0)
 {
 return ERROR_FINDING_STREAM_INFO;
 }

 int AudioStream = -1;
 AVCodecParameters* CodecParams;
 for (int i = 0; i < FormatContext->nb_streams; i++)
 {
 if (FormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
 {
 CodecParams = FormatContext->streams[i]->codecpar;
 AudioStream = i;
 }
 }

 if (AudioStream == -1)
 {
 return ERROR_AUDIO_STREAM_NOT_FOUND;
 }

 SampleRate = FormatContext->streams[AudioStream]->codecpar->sample_rate;
 AVSampleFormat SampleFormat = (AVSampleFormat) FormatContext->streams[AudioStream]->codecpar->format;
 Channels = FormatContext->streams[AudioStream]->codecpar->ch_layout.nb_channels;

 if (Channels > 2)
 {
 return ERROR_UNSUPPORTED_CHANNEL_COUNT;
 }

 switch (SampleFormat)
 {
 case AV_SAMPLE_FMT_NONE:
 return ERROR_UNKWON_SAMPLE_FORMAT;
 break;
 case AV_SAMPLE_FMT_U8:
 BytesPerSample = 1;
 if (Channels == 1)
 {
 SampleType = MONO_8BIT;
 }
 else
 {
 SampleType = STEREO_8BIT;
 }
 break;
 case AV_SAMPLE_FMT_S16:
 BytesPerSample = 2;
 if (Channels == 1)
 {
 SampleType = MONO_16BIT;
 }
 else
 {
 SampleType = STEREO_16BIT;
 }
 break;
 case AV_SAMPLE_FMT_S32:
 BytesPerSample = 2;
 if (Channels == 1)
 {
 SampleType = MONO_16BIT;
 }
 else
 {
 SampleType= STEREO_16BIT;
 }
 break;
 case AV_SAMPLE_FMT_FLT:
 BytesPerSample = 2;
 if (Channels == 1)
 {
 SampleType = MONO_16BIT;
 }
 else
 {
 SampleType = STEREO_16BIT;
 }
 break;
 case AV_SAMPLE_FMT_DBL:
 BytesPerSample = 2;
 if (Channels == 1)
 {
 SampleType = MONO_16BIT;
 }
 else
 {
 SampleType = STEREO_16BIT;
 }
 break;
 case AV_SAMPLE_FMT_U8P:
 BytesPerSample = 1;
 if (Channels == 1)
 {
 SampleType = MONO_8BIT;
 }
 else
 {
 SampleType = STEREO_8BIT;
 }
 break;
 case AV_SAMPLE_FMT_S16P:
 BytesPerSample = 2;
 if (Channels == 1)
 {
 SampleType = MONO_16BIT;
 }
 else
 {
 SampleType = STEREO_16BIT;
 }
 break;
 case AV_SAMPLE_FMT_S32P:
 BytesPerSample = 2;
 if (Channels == 1)
 {
 SampleType = MONO_16BIT;
 }
 else
 {
 SampleType = STEREO_16BIT;
 }
 break;
 case AV_SAMPLE_FMT_FLTP:
 BytesPerSample = 2;
 if (Channels == 1)
 {
 SampleType = MONO_16BIT;
 }
 else
 {
 SampleType = STEREO_16BIT;
 }
 break;
 case AV_SAMPLE_FMT_DBLP:
 BytesPerSample = 2;
 if (Channels == 1)
 {
 SampleType = MONO_16BIT;
 }
 else
 {
 SampleType = STEREO_16BIT;
 }
 break;
 case AV_SAMPLE_FMT_S64:
 BytesPerSample = 2;
 if (Channels == 1)
 {
 SampleType = MONO_16BIT;
 }
 else
 {
 SampleType = STEREO_16BIT;
 }
 break;
 case AV_SAMPLE_FMT_S64P:
 BytesPerSample = 2;
 if (Channels == 1)
 {
 SampleType = MONO_16BIT;
 }
 else
 {
 SampleType = STEREO_16BIT;
 }
 break;
 default:
 return ERROR_UNKWON_SAMPLE_FORMAT;
 break;
 }

 const AVCodec* AudioCodec = avcodec_find_decoder(CodecParams->codec_id);
 AVCodecContext* CodecContext = avcodec_alloc_context3(AudioCodec);
 avcodec_parameters_to_context(CodecContext, CodecParams);
 avcodec_open2(CodecContext, AudioCodec, nullptr);

 AVPacket* CurrentPacket = av_packet_alloc();
 AVFrame* CurrentFrame = av_frame_alloc();

 while (av_read_frame(FormatContext, CurrentPacket) >= 0)
 {
 avcodec_send_packet(CodecContext, CurrentPacket);
 for (;;)
 {
 Error = avcodec_receive_frame(CodecContext, CurrentFrame);
 if ((Error == AVERROR(EAGAIN)) || (Error == AVERROR_EOF))
 {
 break;
 }
 else if (Error == AVERROR(EINVAL))
 {
 return ERROR_RECIVING_FRAME;
 }
 else if (Error != 0)
 {
 return ERROR_UNEXSPECTED;
 }

 if (SampleFormat == AV_SAMPLE_FMT_U8)
 {

 }
 else if (SampleFormat == AV_SAMPLE_FMT_S16)
 {

 }
 else if (SampleFormat == AV_SAMPLE_FMT_S32)
 {

 }
 else if (SampleFormat == AV_SAMPLE_FMT_FLT)
 {

 }
 else if (SampleFormat == AV_SAMPLE_FMT_DBL)
 {

 }
 else if (SampleFormat == AV_SAMPLE_FMT_U8P)
 {

 }
 else if (SampleFormat == AV_SAMPLE_FMT_S16P)
 {

 }
 else if (SampleFormat == AV_SAMPLE_FMT_S32P)
 {

 }
 else if (SampleFormat == AV_SAMPLE_FMT_FLTP) //
 {
 if (Channels == 2)
 {
 for (size_t i = 0; i < CurrentFrame->linesize[0]; i += sizeof(float))
 {
 float CurrentLeftSample = 0.0f;
 float CurrentRightSample = 0.0f;
 memcpy(&CurrentLeftSample, &CurrentFrame->data[0][i], sizeof(float));
 memcpy(&CurrentRightSample, &CurrentFrame->data[1][i], sizeof(float));

 short int QuantizedLeftSample = roundf(CurrentLeftSample * 0x7fff);
 short int QuantizedRightSample = roundf(CurrentRightSample * 0x7fff);

 LoadByteData<short int>(QuantizedLeftSample, AudioData);
 LoadByteData<short int>(QuantizedRightSample, AudioData);
 }
 }
 else
 {

 }
 }
 else if (SampleFormat == AV_SAMPLE_FMT_DBLP)
 {

 }
 else if (SampleFormat == AV_SAMPLE_FMT_S64)
 {

 }
 else if (SampleFormat == AV_SAMPLE_FMT_S64P)
 {

 }
 else
 {
 return ERROR_UNEXSPECTED;
 }
 }
 }

 av_frame_free(&CurrentFrame);
 av_packet_free(&CurrentPacket);
 avcodec_free_context(&CodecContext);
 avformat_close_input(&FormatContext); // contexts from avformat_open_input are closed, not just freed
 return OK;
}


Here is where I am reading from the AVFrame's buffer:


for (size_t i = 0; i < CurrentFrame->linesize[0]; i += sizeof(float))
 {
 float CurrentLeftSample = 0.0f;
 float CurrentRightSample = 0.0f;
 memcpy(&CurrentLeftSample, &CurrentFrame->data[0][i], sizeof(float));
 memcpy(&CurrentRightSample, &CurrentFrame->data[1][i], sizeof(float));

 short int QuantizedLeftSample = roundf(CurrentLeftSample * 0x7fff);
 short int QuantizedRightSample = roundf(CurrentRightSample * 0x7fff);

 LoadByteData<short int>(QuantizedLeftSample, AudioData);
 LoadByteData<short int>(QuantizedRightSample, AudioData);
 }


I've tried using different parameters like CurrentFrame->nb_samples and CurrentFrame->buf[0].size in the for loop, with no success; it still produces the same results.
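
Since nb_samples was reportedly tried already, this is only a hedged sketch of the bounds issue: linesize[0] is the allocated plane size in bytes and may include alignment padding, whereas nb_samples is the exact per-channel sample count, so a loop like the one below stays inside the valid data. It also reads the format the decoder actually produced from the frame itself:

// Hedged sketch: iterate per-channel samples via nb_samples instead of the
// (possibly padded) byte count in linesize[0].
if ((AVSampleFormat)CurrentFrame->format == AV_SAMPLE_FMT_FLTP && Channels == 2)
{
 for (int i = 0; i < CurrentFrame->nb_samples; i++)
 {
 float CurrentLeftSample = reinterpret_cast<const float*>(CurrentFrame->data[0])[i];
 float CurrentRightSample = reinterpret_cast<const float*>(CurrentFrame->data[1])[i];

 short int QuantizedLeftSample = (short int)roundf(CurrentLeftSample * 0x7fff);
 short int QuantizedRightSample = (short int)roundf(CurrentRightSample * 0x7fff);

 LoadByteData<short int>(QuantizedLeftSample, AudioData);
 LoadByteData<short int>(QuantizedRightSample, AudioData);
 }
}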

Any help would be much appreciated.