
On other sites (4374)
-
Segmentation fault on Debian 9 when decoding audio with ffmpeg and libopus
27 August 2021, by Ramil Dautov
I wrote a program that takes an .opus file as input, decodes it using libavcodec and libopus, and then plays it using SDL2. The program works on Windows 10 and Ubuntu 18.04, but it crashes with a segmentation fault on Debian 9.


I've tried updating the libavcodec and libopus libraries and compiling with both clang and gcc; nothing helped.


AddressSanitizer shows that a stack overflow happens:


ASAN:DEADLYSIGNAL
=================================================================
==12167==ERROR: AddressSanitizer: stack-overflow on address 0x2b3e74c81ff8 (pc 0x2b3e7a098803 bp 0x2b3e74c82690 sp 0x2b3e74c81eb0 T2)
 #0 0x2b3e7a098802 in quant_all_bands celt/bands.c:1403
 #1 0x2b3e7a0a2a37 in celt_decode_with_ec celt/celt_decoder.c:1083
 #2 0x2b3e7a0c8afb in opus_decode_frame src/opus_decoder.c:518
 #3 0x2b3e7a0c9e40 in opus_decode_native src/opus_decoder.c:721
 #4 0x2b3e7a0d33f3 in opus_multistream_decode_native src/opus_multistream_decoder.c:253
 #5 0x2b3e7a0d37a8 in opus_multistream_decode src/opus_multistream_decoder.c:398
 #6 0x2b3e760ad83c (/usr/lib/x86_64-linux-gnu/libavcodec.so.57+0x43583c)
 #7 0x2b3e75e4ca27 (/usr/lib/x86_64-linux-gnu/libavcodec.so.57+0x1d4a27)
 #8 0x2b3e75e4f62a in avcodec_send_packet (/usr/lib/x86_64-linux-gnu/libavcodec.so.57+0x1d762a)
 #9 0x2b3e75e4f9e6 (/usr/lib/x86_64-linux-gnu/libavcodec.so.57+0x1d79e6)
 #10 0x55ef09511882 in decode(AVCodecContext*, AVPacket*, unsigned char*, int) /home/ram/my/player3/speaker.cpp:296
 #11 0x55ef09511626 in fillBuffer(AVCodecContext*, unsigned char*, int) /home/ram/my/player3/speaker.cpp:251
 #12 0x55ef09511294 in process(AVCodecContext*, unsigned char*, int) /home/ram/my/player3/speaker.cpp:194
 #13 0x55ef095105b9 in audio_callback(void*, unsigned char*, int) /home/ram/my/player3/speaker.cpp:69
 #14 0x2b3e7815cc31 (/usr/lib/x86_64-linux-gnu/libSDL2-2.0.so.0+0x1fc31)
 #15 0x2b3e781bcf8b (/usr/lib/x86_64-linux-gnu/libSDL2-2.0.so.0+0x7ff8b)
 #16 0x2b3e7820c6c8 (/usr/lib/x86_64-linux-gnu/libSDL2-2.0.so.0+0xcf6c8)
 #17 0x2b3e77be74a3 in start_thread (/lib/x86_64-linux-gnu/libpthread.so.0+0x74a3)
 #18 0x2b3e7940ed0e in __clone (/lib/x86_64-linux-gnu/libc.so.6+0xe8d0e)

SUMMARY: AddressSanitizer: stack-overflow celt/bands.c:1403 in quant_all_bands
Thread T2 (SDLAudioDev2) created by T0 here:
 #0 0x2b3e74d0df59 in __interceptor_pthread_create (/usr/lib/x86_64-linux-gnu/libasan.so.3+0x30f59)
 #1 0x2b3e7820c732 (/usr/lib/x86_64-linux-gnu/libSDL2-2.0.so.0+0xcf732)




I also tried increasing the stack size with ulimit -s unlimited, and increasing the stack size of the thread that starts the decoding; neither helped.
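
(For reference, a minimal sketch of how a larger stack can be requested for a thread created directly with pthreads; decode_thread here is only a placeholder entry point, and this has no effect on threads that SDL creates internally.)

#include <pthread.h>

// Placeholder entry point; stands in for whatever function starts the decoding.
static void* decode_thread(void*) { return nullptr; }

// Minimal sketch: request an 8 MiB stack for a thread we create ourselves.
bool start_decode_thread_with_big_stack()
{
    pthread_attr_t attr;
    if (pthread_attr_init(&attr) != 0)
        return false;

    if (pthread_attr_setstacksize(&attr, 8 * 1024 * 1024) != 0) { // 8 MiB
        pthread_attr_destroy(&attr);
        return false;
    }

    pthread_t tid;
    const bool started = pthread_create(&tid, &attr, decode_thread, nullptr) == 0;
    pthread_attr_destroy(&attr);

    if (started)
        pthread_join(tid, nullptr);
    return started;
}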


In the main.cpp file I have this:


#include <iostream>
#include <memory>
#include <mutex>
#include "speaker.h"
#include "SDL2/SDL.h"

extern "C"{
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswresample/swresample.h>
}

static int decode_audio_file(const char* path) {
 
 av_register_all();

 // get format from audio file
 AVFormatContext* format = avformat_alloc_context();
 if (avformat_open_input(&format, path, NULL, NULL) != 0) {
 std::cout << "Could not open file" << std::endl;
 return -1;
 }
 if (avformat_find_stream_info(format, NULL) < 0) {
 std::cout << "Could not retrieve stream info from file" << std::endl;
 return -1;
 }

 // Find the index of the first audio stream
 int stream_index = -1;
 for (int i=0; i< format->nb_streams; i++) {
 if (format->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
 stream_index = i;
 break;
 }
 }
 if (stream_index == -1) {
 std::cout << "Could not retrieve audio stream from file" << std::endl;
 return -1;
 }
 AVStream* stream = format->streams[stream_index];

 // Initialize speaker
 init_Speaker("OPUS",
 48000,
 2,
 15,
 3,
 av_get_channel_layout("stereo"),
 av_get_sample_fmt("s16"));

 // prepare to read data
 AVPacket* packet;
 packet = av_packet_alloc();
 av_init_packet(packet);

 // iterate through frames
 while (av_read_frame(format, packet) >= 0) {
 play(packet->data, packet->size, std::chrono::microseconds{packet->pts},
 std::chrono::microseconds{packet->dts});
 av_packet_unref(packet);
 }

 // clean up
 avformat_free_context(format);
 close_Speaker();

 // success
 return 0;
}

int main(int argc, char const *argv[]) {
 // check parameters
 if (argc < 2) {
 std::cout << "Please provide the path to an audio file as first command-line argument.\n";
 return -1;
 }

 // Init Audio
 SDL_Init(SDL_INIT_AUDIO);

 // decode data
 if (decode_audio_file(argv[1]) != 0) {
 return -1;
 }

 std::cout << "Finish" << std::endl;
 return 0;
}

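(Aside: the code above still uses av_register_all() and the deprecated AVStream::codec field. A minimal sketch of how the audio stream could be located with newer libavformat APIs, assuming a recent enough FFmpeg; this is not part of the original program.)

extern "C" {
#include <libavformat/avformat.h>
}

// Minimal sketch: locate the audio stream with av_find_best_stream() and read
// its parameters from codecpar instead of the deprecated AVStream::codec field.
static int find_audio_stream(AVFormatContext* format)
{
    // Returns the stream index on success, or a negative AVERROR code.
    int index = av_find_best_stream(format, AVMEDIA_TYPE_AUDIO, -1, -1, nullptr, 0);
    if (index < 0)
        return index;

    const AVCodecParameters* par = format->streams[index]->codecpar;
    (void)par; // e.g. par->codec_id, par->sample_rate, par->channels
    return index;
}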


In speaker.cpp:


#include "speaker.h"
#include "pthread.h"
#include "avcodec.h"
#include "common.h"
#include <iostream>

extern "C"
{
#include <libswresample/swresample.h>
#include <libavutil/hwcontext.h>
}

using std::chrono::microseconds;

SDL_AudioDeviceID m_id;

AVCodecParserContext *parser = nullptr;

//
constexpr static auto buffer_size{1024}; // 2048
constexpr static auto buffer_max_size{AVCODEC_MAX_AUDIO_FRAME_SIZE * 4};
uint32_t m_samplerate;
uint32_t m_queue_limit;
uint32_t m_queue_dropfactor;
int64_t m_channel_layout;
AVSampleFormat m_device_format;
AVCodecID audio_codec_id{AV_CODEC_ID_NONE};
AVCodecContext* adecoder;

player::PacketQueue queue(0, true); 


static uint8_t* buf = nullptr;
uint32_t absize{};
uint32_t abpos{};
int32_t max_decoder_size{};
// need a converter?
uint8_t* convbuf{};
SwrContext* swrctx{};
int32_t sframes{};
AVCodec* codec{};

uint8_t * audio_buffer_init() {
 if(buf == nullptr) {
 buf = (uint8_t*) malloc(buffer_max_size);
 if(buf == nullptr) {
 return nullptr;
 }
 }
 return buf;
}

void audio_callback(void* userdata, uint8_t* stream, int len) {
 AVCodecContext* decoder = (AVCodecContext*)userdata;
 process(decoder, stream, len);
};

void init_Speaker(const std::string& codecName,
 int32_t samplerate,
 uint8_t channels,
 uint32_t queue_limit,
 uint32_t queue_dropfactor,
 int64_t channel_layout,
 AVSampleFormat format)
{
 m_samplerate = samplerate;
 m_queue_limit = queue_limit;
 m_queue_dropfactor = queue_dropfactor;
 m_device_format = format;
 m_channel_layout = channel_layout;


 SDL_SetHint(SDL_HINT_THREAD_STACK_SIZE, "8388608");

 if(codecName.empty())
 throw std::runtime_error("audio decoder: no codec specified.");

 auto names = player::lookup_ffmpeg_decoders(codecName);
 if(names == nullptr)
 throw std::runtime_error("audio decoder: cannot find decoder names for {}"+codecName);

 audio_codec_id = player::lookup_codec_id(codecName);
 codec = player::avcodec_find_decoder(names, AV_CODEC_ID_NONE);
 if(codec == nullptr)
 throw std::runtime_error("audio decoder: cannot find the decoder for {}"+codecName);

 adecoder = avcodec_alloc_context3(codec);
 if(adecoder == nullptr)
 throw std::runtime_error("audio decoder: cannot allocate context");

 adecoder->channels = channels;
 adecoder->sample_rate = samplerate;
 if(adecoder->channels == 1)
 {
 adecoder->channel_layout = AV_CH_LAYOUT_MONO;
 }
 else if(adecoder->channels == 2)
 {
 adecoder->channel_layout = AV_CH_LAYOUT_STEREO;
 }
 else
 throw std::runtime_error("audio decoder: unsupported number of channels ({})"+ adecoder->channels);
 

 if(avcodec_open2(adecoder, codec, nullptr) != 0)
 throw std::runtime_error("audio decoder: cannot open decoder");

 parser = av_parser_init(codec->id);

 SDL_AudioSpec wanted, spec;
 wanted.freq = samplerate;
 wanted.format = AUDIO_S16SYS;
 wanted.channels = channels;
 wanted.silence = 0;
 wanted.samples = buffer_size;
 wanted.userdata = adecoder;
 wanted.callback = audio_callback;
 

 m_id = SDL_OpenAudioDevice(nullptr, 0, &wanted, &spec, 0);
 if(m_id == 0)
 throw std::runtime_error(SDL_GetError());

 SDL_PauseAudioDevice(m_id, 0);
}

void close_Speaker()
{
 SDL_CloseAudioDevice(m_id);
 
 if(adecoder != nullptr)
 player::avcodec_close(adecoder);

}

void play(uint8_t* buffer, size_t bufsize, microseconds pts, microseconds dts)
{
 if(!buffer || !bufsize) {
 return;
 }
 
 AVPacket* avpkt;
 avpkt = av_packet_alloc();
 av_init_packet(avpkt);
 uint8_t bf[bufsize + 64];
 memcpy(bf, buffer, bufsize);

 av_parser_parse2(parser, adecoder, &avpkt->data, &avpkt->size,
 bf, bufsize,
 pts.count(), dts.count(), 0);

 queue.put(av_packet_clone(avpkt));
 queue.drop(m_queue_limit, m_queue_dropfactor); 

}

//

void process(AVCodecContext* decoder, uint8_t* stream, int ssize)
{
 auto filled = fillBuffer(decoder, stream, ssize);

 auto unfilled{(ssize - filled) / 4};
 auto dummy{sframes};

 sframes = unfilled == 0 ? 0 : sframes + unfilled;
 memset(stream + filled, 0, unfilled * 4);
 
 if(sframes != dummy)
 queue.add_silence((int64_t)sframes * 1000000 / m_samplerate);
}

int fillBuffer(AVCodecContext* decoder, uint8_t* stream, int ssize)
{
 int filled{};
 AVPacket avpkt;
 audio_buffer_init();
 while(filled < ssize)
 {
 int dsize{}, delta{};

 // buffer has enough data
 if(absize - abpos >= static_cast<unsigned int>(ssize - filled))
 {
 delta = ssize - filled;
 std::copy(buf + abpos, buf + abpos + delta, stream);
 abpos += delta;
 filled += delta;
 return ssize;
 }
 else if(absize - abpos > 0)
 {
 delta = absize - abpos;
 std::copy(buf + abpos, buf + abpos + delta, stream);
 stream += delta;
 filled += delta;
 abpos = absize = 0;
 }
 // move data to head, leave more ab buffers
 if(abpos != 0)
 {
 std::copy(buf + abpos, buf + abpos + absize - abpos, buf);
 absize -= abpos;
 abpos = 0;
 }
 // decode more packets
 if(!queue.get(&avpkt, false))
 break;
 if((dsize = decode(decoder, &avpkt, buf + absize, buffer_max_size - absize)) < 0)
 break;
 absize += dsize;
 }

 return filled;
}

int decode(AVCodecContext* decoder, AVPacket* pkt, uint8_t* dstbuf, int dstlen)
{
 const uint8_t* srcplanes[SWR_CH_MAX];
 uint8_t* dstplanes[SWR_CH_MAX];
 int filled{};

 AVFrame* aframe = av_frame_alloc();

 auto saveptr = pkt->data;

 while(pkt->size > 0)
 {
 int len{}, got_frame{};
 unsigned char* srcbuf{};
 int datalen{};

 if((len = avcodec_decode_audio4(decoder, aframe, &got_frame, pkt)) < 0)
 {
 return -1;
 }
 if(got_frame == 0)
 {
 pkt->size -= len;
 pkt->data += len;
 continue;
 }

 if(aframe->format == m_device_format)
 {
 datalen = av_samples_get_buffer_size(nullptr,
 aframe->channels /*rtspconf->audio_channels*/,
 aframe->nb_samples,
 (AVSampleFormat)aframe->format,
 1 /*no-alignment*/);
 srcbuf = aframe->data[0];
 }
 else
 {
 // need conversion!
 if(swrctx == nullptr)
 {
 if((swrctx = swr_alloc_set_opts(nullptr,
 m_channel_layout,
 m_device_format,
 m_samplerate,
 aframe->channel_layout,
 (AVSampleFormat)aframe->format,
 aframe->sample_rate,
 0,
 nullptr)) == nullptr)
 {
 return -1;
 }
 auto err = swr_init(swrctx);
 if(err < 0)
 {
 char msg[1024];
 av_strerror(err, msg, 1024);
 return -1;
 }
 max_decoder_size = av_samples_get_buffer_size(nullptr,
 2, 
 m_samplerate,
 m_device_format,
 1 /*no-alignment*/);
 if((convbuf = (unsigned char*)::malloc(max_decoder_size)) == nullptr)
 {
 return -1;
 }
 }
 datalen = av_samples_get_buffer_size(nullptr,
 2,
 aframe->nb_samples,
 m_device_format,
 1 /*no-alignment*/);
 if(datalen > max_decoder_size)
 {
 return -1;
 }
 srcplanes[0] = aframe->data[0];
 if(av_sample_fmt_is_planar((AVSampleFormat)aframe->format) != 0)
 {
 // planar
 int i;
 for(i = 1; i < aframe->channels; i++)
 {
 srcplanes[i] = aframe->data[i];
 }
 srcplanes[i] = nullptr;
 }
 else
 {
 srcplanes[1] = nullptr;
 }
 dstplanes[0] = convbuf;
 dstplanes[1] = nullptr;

 swr_convert(swrctx, dstplanes, aframe->nb_samples, srcplanes, aframe->nb_samples);
 srcbuf = convbuf;
 }
 if(datalen > dstlen)
 {
 datalen = dstlen;
 }

 std::copy(srcbuf, srcbuf + datalen, dstbuf);
 dstbuf += datalen;
 dstlen -= datalen;
 filled += datalen;

 pkt->size -= len;
 pkt->data += len;
 av_frame_unref(aframe);
 }
 pkt->data = saveptr;
 if(pkt->data)
 av_packet_unref(pkt);
 if(aframe != nullptr)
 av_frame_free(&aframe);
 
 return filled;
}
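(Aside: decode() above relies on the deprecated avcodec_decode_audio4(), while the backtrace runs through avcodec_send_packet() inside libavcodec. A minimal sketch of the newer send/receive decoding loop follows, assuming the installed libavcodec exposes that API; it is not a drop-in replacement for the conversion logic above.)

extern "C" {
#include <libavcodec/avcodec.h>
}

// Minimal sketch of the send/receive decoding loop; error handling trimmed.
static int decode_with_send_receive(AVCodecContext* decoder, const AVPacket* pkt, AVFrame* frame)
{
    int ret = avcodec_send_packet(decoder, pkt);
    if (ret < 0)
        return ret; // packet could not be submitted to the decoder

    int frames = 0;
    for (;;) {
        ret = avcodec_receive_frame(decoder, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;      // decoder needs more input, or is fully drained
        if (ret < 0)
            return ret; // an actual decoding error
        // ... consume frame->data / frame->nb_samples here ...
        av_frame_unref(frame);
        ++frames;
    }
    return frames;
}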


In packet_queue.cpp:


#include "packet_queue.h"

using player::PacketQueue;
using lock_guard = std::lock_guard<std::mutex>;
using unique_lock = std::unique_lock<std::mutex>;
using std::chrono::milliseconds;

PacketQueue::PacketQueue(uint32_t playback_queue_silence, bool playback_queue_debug) :
 m_playback_queue_debug(playback_queue_debug), m_playback_queue_silence(playback_queue_silence)
{
}

void PacketQueue::clear()
{
 lock_guard lk{m_mtx};
 for(auto& pkt : queue)
 av_packet_unref(pkt);

 m_size = 0;
 queue.clear();
}

void PacketQueue::add_silence(int64_t silence_pts)
{
 if(m_playback_queue_silence == 0)
 {
 return;
 }

 lock_guard lk{m_mtx};
 silence_pts = filtered_packets > 0 ? silence_pts : last_pts + silence_pts;

 auto tv = std::chrono::microseconds{last_pts};
 auto tv2 = std::chrono::microseconds{silence_pts};
}

bool PacketQueue::put(AVPacket* pkt)
{
 if(pkt == nullptr)
 {
 return false;
 }

 lock_guard lk{m_mtx};
 if((silence_pts - pkt->pts) > (m_playback_queue_silence * 1000))
 {
 auto tv = std::chrono::microseconds{pkt->pts};
 filtered_packets++;
 if(m_playback_queue_debug)
 return true;
 }

 queue.push_back(pkt);
 filtered_packets = 0;
 return true;
}

bool PacketQueue::get(AVPacket* pkt, bool block, milliseconds timeout)
{
 unique_lock lk{m_mtx};

 for(;;)
 {
 if(queue.size() > 0)
 {
 auto ptr = queue.front();
 queue.pop_front();
 m_size -= ptr->size;
 last_pts = ptr->pts;
 av_packet_move_ref(pkt, ptr);
 return true;
 }
 else if(!block)
 {
 return false;
 }
 else if(!m_cv.wait_for(lk, timeout, [&] { return !queue.empty(); }))
 {
 return false;
 }

 }
 return false;
}

bool PacketQueue::drop(size_t limit, size_t dropfactor)
{
 int dropped, count = 0;

 lock_guard lk{m_mtx};

 // queue size exceeded?
 if(queue.size() <= limit)
 {
 return false;
 }

 // start dropping
 dropped = queue.size() / dropfactor;
 // keep at least one
 if(dropped == queue.size())
 dropped--;

 AVPacket* pkt;
 while(dropped-- > 0 && !queue.empty())
 {
 pkt = queue.front();

 if(pkt->flags != AV_PKT_FLAG_KEY)
 {
 queue.pop_front();
 m_size -= pkt->size;
 av_packet_unref(pkt);
 ++count;
 }
 }

 return true; // count;
}

int PacketQueue::drop2(size_t limit, bool error)
{
 int count = 0;

 // dropping enabled?
 if(limit <= 0 && !error)
 return 0;

 lock_guard lk{m_mtx};
 // queue size exceeded?
 if(queue.size() <= limit && !error)
 return false;

 for(auto i = queue.begin(); i != queue.end();)
 {
 AVPacket* pkt = *i;
 if(pkt->flags != AV_PKT_FLAG_KEY)
 {
 m_size -= pkt->size;
 av_packet_unref(pkt);
 i = queue.erase(i);
 count++;
 }
 else
 ++i;
 }

 return count;
}
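
(Side note, a minimal sketch for context: av_packet_unref() only drops the reference to a packet's payload; packets obtained from av_packet_alloc() or av_packet_clone(), as in play() and drop() above, also need av_packet_free() to release the AVPacket struct itself.)

extern "C" {
#include <libavcodec/avcodec.h>
}

// Minimal sketch of the ownership rules for heap-allocated packets.
static void packet_lifetime_example()
{
    AVPacket* pkt = av_packet_alloc();  // allocates the AVPacket struct itself
    if (pkt == nullptr)
        return;

    // ... fill and use the packet ...

    av_packet_unref(pkt);   // releases the reference to the payload only
    av_packet_free(&pkt);   // frees the struct and sets pkt to nullptr
}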



Line 1403 of celt/bands.c:
[screenshot]


Versions of the libraries that I tried on Debian 9:
libavcodec.so.57.64.101 and libopus.so.0.5.3


I also built libavcodec.so.57.107.100 and libopus.so.0.8.0 manually and tried using them instead; the same error appears.


As I already mentioned, everything works fine on Windows 10 and Ubuntu 18.04, so I have no clue what the reason for the issue could be. Any help is appreciated.


-
Use Named Pipe (C++) to send images to FFMPEG
28 August 2015, by user1829136
I have the following code in C++:
#include <windows.h>
#include <iostream>   // std::cout
#include <fstream>    // std::ifstream
#include <vector>
#include <string>
using namespace std;
int main(int argc, const char **argv)
{
wcout << "Creating an instance of a named pipe..." << endl;
// Create a pipe to send data
HANDLE pipe = CreateNamedPipe(
L"\\\\.\\pipe\\my_pipe", // name of the pipe
PIPE_ACCESS_OUTBOUND, // 1-way pipe -- send only
PIPE_TYPE_BYTE, // send data as a byte stream
1, // only allow 1 instance of this pipe
0, // no outbound buffer
0, // no inbound buffer
0, // use default wait time
NULL // use default security attributes
);
if (pipe == NULL || pipe == INVALID_HANDLE_VALUE) {
wcout << "Failed to create outbound pipe instance.";
// look up error code here using GetLastError()
system("pause");
return 1;
}
wcout << "Waiting for a client to connect to the pipe..." << endl;
// This call blocks until a client process connects to the pipe
BOOL result = ConnectNamedPipe(pipe, NULL);
if (!result) {
wcout << "Failed to make connection on named pipe." << endl;
// look up error code here using GetLastError()
CloseHandle(pipe); // close the pipe
system("pause");
return 1;
}
wcout << "Sending data to pipe..." << endl;
//opening file
ifstream infile;
infile.open("E:/xmen.jpg",std::ios::binary);
ofstream out("E:/lelel.jpg",std::ios::binary);
infile.seekg(0,std::ios::end);
size_t file_size_in_byte = infile.tellg();
vector<char> file_vec;
file_vec.resize(file_size_in_byte);
infile.seekg(0,std::ios::beg);
infile.read(&file_vec[0],file_size_in_byte);
out.write(&file_vec[0],file_vec.size());
// This call blocks until a client process reads all the data
DWORD numBytesWritten = 0;
result = WriteFile(
pipe, // handle to our outbound pipe
&file_vec[0], // data to send
61026, // length of data to send (bytes)
&numBytesWritten, // will store actual amount of data sent
NULL // not using overlapped IO
);
if (result) {
wcout << "Number of bytes sent: " << numBytesWritten << endl;
} else {
wcout << "Failed to send data." << endl;
// look up error code here using GetLastError()
}
// Close the pipe (automatically disconnects client too)
CloseHandle(pipe);
wcout << "Done." << endl;
system("pause");
return 0;
}
Which I use to create a named pipe \\.\pipe\my_pipe, to which FFMPEG connects using the following command:
64-static\bin\Video>ffmpeg.exe -loop 1 -s 4cif -f image2 -y -i \\.\pipe\\my_pipe
-r 25 -vframes 250 -vcodec rawvideo -an eaeew.mov
Output:
ffmpeg version N-54233-g86190af Copyright (c) 2000-2013 the FFmpeg developers
built on Jun 27 2013 16:49:12 with gcc 4.7.3 (GCC)
configuration: --enable-gpl --enable-version3 --disable-w32threads --enable-avisynth --enable-bzlib --enable-fontconfig --enable-frei0r --enable-gnutls --enable-iconv --enable-libass --enable-libbluray --enable-libcaca --enable-libfreetype --enable-libgsm --enable-libilbc --enable-libmodplug --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libschroedinger --enable-libsoxr --enable-libspeex --enable-libtheora --enable-libtwolame --enable-libvo-aacenc --enable-libvo-amrwbenc --enable-libvorbis --enable-libvpx --enable-libx264 --enable-libxavs --enable-libxvid --enable-zlib
libavutil 52. 37.101 / 52. 37.101
libavcodec 55. 17.100 / 55. 17.100
libavformat 55. 10.100 / 55. 10.100
libavdevice 55. 2.100 / 55. 2.100
libavfilter 3. 77.101 / 3. 77.101
libswscale 2. 3.100 / 2. 3.100
libswresample 0. 17.102 / 0. 17.102
libpostproc 52. 3.100 / 52. 3.100
[image2 @ 0000000003ee04a0] Could find no file with with path '\\.\pipe\\my_pipe
' and index in the range 0-4
\\.\pipe\\my_pipe: No such file or directory

I can see on my console that my C++ app received a connection, but I get the error above in FFMPEG. Can someone please advise?
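
(One detail worth checking in the writer, offered as a sketch rather than a confirmed fix: WriteFile() is called with a hardcoded length of 61026 bytes, so a larger image would only be partially written to the pipe. Writing the whole buffer and looping on the reported byte count would look roughly like this.)

#include <windows.h>
#include <vector>
#include <iostream>

// Sketch only: write the whole buffer to the pipe, looping on the number of
// bytes actually written instead of passing a hardcoded length.
static bool write_all(HANDLE pipe, const std::vector<char>& data)
{
    DWORD totalWritten = 0;
    while (totalWritten < data.size()) {
        DWORD written = 0;
        BOOL ok = WriteFile(
            pipe,                                            // outbound pipe handle
            data.data() + totalWritten,                      // remaining data
            static_cast<DWORD>(data.size() - totalWritten),  // remaining length in bytes
            &written,                                        // bytes actually sent
            NULL);                                           // not using overlapped IO
        if (!ok) {
            std::wcout << L"WriteFile failed, error " << GetLastError() << std::endl;
            return false;
        }
        totalWritten += written;
    }
    return true;
}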
EDIT 1
Using the command below:
ffmpeg.exe -s 4cif -i \\.\pipe\my_pipe -r 25 -vframes 250 -vcodec rawvideo -an tess.mov
I get the following output:
ffmpeg version N-54233-g86190af Copyright (c) 2000-2013 the FFmpeg developers
built on Jun 27 2013 16:49:12 with gcc 4.7.3 (GCC)
configuration: --enable-gpl --enable-version3 --disable-w32threads --enable-avisynth --enable-bzlib --enable-fontconfig --enable-frei0r --enable-gnutls --enable-iconv --enable-libass --enable-libbluray --enable-libcaca --enable-libfreetype --enable-libgsm --enable-libilbc --enable-libmodplug --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libschroedinger --enable-libsoxr --enable-libspeex --enable-libtheora --enable-libtwolame --enable-libvo-aacenc --enable-libvo-amrwbenc --enable-libvorbis --enable-libvpx --enable-libx264 --enable-libxavs --enable-libxvid --enable-zlib
libavutil 52. 37.101 / 52. 37.101
libavcodec 55. 17.100 / 55. 17.100
libavformat 55. 10.100 / 55. 10.100
libavdevice 55. 2.100 / 55. 2.100
libavfilter 3. 77.101 / 3. 77.101
libswscale 2. 3.100 / 2. 3.100
libswresample 0. 17.102 / 0. 17.102
libpostproc 52. 3.100 / 52. 3.100
\\.\pipe\my_pipe: Invalid data found when processing input

So now it seems it was able to connect to the pipe but is not able to process the input.
-
Uploading video to Twitter sometimes doesn't work
22 July 2021, by K-s S-k
I have a very difficult situation: I've already spent two days and couldn't find a solution. The project runs on Laravel. I want to upload videos to Twitter using the Twitter API endpoints, but sometimes I get this error:




file is currently unsupported




I did everything as recommended in the official documentation, Video specifications and recommendations. I get the error when the audio codec of my video file is AAC, even though that is exactly what the official documentation recommends; when I set the audio codec to MP3 instead, the video uploads, but the sound quality is very poor and sometimes there is no sound at all. Please forgive me if this is awkward to read, but I want to provide all of my code, because I don't know how else to solve this and I think it might help.


<?php

namespace App\Jobs;

use App\Models\PublishedContent;
use Atymic\Twitter\Facades\Twitter;
use GuzzleHttp\Client as GuzzleClient;
use GuzzleHttp\Exception\GuzzleException;
use Illuminate\Bus\Queueable;
use Illuminate\Support\Facades\Log;
use Illuminate\Support\Facades\File;
use Illuminate\Queue\SerializesModels;
use Illuminate\Queue\InteractsWithQueue;
use Illuminate\Contracts\Queue\ShouldQueue;
use Illuminate\Foundation\Bus\Dispatchable;
use Illuminate\Support\Str;


class PublishToTwitter implements ShouldQueue
{
 use Dispatchable, InteractsWithQueue, Queueable, SerializesModels;

 /**
 * @var
 */
 protected $publishingData;

 /**
 * Create a new job instance.
 *
 * @param $publishingData
 */
 public function __construct($publishingData)
 {
 $this->publishingData = $publishingData;
 }

 /**
 * Execute the job.
 *
 * @return void
 */
 public function handle()
 {
 $publishingData = $this->publishingData;

 if (is_array($publishingData)) {
 $publishingResult = $this->publishing(...array_values($publishingData));
 sendNotification($publishingResult['message'], $publishingResult['status'], 'Twitter', $publishingResult['link'], $publishingData['post_name'], $publishingData['user']);
 } else {
 $scheduledData = processingScheduledPost($publishingData);
 $postName = $scheduledData['scheduleData']['post_name'];
 $postContent = $scheduledData['scheduleData']['post_content'];
 $userToken = json_decode($publishingData->user_token,true);
 $requestToken = [
 'token' => $userToken['oauth_token'],
 'secret' => $userToken['oauth_token_secret'],
 ];
 $publishingResult = $this->publishing($scheduledData['file'], $postName, $postContent, $requestToken);
 $publishingResult['status'] && PublishedContent::add($scheduledData['craft'], $scheduledData['file'], "twitter_share");
 sendResultToUser($publishingData, $scheduledData['user'], $publishingResult['message'], $postName, $publishingResult['link'], $publishingResult['publishing_status'], $scheduledData['social_media']);
 sendNotification($publishingResult['message'], $publishingResult['status'], 'Twitter', $publishingResult['link'], $postName, $scheduledData['user']);
 }
 }

 /**
 * @param $file
 * @param $postName
 * @param $postContent
 * @param $requestToken
 * @return array
 */
 private function publishing($file, $postName, $postContent, $requestToken): array
 {
 $result = [
 'status' => false,
 'link' => null,
 'message' => 'Your content can\'t successfully published on Twitter. This file is not supported for publishing.',
 'publishing_status' => 'error'
 ];

 if ((($file->refe_type !== 'text') || $file->refe_file_path) && !checkIfFileExist($file->refe_file_path)) {
 $result['message'] = 'Missing or invalid file.';
 return $result;
 }

 $filePath = $file->refe_file_path;
 $fileSize = $file->content_length;
 $tempFileName = 'temp-' . $file->refe_file_name;
 $ext = $file->file_type;
 $mediaCategory = 'tweet_' . $file->refe_type;
 $mediaType = $file->refe_type . '/' . $ext;
 $remoteFile = file_get_contents($filePath);
 $tempFolder = public_path('/storage/uploads/temp');

 if (!file_exists($tempFolder)) {
 mkdir($tempFolder, 0777, true);
 }

 $tempFile = public_path('/storage/uploads/temp/' . $tempFileName);
 File::put($tempFile, $remoteFile);
 $convertedFileName = 'converted-' . $file->refe_file_name;
 $convertedFile = public_path('/storage/uploads/temp/' . $convertedFileName);
 $command = 'ffmpeg -y -i '.$tempFile.' -b:v 5000k -b:a 380k -c:a aac -profile:a aac_low -threads 1 '.$convertedFile.'';
 exec($command);
 @File::delete($tempFile);

 try {
 $twitter = Twitter::usingCredentials($requestToken['token'], $requestToken['secret']);
 if ($file->refe_type === 'text') {
 $twitter->postTweet([
 'status' => urldecode($postContent),
 'format' => 'json',
 ]);

 $result['link'] = 'https://twitter.com/home';
 $result['status'] = true;
 $result['message'] = 'Your content successfully published on Twitter. You can visit to Twitter and check it.';
 $result['publishing_status'] = 'done';
 } else if ($file->refe_type === 'video' || $file->refe_type === 'image') {
 if ($file->refe_type === 'video') {
 $duration = getVideoDuration($file->refe_file_path);

 if ($duration > config('constant.sharing_configs.max_video_duration.twitter')) {
 throw new \Exception('The duration of the video file must not exceed 140 seconds.');
 }
 }

 $isFileTypeSupported = checkPublishedFileType('twitter', $file->refe_type, strtolower($ext));
 $isFileSizeSupported = checkPublishedFileSize('twitter', $file->refe_type, $fileSize, strtolower($ext));

 if (!$isFileTypeSupported) {
 throw new \Exception('Your content can\'t successfully published on Twitter. This file type is not supported for publishing.');
 }

 if (!$isFileSizeSupported) {
 throw new \Exception('Your content can\'t successfully published on Twitter. The file size is exceeded.');
 }

 if ($file->refe_type === 'video') $fileSize = filesize($convertedFile);

 if (strtolower($ext) === 'gif') {
 $initMedia = $twitter->uploadMedia([
 'command' => 'INIT',
 'total_bytes' => (int)$fileSize
 ]);
 } else {
 $initMedia = $twitter->uploadMedia([
 'command' => 'INIT',
 'media_type' => $mediaType,
 'media_category' => $mediaCategory,
 'total_bytes' => (int)$fileSize
 ]);
 }

 $mediaId = (int)$initMedia->media_id_string;

 $fp = fopen($convertedFile, 'r');
 $segmentId = 0;

 while (!feof($fp)) {
 $chunk = fread($fp, 1048576);

 $twitter->uploadMedia([
 'media_data' => base64_encode($chunk),
 'command' => 'APPEND',
 'segment_index' => $segmentId,
 'media_id' => $mediaId
 ]);

 $segmentId++;
 }

 fclose($fp);

 $twitter->uploadMedia([
 'command' => 'FINALIZE',
 'media_id' => $mediaId
 ]);

 if ($file->refe_type === 'video') {
 $waits = 0;

 while ($waits <= 4) {
 // Authorizing header for Twitter API
 $oauth = [
 'command' => 'STATUS',
 'media_id' => $mediaId,
 'oauth_consumer_key' => config('twitter.consumer_key'),
 'oauth_nonce' => Str::random(42),
 'oauth_signature_method' => 'HMAC-SHA1',
 'oauth_timestamp' => time(),
 'oauth_token' => $requestToken['token'],
 'oauth_version' => '1.0'
 ];

 // Generate an OAuth 1.0a HMAC-SHA1 signature for an HTTP request
 $baseInfo = $this->buildBaseString('https://upload.twitter.com/1.1/media/upload.json', 'GET', $oauth);
 // Getting a signing key
 $compositeKey = rawurlencode(config('twitter.consumer_secret')) . '&' . rawurlencode($requestToken['secret']);
 // Calculating the signature
 $oauthSignature = base64_encode(hash_hmac('sha1', $baseInfo, $compositeKey, true));
 $oauth['oauth_signature'] = $oauthSignature;
 $headers['Authorization'] = $this->buildAuthorizationHeader($oauth);

 try {
 $guzzle = new GuzzleClient([
 'headers' => $headers
 ]);
 $response = $guzzle->request( 'GET', 'https://upload.twitter.com/1.1/media/upload.json?command=STATUS&media_id=' . $mediaId);
 $uploadStatus = json_decode($response->getBody()->getContents());
 } catch (\Exception | GuzzleException $e) {
 dd($e->getMessage(), $e->getLine(), $e->getFile());
 }

 if (isset($uploadStatus->processing_info->state)) {
 switch ($uploadStatus->processing_info->state) {
 case 'succeeded':
 $waits = 5; // break out of the while loop
 break;
 case 'failed':
 File::delete($tempFile);
 Log::error('File processing failed: ' . $uploadStatus->processing_info->error->message);
 throw new \Exception('File processing failed: ' . $uploadStatus->processing_info->error->message);
 default:
 sleep($uploadStatus->processing_info->check_after_secs);
 $waits++;
 }
 } else {
 throw new \Exception('There was an unknown error uploading your file');
 }
 }
 }

 $twitter->postTweet(['status' => urldecode($postContent), 'media_ids' => $initMedia->media_id_string]);
 @File::delete($convertedFile);
 $result['link'] = 'https://twitter.com/home';
 $result['status'] = true;
 $result['message'] = 'Your content successfully published on Twitter. You can visit to Twitter and check it.';
 $result['publishing_status'] = 'done';
 }
 } catch (\Exception $e) {
 dd($e->getMessage());
 $result['message'] = $e->getMessage();
 return $result;
 }

 return $result;
 }

 /**
 * @param $baseURI
 * @param $method
 * @param $params
 * @return string
 *
 * Creating the signature base string
 */
 protected function buildBaseString($baseURI, $method, $params): string
 {
 $r = array();
 ksort($params);
 foreach($params as $key=>$value){
 $r[] = "$key=" . rawurlencode($value);
 }
 return $method . "&" . rawurlencode($baseURI) . '&' . rawurlencode(implode('&', $r));
 }

 /**
 * @param $oauth
 * @return string
 *
 * Collecting parameters
 */
 protected function buildAuthorizationHeader($oauth): string
 {
 $r = 'OAuth ';
 $values = array();
 foreach($oauth as $key=>$value)
 $values[] = "$key=\"" . rawurlencode($value) . "\"";
 $r .= implode(', ', $values);
 return $r;
 }
}




I would be very grateful if someone would help me.