
Other articles (79)
-
Customizing by adding your logo, banner or background image
5 September 2013. Some themes support three customization elements: adding a logo; adding a banner; adding a background image.
-
Publishing on MediaSPIP
13 June 2013. Can I post content from an iPad tablet?
Yes, if your MediaSPIP installation is at version 0.2 or higher. If needed, contact your MediaSPIP administrator to find out.
-
Accepted formats
28 January 2010. The following commands give information about the formats and codecs handled by the local ffmpeg installation:
ffmpeg -codecs
ffmpeg -formats
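
For the same inventory programmatically, here is a minimal sketch against the libavcodec/libavformat API (assuming an FFmpeg 4.0+ build, where the av_codec_iterate() and av_demuxer_iterate() iterators are available; this mirrors what the two commands print):

#include <cstdio>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

int main() {
    // Walk every codec known to this build, as "ffmpeg -codecs" does.
    void* it = nullptr;
    const AVCodec* codec;
    while ((codec = av_codec_iterate(&it)))
        std::printf("codec: %s (%s)\n", codec->name,
                    codec->long_name ? codec->long_name : "");

    // Walk every demuxer, the input half of "ffmpeg -formats".
    it = nullptr;
    const AVInputFormat* fmt;
    while ((fmt = av_demuxer_iterate(&it)))
        std::printf("demuxer: %s\n", fmt->name);
    return 0;
}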
Accepted input video formats
This list is not exhaustive; it highlights the main formats in use: h264: H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10; m4v: raw MPEG-4 video format; flv: Flash Video (FLV) / Sorenson Spark / Sorenson H.263; Theora; wmv:
Possible output video formats
To begin with, we (...)
On other sites (8770)
-
Audio does not stop recording after pause (FFmpeg, C++)
15 September 2021, by C1ngh10. I am developing an application that records the screen and the audio from the microphone. I implemented the pause function by stopping the video and audio threads on a condition variable, and resuming them with a notify on the same condition variable. This is done in captureAudio(), in the main while loop. It works this way on macOS and Linux, where I use avfoundation and alsa respectively, but on Windows, with dshow, it keeps recording audio during the pause, while the thread is waiting on the condition variable. Does anybody know how I can fix this behaviour?
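
For reference, a minimal sketch of the pause gate the question describes (member names simplified; this mirrors the mechanism, not the exact ScreenRecorder class):

#include <condition_variable>
#include <mutex>

std::mutex mu;
std::condition_variable cv;
bool pauseCapture = false;
bool stopCapture = false;

// Capture thread: blocks while paused, as in captureAudio()'s main loop.
void captureLoop() {
    while (true) {
        std::unique_lock<std::mutex> ul(mu);
        cv.wait(ul, [] { return !pauseCapture; }); // sleep during the pause
        if (stopCapture)
            break;
        ul.unlock();
        // ... read one packet from the device, encode it, write it ...
    }
}

// Controller thread: flips the flag and wakes the capture threads on resume.
void setPaused(bool paused) {
    {
        std::lock_guard<std::mutex> lg(mu);
        pauseCapture = paused;
    }
    cv.notify_all();
}

While a thread sleeps in cv.wait(), nothing calls av_read_frame() on the device, so if dshow keeps filling its internal buffer in the meantime, the audio captured during the pause would simply be delivered on resume, which would match the observed behaviour; draining and discarding packets for the paused interval is one unconfirmed direction to investigate. The complete code from the question follows.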

#include "ScreenRecorder.h"

using namespace std;

ScreenRecorder::ScreenRecorder() : pauseCapture(false), stopCapture(false), started(false), activeMenu(true) {
 avcodec_register_all();
 avdevice_register_all();

 width = 1920;
 height = 1200;
}

ScreenRecorder::~ScreenRecorder() {

 if (started) {
 value = av_write_trailer(outAVFormatContext);
 if (value < 0) {
 cerr << "Error in writing av trailer" << endl;
 exit(-1);
 }

 avformat_close_input(&inAudioFormatContext);
 if(inAudioFormatContext == nullptr){
 cout << "inAudioFormatContext close successfully" << endl;
 }
 else{
 cerr << "Error: unable to close the inAudioFormatContext" << endl;
 exit(-1);
 //throw "Error: unable to close the file";
 }
 avformat_free_context(inAudioFormatContext);
 if(inAudioFormatContext == nullptr){
 cout << "AudioFormat freed successfully" << endl;
 }
 else{
 cerr << "Error: unable to free AudioFormatContext" << endl;
 exit(-1);
 }
 
 avformat_close_input(&pAVFormatContext);
 if (pAVFormatContext == nullptr) {
 cout << "File close successfully" << endl;
 }
 else {
 cerr << "Error: unable to close the file" << endl;
 exit(-1);
 //throw "Error: unable to close the file";
 }

 avformat_free_context(pAVFormatContext);
 if (pAVFormatContext == nullptr) {
 cout << "VideoFormat freed successfully" << endl;
 }
 else {
 cerr << "Error: unable to free VideoFormatContext" << endl;
 exit(-1);
 }
 }
}

/*==================================== VIDEO ==============================*/

int ScreenRecorder::openVideoDevice() throw() {
 value = 0;
 options = nullptr;
 pAVFormatContext = nullptr;

 pAVFormatContext = avformat_alloc_context();

 string dimension = to_string(width) + "x" + to_string(height);
 av_dict_set(&options, "video_size", dimension.c_str(), 0); //option to set the dimension of the screen section to record

#ifdef _WIN32
 pAVInputFormat = av_find_input_format("gdigrab");
 if (avformat_open_input(&pAVFormatContext, "desktop", pAVInputFormat, &options) != 0) {
 cerr << "Couldn't open input stream" << endl;
 exit(-1);
 }

#elif defined linux
 
 int offset_x = 0, offset_y = 0;
 string url = ":0.0+" + to_string(offset_x) + "," + to_string(offset_y); //custom string to set the start point of the screen section
 pAVInputFormat = av_find_input_format("x11grab");
 value = avformat_open_input(&pAVFormatContext, url.c_str(), pAVInputFormat, &options);

 if (value != 0) {
 cerr << "Error in opening input device (video)" << endl;
 exit(-1);
 }
#else

 value = av_dict_set(&options, "pixel_format", "0rgb", 0);
 if (value < 0) {
 cerr << "Error in setting pixel format" << endl;
 exit(-1);
 }

 value = av_dict_set(&options, "video_device_index", "1", 0);

 if (value < 0) {
 cerr << "Error in setting video device index" << endl;
 exit(-1);
 }

 pAVInputFormat = av_find_input_format("avfoundation");

 if (avformat_open_input(&pAVFormatContext, "Capture screen 0:none", pAVInputFormat, &options) != 0) { //TODO find a way to always select the screen (maybe "Capture screen 0")
 cerr << "Error in opening input device" << endl;
 exit(-1);
 }



#endif
 //set frame per second

 value = av_dict_set(&options, "framerate", "30", 0);
 if (value < 0) {
 cerr << "Error in setting dictionary value (setting framerate)" << endl;
 exit(-1);
 }

 value = av_dict_set(&options, "preset", "medium", 0);
 if (value < 0) {
 cerr << "Error in setting dictionary value (setting preset value)" << endl;
 exit(-1);
 }
 /*
 value = av_dict_set(&options, "vsync", "1", 0);
 if(value < 0){
 cerr << "Error in setting dictionary value (setting vsync value)" << endl;
 exit(-1);
 }
 */

 value = av_dict_set(&options, "probesize", "60M", 0);
 if (value < 0) {
 cerr << "Error in setting probesize value" << endl;
 exit(-1);
 }

 //get video stream infos from context
 value = avformat_find_stream_info(pAVFormatContext, nullptr);
 if (value < 0) {
 cerr << "Error in retrieving the stream info" << endl;
 exit(-1);
 }

 VideoStreamIndx = -1;
 for (int i = 0; i < pAVFormatContext->nb_streams; i++) {
 if (pAVFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
 VideoStreamIndx = i;
 break;
 }
 }
 if (VideoStreamIndx == -1) {
 cerr << "Error: unable to find video stream index" << endl;
 exit(-2);
 }

 pAVCodecContext = pAVFormatContext->streams[VideoStreamIndx]->codec;
 pAVCodec = avcodec_find_decoder(pAVCodecContext->codec_id/*params->codec_id*/);
 if (pAVCodec == nullptr) {
 cerr << "Error: unable to find decoder video" << endl;
 exit(-1);
 }

 cout << "Insert height and width [h w]: "; //custom screen dimension to record
 cin >> h >> w;*/


 return 0;
}

/*========================================== AUDIO ============================*/

int ScreenRecorder::openAudioDevice() {
 audioOptions = nullptr;
 inAudioFormatContext = nullptr;

 inAudioFormatContext = avformat_alloc_context();
 value = av_dict_set(&audioOptions, "sample_rate", "44100", 0);
 if (value < 0) {
 cerr << "Error: cannot set audio sample rate" << endl;
 exit(-1);
 }
 value = av_dict_set(&audioOptions, "async", "1", 0);
 if (value < 0) {
 cerr << "Error: cannot set audio sample rate" << endl;
 exit(-1);
 }

#if defined linux
 audioInputFormat = av_find_input_format("alsa");
 value = avformat_open_input(&inAudioFormatContext, "hw:0", audioInputFormat, &audioOptions);
 if (value != 0) {
 cerr << "Error in opening input device (audio)" << endl;
 exit(-1);
 }
#endif

#if defined _WIN32
 audioInputFormat = av_find_input_format("dshow");
 value = avformat_open_input(&inAudioFormatContext, "audio=Microfono (Realtek(R) Audio)", audioInputFormat, &audioOptions);
 if (value != 0) {
 cerr << "Error in opening input device (audio)" << endl;
 exit(-1);
 }
#endif

 value = avformat_find_stream_info(inAudioFormatContext, nullptr);
 if (value != 0) {
 cerr << "Error: cannot find the audio stream information" << endl;
 exit(-1);
 }

 audioStreamIndx = -1;
 for (int i = 0; i < inAudioFormatContext->nb_streams; i++) {
 if (inAudioFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
 audioStreamIndx = i;
 break;
 }
 }
 if (audioStreamIndx == -1) {
 cerr << "Error: unable to find audio stream index" << endl;
 exit(-2);
 }
 return 0;
}

int ScreenRecorder::initOutputFile() {
 value = 0;

 outAVFormatContext = nullptr;
 outputAVFormat = av_guess_format(nullptr, "output.mp4", nullptr);
 if (outputAVFormat == nullptr) {
 cerr << "Error in guessing the video format, try with correct format" << endl;
 exit(-5);
 }
 avformat_alloc_output_context2(&outAVFormatContext, outputAVFormat, outputAVFormat->name, "..\\media\\output.mp4");
 if (outAVFormatContext == nullptr) {
 cerr << "Error in allocating outAVFormatContext" << endl;
 exit(-4);
 }

 /*===========================================================================*/
 this->generateVideoStream();
 this->generateAudioStream();

 //create an empty video file
 if (!(outAVFormatContext->flags & AVFMT_NOFILE)) {
 if (avio_open2(&outAVFormatContext->pb, "..\\media\\output.mp4", AVIO_FLAG_WRITE, nullptr, nullptr) < 0) {
 cerr << "Error in creating the video file" << endl;
 exit(-10);
 }
 }

 if (outAVFormatContext->nb_streams == 0) {
 cerr << "Output file does not contain any stream" << endl;
 exit(-11);
 }
 value = avformat_write_header(outAVFormatContext, &options);
 if (value < 0) {
 cerr << "Error in writing the header context" << endl;
 exit(-12);
 }
 return 0;
}

/*=================================== VIDEO ==================================*/

void ScreenRecorder::generateVideoStream() {
 //Generate video stream
 videoSt = avformat_new_stream(outAVFormatContext, nullptr);
 if (videoSt == nullptr) {
 cerr << "Error in creating AVFormatStream" << endl;
 exit(-6);
 }

 outVideoCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4); //AV_CODEC_ID_MPEG4
 if (outVideoCodec == nullptr) {
 cerr << "Error in finding the AVCodec, try again with the correct codec" << endl;
 exit(-8);
 }
 outVideoCodecContext = avcodec_alloc_context3(outVideoCodec);
 if (outVideoCodecContext == nullptr) {
 cerr << "Error in allocating the codec context" << endl;
 exit(-7);
 }

 //set properties of the video file (stream)
 outVideoCodecContext = videoSt->codec;
 outVideoCodecContext->codec_id = AV_CODEC_ID_MPEG4;
 outVideoCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
 outVideoCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
 outVideoCodecContext->bit_rate = 10000000;
 outVideoCodecContext->width = width;
 outVideoCodecContext->height = height;
 outVideoCodecContext->gop_size = 10;
 outVideoCodecContext->global_quality = 500;
 outVideoCodecContext->max_b_frames = 2;
 outVideoCodecContext->time_base.num = 1;
 outVideoCodecContext->time_base.den = 30;
 outVideoCodecContext->bit_rate_tolerance = 400000;

 if (outVideoCodecContext->codec_id == AV_CODEC_ID_H264) {
 av_opt_set(outVideoCodecContext->priv_data, "preset", "slow", 0);
 }

 if (outAVFormatContext->oformat->flags & AVFMT_GLOBALHEADER) {
 outVideoCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
 }

 value = avcodec_open2(outVideoCodecContext, outVideoCodec, nullptr);
 if (value < 0) {
 cerr << "Error in opening the AVCodec" << endl;
 exit(-9);
 }

 outVideoStreamIndex = -1;
 for (int i = 0; i < outAVFormatContext->nb_streams; i++) {
 if (outAVFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_UNKNOWN) {
 outVideoStreamIndex = i;
 }
 }
 if (outVideoStreamIndex < 0) {
 cerr << "Error: cannot find a free stream index for video output" << endl;
 exit(-1);
 }
 avcodec_parameters_from_context(outAVFormatContext->streams[outVideoStreamIndex]->codecpar, outVideoCodecContext);
}

/*=============================== AUDIO ==================================*/

void ScreenRecorder::generateAudioStream() {
 AVCodecParameters* params = inAudioFormatContext->streams[audioStreamIndx]->codecpar;
 inAudioCodec = avcodec_find_decoder(params->codec_id);
 if (inAudioCodec == nullptr) {
 cerr << "Error: cannot find the audio decoder" << endl;
 exit(-1);
 }

 inAudioCodecContext = avcodec_alloc_context3(inAudioCodec);
 if (avcodec_parameters_to_context(inAudioCodecContext, params) < 0) {
 cout << "Cannot create codec context for audio input" << endl;
 }

 value = avcodec_open2(inAudioCodecContext, inAudioCodec, nullptr);
 if (value < 0) {
 cerr << "Error: cannot open the input audio codec" << endl;
 exit(-1);
 }

 //Generate audio stream
 outAudioCodecContext = nullptr;
 outAudioCodec = nullptr;
 int i;

 AVStream* audio_st = avformat_new_stream(outAVFormatContext, nullptr);
 if (audio_st == nullptr) {
 cerr << "Error: cannot create audio stream" << endl;
 exit(1);
 }

 outAudioCodec = avcodec_find_encoder(AV_CODEC_ID_AAC);
 if (outAudioCodec == nullptr) {
 cerr << "Error: cannot find requested encoder" << endl;
 exit(1);
 }

 outAudioCodecContext = avcodec_alloc_context3(outAudioCodec);
 if (outAudioCodecContext == nullptr) {
 cerr << "Error: cannot create related VideoCodecContext" << endl;
 exit(1);
 }

 if ((outAudioCodec)->supported_samplerates) {
 outAudioCodecContext->sample_rate = (outAudioCodec)->supported_samplerates[0];
 for (i = 0; (outAudioCodec)->supported_samplerates[i]; i++) {
 if ((outAudioCodec)->supported_samplerates[i] == inAudioCodecContext->sample_rate)
 outAudioCodecContext->sample_rate = inAudioCodecContext->sample_rate;
 }
 }
 outAudioCodecContext->codec_id = AV_CODEC_ID_AAC;
 outAudioCodecContext->sample_fmt = (outAudioCodec)->sample_fmts ? (outAudioCodec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
 outAudioCodecContext->channels = inAudioCodecContext->channels;
 outAudioCodecContext->channel_layout = av_get_default_channel_layout(outAudioCodecContext->channels);
 outAudioCodecContext->bit_rate = 96000;
 outAudioCodecContext->time_base = { 1, inAudioCodecContext->sample_rate };

 outAudioCodecContext->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

 if ((outAVFormatContext)->oformat->flags & AVFMT_GLOBALHEADER) {
 outAudioCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
 }

 if (avcodec_open2(outAudioCodecContext, outAudioCodec, nullptr) < 0) {
 cerr << "error in opening the avcodec" << endl;
 exit(1);
 }

 //find a free stream index
 outAudioStreamIndex = -1;
 for (i = 0; i < outAVFormatContext->nb_streams; i++) {
 if (outAVFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_UNKNOWN) {
 outAudioStreamIndex = i;
 }
 }
 if (outAudioStreamIndex < 0) {
 cerr << "Error: cannot find a free stream for audio on the output" << endl;
 exit(1);
 }

 avcodec_parameters_from_context(outAVFormatContext->streams[outAudioStreamIndex]->codecpar, outAudioCodecContext);
}

int ScreenRecorder::init_fifo()
{
 /* Create the FIFO buffer based on the specified output sample format. */
 if (!(fifo = av_audio_fifo_alloc(outAudioCodecContext->sample_fmt,
 outAudioCodecContext->channels, 1))) {
 fprintf(stderr, "Could not allocate FIFO\n");
 return AVERROR(ENOMEM);
 }
 return 0;
}

int ScreenRecorder::add_samples_to_fifo(uint8_t** converted_input_samples, const int frame_size) {
 int error;
 /* Make the FIFO as large as it needs to be to hold both,
 * the old and the new samples. */
 if ((error = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + frame_size)) < 0) {
 fprintf(stderr, "Could not reallocate FIFO\n");
 return error;
 }
 /* Store the new samples in the FIFO buffer. */
 if (av_audio_fifo_write(fifo, (void**)converted_input_samples, frame_size) < frame_size) {
 fprintf(stderr, "Could not write data to FIFO\n");
 return AVERROR_EXIT;
 }
 return 0;
}

int ScreenRecorder::initConvertedSamples(uint8_t*** converted_input_samples,
 AVCodecContext* output_codec_context,
 int frame_size) {
 int error;
 /* Allocate as many pointers as there are audio channels.
 * Each pointer will later point to the audio samples of the corresponding
 * channels (although it may be NULL for interleaved formats).
 */
 if (!(*converted_input_samples = (uint8_t**)calloc(output_codec_context->channels,
 sizeof(**converted_input_samples)))) {
 fprintf(stderr, "Could not allocate converted input sample pointers\n");
 return AVERROR(ENOMEM);
 }
 /* Allocate memory for the samples of all channels in one consecutive
 * block for convenience. */
 if (av_samples_alloc(*converted_input_samples, nullptr,
 output_codec_context->channels,
 frame_size,
 output_codec_context->sample_fmt, 0) < 0) {

 exit(1);
 }
 return 0;
}

static int64_t pts = 0;
void ScreenRecorder::captureAudio() {
 int ret;
 AVPacket* inPacket, * outPacket;
 AVFrame* rawFrame, * scaledFrame;
 uint8_t** resampledData;

 init_fifo();

 //allocate space for a packet
 inPacket = (AVPacket*)av_malloc(sizeof(AVPacket));
 if (!inPacket) {
 cerr << "Cannot allocate an AVPacket for encoded video" << endl;
 exit(1);
 }
 av_init_packet(inPacket);

 //allocate space for a packet
 rawFrame = av_frame_alloc();
 if (!rawFrame) {
 cerr << "Cannot allocate an AVPacket for encoded video" << endl;
 exit(1);
 }

 scaledFrame = av_frame_alloc();
 if (!scaledFrame) {
 cerr << "Cannot allocate an AVPacket for encoded video" << endl;
 exit(1);
 }

 outPacket = (AVPacket*)av_malloc(sizeof(AVPacket));
 if (!outPacket) {
 cerr << "Cannot allocate an AVPacket for encoded video" << endl;
 exit(1);
 }

 //init the resampler
 SwrContext* resampleContext = nullptr;
 resampleContext = swr_alloc_set_opts(resampleContext,
 av_get_default_channel_layout(outAudioCodecContext->channels),
 outAudioCodecContext->sample_fmt,
 outAudioCodecContext->sample_rate,
 av_get_default_channel_layout(inAudioCodecContext->channels),
 inAudioCodecContext->sample_fmt,
 inAudioCodecContext->sample_rate,
 0,
 nullptr);
 if (!resampleContext) {
 cerr << "Cannot allocate the resample context" << endl;
 exit(1);
 }
 if ((swr_init(resampleContext)) < 0) {
 fprintf(stderr, "Could not open resample context\n");
 swr_free(&resampleContext);
 exit(1);
 }

 while (true) {
 if (pauseCapture) {
 cout << "Pause audio" << endl;
 }
 cv.wait(ul, [this]() { return !pauseCapture; });

 if (stopCapture) {
 break;
 }

 ul.unlock();

 if (av_read_frame(inAudioFormatContext, inPacket) >= 0 && inPacket->stream_index == audioStreamIndx) {
 //decode audio routing
 av_packet_rescale_ts(outPacket, inAudioFormatContext->streams[audioStreamIndx]->time_base, inAudioCodecContext->time_base);
 if ((ret = avcodec_send_packet(inAudioCodecContext, inPacket)) < 0) {
 cout << "Cannot decode current audio packet " << ret << endl;
 continue;
 }
 
 while (ret >= 0) {
 ret = avcodec_receive_frame(inAudioCodecContext, rawFrame);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 break;
 else if (ret < 0) {
 cerr << "Error during decoding" << endl;
 exit(1);
 }
 if (outAVFormatContext->streams[outAudioStreamIndex]->start_time <= 0) {
 outAVFormatContext->streams[outAudioStreamIndex]->start_time = rawFrame->pts;
 }
 initConvertedSamples(&resampledData, outAudioCodecContext, rawFrame->nb_samples);

 swr_convert(resampleContext,
 resampledData, rawFrame->nb_samples,
 (const uint8_t**)rawFrame->extended_data, rawFrame->nb_samples);

 add_samples_to_fifo(resampledData, rawFrame->nb_samples);

 //raw frame ready
 av_init_packet(outPacket);
 outPacket->data = nullptr;
 outPacket->size = 0;

 const int frame_size = FFMAX(av_audio_fifo_size(fifo), outAudioCodecContext->frame_size);

 scaledFrame = av_frame_alloc();
 if (!scaledFrame) {
 cerr << "Cannot allocate an AVPacket for encoded video" << endl;
 exit(1);
 }

 scaledFrame->nb_samples = outAudioCodecContext->frame_size;
 scaledFrame->channel_layout = outAudioCodecContext->channel_layout;
 scaledFrame->format = outAudioCodecContext->sample_fmt;
 scaledFrame->sample_rate = outAudioCodecContext->sample_rate;
 av_frame_get_buffer(scaledFrame, 0);

 while (av_audio_fifo_size(fifo) >= outAudioCodecContext->frame_size) {

 ret = av_audio_fifo_read(fifo, (void**)(scaledFrame->data), outAudioCodecContext->frame_size);
 scaledFrame->pts = pts;
 pts += scaledFrame->nb_samples;
 if (avcodec_send_frame(outAudioCodecContext, scaledFrame) < 0) {
 cout << "Cannot encode current audio packet " << endl;
 exit(1);
 }
 while (ret >= 0) {
 ret = avcodec_receive_packet(outAudioCodecContext, outPacket);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 break;
 else if (ret < 0) {
 cerr << "Error during encoding" << endl;
 exit(1);
 }
 av_packet_rescale_ts(outPacket, outAudioCodecContext->time_base, outAVFormatContext->streams[outAudioStreamIndex]->time_base);

 outPacket->stream_index = outAudioStreamIndex;

 write_lock.lock();
 
 if (av_write_frame(outAVFormatContext, outPacket) != 0)
 {
 cerr << "Error in writing audio frame" << endl;
 }
 write_lock.unlock();
 av_packet_unref(outPacket);
 }
 ret = 0;
 }
 av_frame_free(&scaledFrame);
 av_packet_unref(outPacket);
 }
 }
 }
}

int ScreenRecorder::captureVideoFrames() {
 int64_t pts = 0;
 int flag;
 int frameFinished = 0;
 bool endPause = false;
 int numPause = 0;

 ofstream outFile{ "..\\media\\log.txt", ios::out };

 int frameIndex = 0;
 value = 0;

 pAVPacket = (AVPacket*)av_malloc(sizeof(AVPacket));
 if (pAVPacket == nullptr) {
 cerr << "Error in allocating AVPacket" << endl;
 exit(-1);
 }

 pAVFrame = av_frame_alloc();
 if (pAVFrame == nullptr) {
 cerr << "Error: unable to alloc the AVFrame resources" << endl;
 exit(-1);
 }

 outFrame = av_frame_alloc();
 if (outFrame == nullptr) {
 cerr << "Error: unable to alloc the AVFrame resources for out frame" << endl;
 exit(-1);
 }

 int videoOutBuffSize;
 int nBytes = av_image_get_buffer_size(outVideoCodecContext->pix_fmt, outVideoCodecContext->width, outVideoCodecContext->height, 32);
 uint8_t* videoOutBuff = (uint8_t*)av_malloc(nBytes);

 if (videoOutBuff == nullptr) {
 cerr << "Error: unable to allocate memory" << endl;
 exit(-1);
 }

 value = av_image_fill_arrays(outFrame->data, outFrame->linesize, videoOutBuff, AV_PIX_FMT_YUV420P, outVideoCodecContext->width, outVideoCodecContext->height, 1);
 if (value < 0) {
 cerr << "Error in filling image array" << endl;
 }

 SwsContext* swsCtx_;
 if (avcodec_open2(pAVCodecContext, pAVCodec, nullptr) < 0) {
 cerr << "Could not open codec" << endl;
 exit(-1);
 }
 swsCtx_ = sws_getContext(pAVCodecContext->width, pAVCodecContext->height, pAVCodecContext->pix_fmt, outVideoCodecContext->width, outVideoCodecContext->height, outVideoCodecContext->pix_fmt, SWS_BICUBIC,
 nullptr, nullptr, nullptr);

 AVPacket outPacket;
 int gotPicture;

 time_t startTime;
 time(&startTime);

 while (true) {

 if (pauseCapture) {
 cout << "Pause" << endl;
 outFile << "/////////////////// Pause ///////////////////" << endl;
 cout << "outVideoCodecContext->time_base: " << outVideoCodecContext->time_base.num << ", " << outVideoCodecContext->time_base.den << endl;
 }
 cv.wait(ul, [this]() { return !pauseCapture; }); //pause capture (not busy waiting)
 if (endPause) {
 endPause = false;
 }

 if (stopCapture) //check if the capture has to stop
 break;
 ul.unlock();

 if (av_read_frame(pAVFormatContext, pAVPacket) >= 0 && pAVPacket->stream_index == VideoStreamIndx) {
 av_packet_rescale_ts(pAVPacket, pAVFormatContext->streams[VideoStreamIndx]->time_base, pAVCodecContext->time_base);
 value = avcodec_decode_video2(pAVCodecContext, pAVFrame, &frameFinished, pAVPacket);
 if (value < 0) {
 cout << "Unable to decode video" << endl;
 }

 if (frameFinished) { //frame successfully decoded
 //sws_scale(swsCtx_, pAVFrame->data, pAVFrame->linesize, 0, pAVCodecContext->height, outFrame->data, outFrame->linesize);
 av_init_packet(&outPacket);
 outPacket.data = nullptr;
 outPacket.size = 0;

 if (outAVFormatContext->streams[outVideoStreamIndex]->start_time <= 0) {
 outAVFormatContext->streams[outVideoStreamIndex]->start_time = pAVFrame->pts;
 }

 //disable warning on the console
 outFrame->width = outVideoCodecContext->width;
 outFrame->height = outVideoCodecContext->height;
 outFrame->format = outVideoCodecContext->pix_fmt;

 sws_scale(swsCtx_, pAVFrame->data, pAVFrame->linesize, 0, pAVCodecContext->height, outFrame->data, outFrame->linesize);

 avcodec_encode_video2(outVideoCodecContext, &outPacket, outFrame, &gotPicture);

 if (gotPicture) {
 if (outPacket.pts != AV_NOPTS_VALUE) {
 outPacket.pts = av_rescale_q(outPacket.pts, videoSt->codec->time_base, videoSt->time_base);
 }
 if (outPacket.dts != AV_NOPTS_VALUE) {
 outPacket.dts = av_rescale_q(outPacket.dts, videoSt->codec->time_base, videoSt->time_base);
 }

 //cout << "Write frame " << j++ << " (size = " << outPacket.size / 1000 << ")" << endl;
 //cout << "(size = " << outPacket.size << ")" << endl;

 //av_packet_rescale_ts(&outPacket, outVideoCodecContext->time_base, outAVFormatContext->streams[outVideoStreamIndex]->time_base);
 //outPacket.stream_index = outVideoStreamIndex;

 outFile << "outPacket->duration: " << outPacket.duration << ", " << "pAVPacket->duration: " << pAVPacket->duration << endl;
 outFile << "outPacket->pts: " << outPacket.pts << ", " << "pAVPacket->pts: " << pAVPacket->pts << endl;
 outFile << "outPacket.dts: " << outPacket.dts << ", " << "pAVPacket->dts: " << pAVPacket->dts << endl;

 time_t timer;
 double seconds;

 mu.lock();
 if (!activeMenu) {
 time(&timer);
 seconds = difftime(timer, startTime);
 int h = (int)(seconds / 3600);
 int m = (int)(seconds / 60) % 60;
 int s = (int)(seconds) % 60;

 std::cout << std::flush << "\r" << std::setw(2) << std::setfill('0') << h << ':'
 << std::setw(2) << std::setfill('0') << m << ':'
 << std::setw(2) << std::setfill('0') << s << std::flush;
 }
 mu.unlock();

 write_lock.lock();
 if (av_write_frame(outAVFormatContext, &outPacket) != 0) {
 cerr << "Error in writing video frame" << endl;
 }
 write_lock.unlock();
 av_packet_unref(&outPacket);
 }

 av_packet_unref(&outPacket);
 av_free_packet(pAVPacket); //avoid memory saturation
 }
 }
 }

 outFile.close();

 av_free(videoOutBuff);

 return 0;
}



-
Record mediasoup RTP stream using FFmpeg for Firefox
30 July 2024, by Hadi Aghandeh. I am trying to record a WebRTC stream using mediasoup. I can record successfully on Chrome and Safari 13/14/15. However, on Firefox it does not work.


The client-side code is a Vue.js component which gets the RTP capabilities using socket.io and creates producers after the server creates the transports. This works well on Chrome and Safari.


const { connect , createLocalTracks } = require('twilio-video');
const SocketClient = require("socket.io-client");
const SocketPromise = require("socket.io-promise").default;
const MediasoupClient = require("mediasoup-client");

export default {
 data() {
 return {
 errors: [],
 isReady: false,
 isRecording: false,
 loading: false,
 sapio: {
 token: null,
 connectionId: 0
 },
 server: {
 host: 'https://rtc.test',
 ws: '/server',
 socket: null,
 },
 peer: {},
 }
 },
 mounted() {
 this.init();
 },
 methods: {
 async init() {
 await this.startCamera();

 if (this.takeId) {
 await this.recordBySapioServer();
 }
 },
 startCamera() {
 return new Promise( (resolve, reject) => {
 if (window.videoMediaStreamObject) {
 this.setVideoElementStream(window.videoMediaStreamObject);
 resolve();
 } else {
 // Get user media as required
 try {
 this.localeStream = navigator.mediaDevices.getUserMedia({
 audio: true,
 video: true,
 }).then((stream) => {
 this.setVideoElementStream(stream);
 resolve();
 })
 } catch (err) {
 console.error(err);
 reject();
 }
 }
 })
 },
 setVideoElementStream(stream) {
 this.localStream = stream;
 this.$refs.video.srcObject = stream;
 this.$refs.video.muted = true;
 this.$refs.video.play().then((video) => {
 this.isStreaming = true;
 this.height = this.$refs.video.videoHeight;
 this.width = this.$refs.video.videoWidth;
 });
 },
 // first thing we need is connecting to websocket
 connectToSocket() {
 const serverUrl = this.server.host;
 console.log("Connect with sapio rtc server:", serverUrl);

 const socket = SocketClient(serverUrl, {
 path: this.server.ws,
 transports: ["websocket"],
 });
 this.socket = socket;

 socket.on("connect", () => {
 console.log("WebSocket connected");
 // we ask for rtp-capabilities from server to send to us
 socket.emit('send-rtp-capabilities');
 });

 socket.on("error", (err) => {
 this.loading = true;
 console.error("WebSocket error:", err);
 });

 socket.on("router-rtp-capabilities", async (msg) => {
 const { routerRtpCapabilities, sessionId, externalId } = msg;
 console.log('[rtpCapabilities:%o]', routerRtpCapabilities);
 this.routerRtpCapabilities = routerRtpCapabilities;

 try {
 const device = new MediasoupClient.Device();
 // Load the mediasoup device with the router rtp capabilities gotten from the server
 await device.load({ routerRtpCapabilities });

 this.peer.sessionId = sessionId;
 this.peer.externalId = externalId;
 this.peer.device = device;

 this.createTransport();
 } catch (error) {
 console.error('failed to init device [error:%o]', error);
 socket.disconnect();
 }
 });

 socket.on("create-transport", async (msg) => {
 console.log('handleCreateTransportRequest() [data:%o]', msg);

 try {
 // Create the local mediasoup send transport
 this.peer.sendTransport = await this.peer.device.createSendTransport(msg);
 console.log('send transport created [id:%s]', this.peer.sendTransport.id);

 // Set the transport listeners and get the users media stream
 this.handleSendTransportListeners();
 this.setTracks();
 this.loading = false;
 } catch (error) {
 console.error('failed to create transport [error:%o]', error);
 socket.disconnect();
 }
 });

 socket.on("connect-transport", async (msg) => {
 console.log('handleTransportConnectRequest()');
 try {
 const action = this.connectTransport;

 if (!action) {
 throw new Error('transport-connect action was not found');
 }

 await action(msg);
 } catch (error) {
 console.error('failed [error:%o]', error);
 }
 });

 socket.on("produce", async (msg) => {
 console.log('handleProduceRequest()');
 try {
 if (!this.produce) {
 throw new Error('produce action was not found');
 }
 await this.produce(msg);
 } catch (error) {
 console.error('failed [error:%o]', error);
 }
 });

 socket.on("recording", async (msg) => {
 this.isRecording = true;
 });

 socket.on("recording-error", async (msg) => {
 this.isRecording = false;
 console.error(msg);
 });

 socket.on("recording-closed", async (msg) => {
 this.isRecording = false;
 console.warn(msg)
 });

 },
 createTransport() {
 console.log('createTransport()');

 if (!this.peer || !this.peer.device.loaded) {
 throw new Error('Peer or device is not initialized');
 }

 // First we must create the mediasoup transport on the server side
 this.socket.emit('create-transport',{
 sessionId: this.peer.sessionId
 });
 },
 handleSendTransportListeners() {
 this.peer.sendTransport.on('connect', this.handleTransportConnectEvent);
 this.peer.sendTransport.on('produce', this.handleTransportProduceEvent);
 this.peer.sendTransport.on('connectionstatechange', connectionState => {
 console.log('send transport connection state change [state:%s]', connectionState);
 });
 },
 handleTransportConnectEvent({ dtlsParameters }, callback, errback) {
 console.log('handleTransportConnectEvent()');
 try {
 this.connectTransport = (msg) => {
 console.log('connect-transport action');
 callback();
 this.connectTransport = null;
 };

 this.socket.emit('connect-transport',{
 sessionId: this.peer.sessionId,
 transportId: this.peer.sendTransport.id,
 dtlsParameters
 });

 } catch (error) {
 console.error('handleTransportConnectEvent() failed [error:%o]', error);
 errback(error);
 }
 },
 handleTransportProduceEvent({ kind, rtpParameters }, callback, errback) {
 console.log('handleTransportProduceEvent()');
 try {
 this.produce = jsonMessage => {
 console.log('handleTransportProduceEvent callback [data:%o]', jsonMessage);
 callback({ id: jsonMessage.id });
 this.produce = null;
 };

 this.socket.emit('produce', {
 sessionId: this.peer.sessionId,
 transportId: this.peer.sendTransport.id,
 kind,
 rtpParameters
 });
 } catch (error) {
 console.error('handleTransportProduceEvent() failed [error:%o]', error);
 errback(error);
 }
 },
 async recordBySapioServer() {
 this.loading = true;
 this.connectToSocket();
 },
 async setTracks() {
 // Start mediasoup-client's WebRTC producers
 const audioTrack = this.localStream.getAudioTracks()[0];
 this.peer.audioProducer = await this.peer.sendTransport.produce({
 track: audioTrack,
 codecOptions :
 {
 opusStereo : 1,
 opusDtx : 1
 }
 });


 let encodings;
 let codec;
 const codecOptions = {videoGoogleStartBitrate : 1000};

 codec = this.peer.device.rtpCapabilities.codecs.find((c) => c.kind.toLowerCase() === 'video');
 if (codec.mimeType.toLowerCase() === 'video/vp9') {
 encodings = { scalabilityMode: 'S3T3_KEY' };
 } else {
 encodings = [
 { scaleResolutionDownBy: 4, maxBitrate: 500000 },
 { scaleResolutionDownBy: 2, maxBitrate: 1000000 },
 { scaleResolutionDownBy: 1, maxBitrate: 5000000 }
 ];
 }
 const videoTrack = this.localStream.getVideoTracks()[0];
 this.peer.videoProducer =await this.peer.sendTransport.produce({
 track: videoTrack,
 encodings,
 codecOptions,
 codec
 });

 },
 startRecording() {
 this.Q.answer.recordingId = this.peer.externalId;
 this.socket.emit("start-record", {
 sessionId: this.peer.sessionId
 });
 },
 stopRecording() {
 this.socket.emit("stop-record" , {
 sessionId: this.peer.sessionId
 });
 },
 },

}

console.log of my ffmpeg process:


// sdp string
[sdpString:v=0
 o=- 0 0 IN IP4 127.0.0.1
 s=FFmpeg
 c=IN IP4 127.0.0.1
 t=0 0
 m=video 25549 RTP/AVP 101 
 a=rtpmap:101 VP8/90000
 a=sendonly
 m=audio 26934 RTP/AVP 100 
 a=rtpmap:100 opus/48000/2
 a=sendonly
 ]

// ffmpeg args
commandArgs:[
 '-loglevel',
 'debug',
 '-protocol_whitelist',
 'pipe,udp,rtp',
 '-fflags',
 '+genpts',
 '-f',
 'sdp',
 '-i',
 'pipe:0',
 '-map',
 '0:v:0',
 '-c:v',
 'copy',
 '-map',
 '0:a:0',
 '-strict',
 '-2',
 '-c:a',
 'copy',
 '-f',
 'webm',
 '-flags',
 '+global_header',
 '-y',
 'storage/recordings/26e63cb3-4f81-499e-941a-c0bb7f7f52ce.webm',
 [length]: 26
]
// ffmpeg log
ffmpeg::process::data [data:'ffmpeg version n4.4']
ffmpeg::process::data [data:' Copyright (c) 2000-2021 the FFmpeg developers']
ffmpeg::process::data [data:'\n']
ffmpeg::process::data [data:' built with gcc 11.1.0 (GCC)\n']
ffmpeg::process::data [data:' configuration: --prefix=/usr --disable-debug --disable-static --disable-stripping --enable-amf --enable-avisynth --enable-cuda-llvm --enable-lto --enable-fontconfig --enable-gmp --enable-gnutls --enable-gpl --enable-ladspa --enable-libaom --enable-libass --enable-libbluray --enable-libdav1d --enable-libdrm --enable-libfreetype --enable-libfribidi --enable-libgsm --enable-libiec61883 --enable-libjack --enable-libmfx --enable-libmodplug --enable-libmp3lame --enable-libopencore_amrnb --enable-libopencore_amrwb --enable-libopenjpeg --enable-libopus --enable-libpulse --enable-librav1e --enable-librsvg --enable-libsoxr --enable-libspeex --enable-libsrt --enable-libssh --enable-libsvtav1 --enable-libtheora --enable-libv4l2 --enable-libvidstab --enable-libvmaf --enable-libvorbis --enable-libvpx --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxcb --enable-libxml2 --enable-libxvid --enable-libzimg --enable-nvdec --enable-nvenc --enable-shared --enable-version3\n']
ffmpeg::process::data [data:' libavutil 56. 70.100 / 56. 70.100\n' +
 ' libavcodec 58.134.100 / 58.134.100\n' +
 ' libavformat 58. 76.100 / 58. 76.100\n' +
 ' libavdevice 58. 13.100 / 58. 13.100\n' +
 ' libavfilter 7.110.100 / 7.110.100\n' +
 ' libswscale 5. 9.100 / 5. 9.100\n' +
 ' libswresample 3. 9.100 / 3. 9.100\n' +
 ' libpostproc 55. 9.100 / 55. 9.100\n' +
 'Splitting the commandline.\n' +
 "Reading option '-loglevel' ... matched as option 'loglevel' (set logging level) with argument 'debug'.\n" +
 "Reading option '-protocol_whitelist' ..."]
ffmpeg::process::data [data:" matched as AVOption 'protocol_whitelist' with argument 'pipe,udp,rtp'.\n" +
 "Reading option '-fflags' ..."]
ffmpeg::process::data [data:" matched as AVOption 'fflags' with argument '+genpts'.\n" +
 "Reading option '-f' ... matched as option 'f' (force format) with argument 'sdp'.\n" +
 "Reading option '-i' ... matched as input url with argument 'pipe:0'.\n" +
 "Reading option '-map' ... matched as option 'map' (set input stream mapping) with argument '0:v:0'.\n" +
 "Reading option '-c:v' ... matched as option 'c' (codec name) with argument 'copy'.\n" +
 "Reading option '-map' ... matched as option 'map' (set input stream mapping) with argument '0:a:0'.\n" +
 "Reading option '-strict' ...Routing option strict to both codec and muxer layer\n" +
 " matched as AVOption 'strict' with argument '-2'.\n" +
 "Reading option '-c:a' ... matched as option 'c' (codec name) with argument 'copy'.\n" +
 "Reading option '-f' ... matched as option 'f' (force format) with argument 'webm'.\n" +
 "Reading option '-flags' ... matched as AVOption 'flags' with argument '+global_header'.\n" +
 "Reading option '-y' ... matched as option 'y' (overwrite output files) with argument '1'.\n" +
 "Reading option 'storage/recordings/26e63cb3-4f81-499e-941a-c0bb7f7f52ce.webm' ... matched as output url.\n" +
 'Finished splitting the commandline.\n' +
 'Parsing a group of options: global .\n' +
 'Applying option loglevel (set logging level) with argument debug.\n' +
 'Applying option y (overwrite output files) with argument 1.\n' +
 'Successfully parsed a group of options.\n' +
 'Parsing a group of options: input url pipe:0.\n' +
 'Applying option f (force format) with argument sdp.\n' +
 'Successfully parsed a group of options.\n' +
 'Opening an input file: pipe:0.\n' +
 "[sdp @ 0x55604dc58400] Opening 'pipe:0' for reading\n" +
 '[sdp @ 0x55604dc58400] video codec set to: vp8\n' +
 '[sdp @ 0x55604dc58400] audio codec set to: opus\n' +
 '[sdp @ 0x55604dc58400] audio samplerate set to: 48000\n' +
 '[sdp @ 0x55604dc58400] audio channels set to: 2\n' +
 '[udp @ 0x55604dc6c500] end receive buffer size reported is 425984\n' +
 '[udp @ 0x55604dc6c7c0] end receive buffer size reported is 425984\n' +
 '[sdp @ 0x55604dc58400] setting jitter buffer size to 500\n' +
 '[udp @ 0x55604dc6d900] end receive buffer size reported is 425984\n' +
 '[udp @ 0x55604dc6d2c0] end receive buffer size reported is 425984\n' +
 '[sdp @ 0x55604dc58400] setting jitter buffer size to 500\n']
ffmpeg::process::data [data:'[sdp @ 0x55604dc58400] Before avformat_find_stream_info() pos: 210 bytes read:210 seeks:0 nb_streams:2\n']
 **mediasoup:Consumer resume() +1s**
 **mediasoup:Channel request() [method:consumer.resume, id:12] +1s**
 **mediasoup:Channel request succeeded [method:consumer.resume, id:12] +0ms**
 **mediasoup:Consumer resume() +1ms**
 **mediasoup:Channel request() [method:consumer.resume, id:13] +0ms**
 **mediasoup:Channel request succeeded [method:consumer.resume, id:13] +0ms**
ffmpeg::process::data [data:'[sdp @ 0x55604dc58400] Could not find codec parameters for stream 0 (Video: vp8, 1 reference frame, yuv420p): unspecified size\n' +
 "Consider increasing the value for the 'analyzeduration' (0) and 'probesize' (5000000) options\n"]
ffmpeg::process::data [data:'[sdp @ 0x55604dc58400] After avformat_find_stream_info() pos: 210 bytes read:210 seeks:0 frames:0\n' +
 "Input #0, sdp, from 'pipe:0':\n" +
 ' Metadata:\n' +
 ' title : FFmpeg\n' +
 ' Duration: N/A, bitrate: N/A\n' +
 ' Stream #0:0, 0, 1/90000: Video: vp8, 1 reference frame, yuv420p, 90k tbr, 90k tbn, 90k tbc\n' +
 ' Stream #0:1, 0, 1/48000: Audio: opus, 48000 Hz, stereo, fltp\n' +
 'Successfully opened the file.\n' +
 'Parsing a group of options: output url storage/recordings/26e63cb3-4f81-499e-941a-c0bb7f7f52ce.webm.\n' +
 'Applying option map (set input stream mapping) with argument 0:v:0.\n' +
 'Applying option c:v (codec name) with argument copy.\n' +
 'Applying option map (set input stream mapping) with argument 0:a:0.\n' +
 'Applying option c:a (codec name) with argument copy.\n' +
 'Applying option f (force format) with argument webm.\n' +
 'Successfully parsed a group of options.\n' +
 'Opening an output file: storage/recordings/26e63cb3-4f81-499e-941a-c0bb7f7f52ce.webm.\n' +
 "[file @ 0x55604dce5bc0] Setting default whitelist 'file,crypto,data'\n"]
ffmpeg::process::data [data:'Successfully opened the file.\n' +
 '[webm @ 0x55604dce0fc0] dimensions not set\n' +
 'Could not write header for output file #0 (incorrect codec parameters ?): Invalid argument\n' +
 'Error initializing output stream 0:1 -- \n' +
 'Stream mapping:\n' +
 ' Stream #0:0 -> #0:0 (copy)\n' +
 ' Stream #0:1 -> #0:1 (copy)\n' +
 ' Last message repeated 1 times\n' +
 '[AVIOContext @ 0x55604dc6dcc0] Statistics: 0 seeks, 0 writeouts\n' +
 '[AVIOContext @ 0x55604dc69380] Statistics: 210 bytes read, 0 seeks\n']
ffmpeg::process::close

FFmpeg says "dimensions not set" and "Could not write header for output file" when I use Firefox. (The debug log above also shows "Could not find codec parameters for stream 0 (Video: vp8 ...): unspecified size" and suggests increasing analyzeduration and probesize, so probing never learned the video dimensions.) This might be enough to understand the problem, but if you need more information you can read how the server side performs.
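
For reference, the "dimensions not set" failure comes from the muxer itself: when stream-copying, avformat_write_header() needs the video stream's codec parameters to already carry a width and height, and the SDP probe never produced them here. A hedged illustration in the libavformat C API (write_header_checked is an illustrative helper, not code from this project):

extern "C" {
#include <libavformat/avformat.h>
}

// Refuse to write the container header when a video stream has no
// dimensions; this is the condition the webm muxer is reporting above.
int write_header_checked(AVFormatContext* oc) {
    for (unsigned i = 0; i < oc->nb_streams; i++) {
        const AVCodecParameters* par = oc->streams[i]->codecpar;
        if (par->codec_type == AVMEDIA_TYPE_VIDEO &&
            (par->width <= 0 || par->height <= 0))
            return AVERROR(EINVAL); // what "dimensions not set" amounts to
    }
    return avformat_write_header(oc, nullptr);
}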
The server side, in summary, looks something like this:
Let's say we initialized the worker and router at run time using the following functions.

// Start the mediasoup workers
module.exports.initializeWorkers = async () => {
 const { logLevel, logTags, rtcMinPort, rtcMaxPort } = config.worker;

 console.log('initializeWorkers() creating %d mediasoup workers', config.numWorkers);

 for (let i = 0; i < config.numWorkers; ++i) {
 const worker = await mediasoup.createWorker({
 logLevel, logTags, rtcMinPort, rtcMaxPort
 });

 worker.once('died', () => {
 console.error('worker::died worker has died exiting in 2 seconds... [pid:%d]', worker.pid);
 setTimeout(() => process.exit(1), 2000);
 });

 workers.push(worker);
 }
};



module.exports.createRouter = async () => {
 const worker = getNextWorker();

 console.log('createRouter() creating new router [worker.pid:%d]', worker.pid);

 console.log(`config.router.mediaCodecs:${JSON.stringify(config.router.mediaCodecs)}`)

 return await worker.createRouter({ mediaCodecs: config.router.mediaCodecs });
};



We pass router.rtpCapabilities to the client. The client gets the rtpCapabilities, creates a device and loads it. After that, a transport must be created on the server side.

const handleCreateTransportRequest = async (jsonMessage) => {

 const transport = await createTransport('webRtc', router);

 var peer;
 try {peer = peers.get(jsonMessage.sessionId);}
 catch{console.log('peer not found')}
 
 peer.addTransport(transport);

 peer.socket.emit('create-transport',{
 id: transport.id,
 iceParameters: transport.iceParameters,
 iceCandidates: transport.iceCandidates,
 dtlsParameters: transport.dtlsParameters
 });
};



Then, after the client side has also created its transport, we listen for the connect event; when it fires, we ask the server to establish the connection.


const handleTransportConnectRequest = async (jsonMessage) => {
 var peer;
 try {peer = peers.get(jsonMessage.sessionId);}
 catch{console.log('peer not found')}

 if (!peer) {
 throw new Error(`Peer with id ${jsonMessage.sessionId} was not found`);
 }

 const transport = peer.getTransport(jsonMessage.transportId);

 if (!transport) {
 throw new Error(`Transport with id ${jsonMessage.transportId} was not found`);
 }

 await transport.connect({ dtlsParameters: jsonMessage.dtlsParameters });
 console.log('handleTransportConnectRequest() transport connected');
 peer.socket.emit('connect-transport');
};



A similar thing happens on the produce event.


const handleProduceRequest = async (jsonMessage) => {
 console.log('handleProduceRequest [data:%o]', jsonMessage);

 var peer;
 try {peer = peers.get(jsonMessage.sessionId);}
 catch{console.log('peer not found')}

 if (!peer) {
 throw new Error(`Peer with id ${jsonMessage.sessionId} was not found`);
 }

 const transport = peer.getTransport(jsonMessage.transportId);

 if (!transport) {
 throw new Error(`Transport with id ${jsonMessage.transportId} was not found`);
 }

 const producer = await transport.produce({
 kind: jsonMessage.kind,
 rtpParameters: jsonMessage.rtpParameters
 });

 peer.addProducer(producer);

 console.log('handleProducerRequest() new producer added [id:%s, kind:%s]', producer.id, producer.kind);

 peer.socket.emit('produce',{
 id: producer.id,
 kind: producer.kind
 });
};



For recording, I first create plain transports for the audio and video producers.


const rtpTransport = router.createPlainTransport(config.plainRtpTransport);



Then the RTP transport must be connected to the ports:


await rtpTransport.connect({
 ip: '127.0.0.1',
 port: remoteRtpPort,
 rtcpPort: remoteRtcpPort
 });



Then the consumer must also be created.


const rtpConsumer = await rtpTransport.consume({
 producerId: producer.id,
 rtpCapabilities,
 paused: true
 });



After that, we can start recording using the following code:


this._rtpParameters = args;
 this._process = undefined;
 this._observer = new EventEmitter();
 this._peer = args.peer;

 this._sdpString = createSdpText(this._rtpParameters);
 this._sdpStream = convertStringToStream(this._sdpString);
 // create dir
 const dir = process.env.REOCRDING_PATH ?? 'storage/recordings';
 if (!fs.existsSync(dir)) shelljs.mkdir('-p', dir);
 
 this._extension = 'webm';
 // create file path
 this._path = `${dir}/${args.peer.sessionId}.${this._extension}`
 let loop = 0;
 while(fs.existsSync(this._path)) {
 this._path = `${dir}/${args.peer.sessionId}-${++loop}.${this._extension}`
 }

this._recordingnModel = await Recording.findOne({sessionIds: { $in: [this._peer.sessionId] }})
 this._recordingnModel.files.push(this._path);
 this._recordingnModel.save();

let proc = ffmpeg(this._sdpStream)
 .inputOptions([
 '-protocol_whitelist','pipe,udp,rtp',
 '-f','sdp',
 ])
 .format(this._extension)
 .output(this._path)
 .size('720x?')
 .on('start', ()=>{
 this._peer.socket.emit('recording');
 })
 .on('end', ()=>{
 let path = this._path.replace('storage/recordings/', '');
 this._peer.socket.emit('recording-closed', {
 url: `${process.env.APP_URL}/recording/file/${path}`
 });
 });

 proc.run();
 this._process = proc;
 }

-
Why does my Discord bot stop playing music?
27 September 2021, by Konglnwza. My Discord bot plays music for a while, then it stops and I have to restart it to fix it.
And if it has stopped and I then use the skip command, the bot crashes with the errors below:


C:\Users\User\Desktop\Discord bot\Song\play.js:96
 server_queue.connection.dispatcher.end();
 ^

TypeError: Cannot read property 'end' of null
 at skip_song (C:\Users\User\Desktop\Discord bot\Song\play.js:96:40)
 at Object.execute (C:\Users\User\Desktop\Discord bot\Song\play.js:65:47)
 at Client.<anonymous> (C:\Users\User\Desktop\Discord bot\bot.js:78:74)
 at Client.emit (node:events:394:28)
 at MessageCreateAction.handle (C:\Users\User\Desktop\Discord bot\node_modules\discord.js\src\client\actions\MessageCreate.js:31:14)
 at Object.module.exports [as MESSAGE_CREATE] (C:\Users\User\Desktop\Discord bot\node_modules\discord.js\src\client\websocket\handlers\MESSAGE_CREATE.js:4:32)
 at WebSocketManager.handlePacket (C:\Users\User\Desktop\Discord bot\node_modules\discord.js\src\client\websocket\WebSocketManager.js:384:31)
 at WebSocketShard.onPacket (C:\Users\User\Desktop\Discord bot\node_modules\discord.js\src\client\websocket\WebSocketShard.js:444:22)
 at WebSocketShard.onMessage (C:\Users\User\Desktop\Discord bot\node_modules\discord.js\src\client\websocket\WebSocketShard.js:301:10)
 at WebSocket.onMessage (C:\Users\User\Desktop\Discord bot\node_modules\ws\lib\event-target.js:132:16)


I guess the problem is something with FFmpeg, or I need to install more npm packages.


npm packages I already installed:


- discord.js
- ytdl-core
- ytdl-search

I have also already installed FFmpeg on my computer and set the path.

I want to ask: do I have to install any more npm packages? Or does anyone know how to fix this problem?


const ytdl = require('ytdl-core');
const ytSearch = require('yt-search');

const queue = new Map();

module.exports = {
 name: 'play',
 aliases: ['skip', 'stop', 'queue', 'leave', 'join'],
 description: 'Joins and play',
 async execute(message, args , cmd, client, Discord){
 const voice_channel = message.member.voice.channel;
 if(!voice_channel) return message.channel.send('เข้าไปอยู่ในดิสก่อนดิวะ :angry: ');//you must be in a voice channel
 
 
 const server_queue = queue.get(message.guild.id);
 if(cmd==='play' || cmd==='p'){
 if(!args.length) return message.channel.send('จะเปิดอะไรล่ะพี่ :triumph:');//you must have argument
 let song = {};

 if(ytdl.validateURL(args[0])){
 const song_info = await ytdl.getInfo(args[0]);
 song = {title: song_info.videoDetails.title, url: song_info.videoDetails.video_url}
 } else {
 const videoFinder = async (query) => {
 const videoResult = await ytSearch(query);
 return (videoResult.videos.length > 1) ? videoResult.videos[0] : null;
 }

 const video = await videoFinder(args.join(' '));
 if(video){
 song = {title: video.title, url: video.url}
 
 } else{
 message.channel.send('หาไม่เจอ :cry: ');//cant find song
 }
 }
 //const connection = await voiceChannel.join(); 
 if(!server_queue){
 const queue_constructor = {
 voice_channel: voice_channel,
 text_channel: message.channel,
 connection: null,
 songs: []
 }

 queue.set(message.guild.id, queue_constructor);
 queue_constructor.songs.push(song);

 try{
 const connection = await voice_channel.join();
 queue_constructor.connection = connection;
 video_player(message.guild, queue_constructor.songs[0]);
 } catch (err) {
 queue.delete(message.guild.id);
 message.channel.send('error');
 throw err;
 }
 } else{
 server_queue.songs.push(song);
 return message.channel.send(`:regional_indicator_k: :regional_indicator_o: :regional_indicator_n: :regional_indicator_g: :star_struck: **${song.title}** ใส่เข้าคิวแล้ว`)//added to queue
 }
 

 }
 else if(cmd === 'skip' || cmd ==='s') skip_song(message, server_queue);
 else if(cmd === 'clear' || cmd==='c') clear_song(message, server_queue);
 else if(cmd === 'join') join_song(message);
 else if(cmd === 'leave') leave_song(message);
 else if(cmd === 'queue' || cmd ==='q') queue_show(message,server_queue,Discord);


 }
}

const video_player = async (guild, song) => {
 const song_queue = queue.get(guild.id);

 if(!song){
 //song_queue.text_channel.send('ไปละบาย :kissing_heart: ');
 song_queue.voice_channel.leave();
 queue.delete(guild.id);
 return;
 }
 const stream = ytdl(song.url,{filter: 'audioonly'},{ highWaterMark: 1<<25 });
 song_queue.connection.play(stream, { seek: 0, volume: 0.5 })
 .on('finish', () => {
 song_queue.songs.shift();
 video_player(guild, song_queue.songs[0]);
 });
 await song_queue.text_channel.send(`:regional_indicator_k: :regional_indicator_o: :regional_indicator_n: :regional_indicator_g: :sunglasses: กำลังเล่นเด็ก ***${song.title}***`);//playing song
}

const skip_song = (message, server_queue) => {
 if(!message.member.voice.channel) return message.channel.send('เข้าดิสก่อนดิ :angry: ');//you must be in voice channel
 if(!server_queue) return message.channel.send('ไม่มีเพลงในคิวแล้ว :relieved: ');//no song in queue
 server_queue.connection.dispatcher.end();
}

const clear_song = (message, server_queue) => {
 if(!message.member.voice.channel) return message.channel.send('เข้าดิสก่อนดิ :angry: ');//you must be in voice channel
 if(!server_queue) return message.channel.send('ไม่มีเพลงในคิวแล้ว :relieved: ');//no song in queue
 server_queue.songs = [];
 server_queue.connection.dispatcher.end();
}

const join_song = (message) => {
 if(!message.member.voice.channel) return message.channel.send('เข้าดิสก่อนดิ :angry: ');//you must be in voice channel
 message.member.voice.channel.join();
}

const leave_song = (message) => {
 if(!message.member.voice.channel) return message.channel.send('เข้าดิสก่อนดิ :angry: ');//you must be in voice channel
 message.member.voice.channel.leave();
}

const queue_show = (message,server_queue,Discord) => {
 if(!server_queue) return message.channel.send('ไม่มีเพลงในคิวแล้ว :relieved: ');//no song in queue
 const queueList = server_queue.songs.map((song, i) => `[${++i}] - ${song.title}`);
 const queueEmbed = new Discord.MessageEmbed()
 .setDescription(queueList);
 message.channel.send(queueEmbed);
}