
Media (1)
-
Ogg detection bug
22 March 2013, by
Updated: April 2013
Language: French
Type: Video
Other articles (106)
-
MediaSPIP 0.1 Beta version
25 April 2011, by
MediaSPIP 0.1 beta is the first version of MediaSPIP proclaimed as "usable".
The zip file provided here only contains the sources of MediaSPIP in its standalone version.
To get a working installation, you must manually install all software dependencies on the server.
If you want to use this archive for an installation in "farm mode", you will also need to proceed to other manual (...)
-
HTML5 audio and video support
13 April 2011, by
MediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
The MediaSPIP player used has been created specifically for MediaSPIP and can be easily adapted to fit in with a specific theme.
For older browsers the Flowplayer flash fallback is used.
MediaSPIP allows for media playback on major mobile platforms with the above (...)
-
APPENDIX: Plugins used specifically for the farm
5 March 2010, by
The central/master site of the farm needs several additional plugins, beyond those of the channels, to work properly: the "Gestion de la mutualisation" plugin; the "inscription3" plugin, to manage registrations and requests to create a mutualisation instance when users sign up; the "verifier" plugin, which provides a field-validation API (used by inscription3); the "champs extras v2" plugin, required by inscription3 (...)
On other sites (16473)
-
FFmpeg error: "avcodec_send_frame" returns "Invalid argument"
17 October 2023, by Paulo Coutinho
I have a problem where the function avcodec_send_frame throws the error "Error sending frame for encoding: Invalid argument" (-22). I have already searched, checked, and rechecked, and found nothing. The code closely follows the FFmpeg examples. Can anyone help me? Thanks.
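
For reference, -22 is AVERROR(EINVAL). FFmpeg error codes can be turned into readable text with av_strerror; a two-line illustration (not part of the question's code):

char buf[AV_ERROR_MAX_STRING_SIZE];
av_strerror(-22, buf, sizeof(buf)); // buf now holds "Invalid argument"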

This is my code:


static void callbackAddSubtitle(const Message &m, const Response r)
{
 try
 {
 av_log_set_level(AV_LOG_DEBUG);

 spdlog::debug("[Mapping :: callbackAddSubtitle] Adding subtitle...");

 auto inputOpt = m.get("input");
 auto outputOpt = m.get("output");

 if (!inputOpt.has_value() || !outputOpt.has_value())
 {
 r(std::string{"INVALID-PARAMS"});
 return;
 }

 const std::string &input = inputOpt.value();
 const std::string &output = outputOpt.value();

 // initialize input
 spdlog::debug("[Mapping :: callbackAddSubtitle] Initializing input video...");

 AVFormatContext *inputFormatCtx = avformat_alloc_context();
 if (avformat_open_input(&inputFormatCtx, input.c_str(), nullptr, nullptr) != 0)
 {
 spdlog::error("Failed to open input");
 r(std::string{"ERROR-OPEN-INPUT"});
 return;
 }

 if (avformat_find_stream_info(inputFormatCtx, nullptr) < 0)
 {
 spdlog::error("Failed to find stream information");
 avformat_close_input(&inputFormatCtx);
 r(std::string{"ERROR-FIND-STREAM"});
 return;
 }

 int videoStreamIndex = av_find_best_stream(inputFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0);
 if (videoStreamIndex < 0)
 {
 spdlog::error("Could not find a video stream");
 r(std::string{"ERROR-FIND-VIDEO-STREAM"});
 return;
 }

 AVRational timeBase = inputFormatCtx->streams[videoStreamIndex]->time_base;

 AVCodecParameters *inputCodecPar = inputFormatCtx->streams[videoStreamIndex]->codecpar;
 const AVCodec *inputCodec = avcodec_find_decoder(inputCodecPar->codec_id);
 AVCodecContext *inputCodecCtx = avcodec_alloc_context3(inputCodec);

 avcodec_parameters_to_context(inputCodecCtx, inputCodecPar);
 avcodec_open2(inputCodecCtx, inputCodec, nullptr);

 // initialize input audio
 spdlog::debug("[Mapping :: callbackAddSubtitle] Initializing input audio...");

 int audioStreamIndex = av_find_best_stream(inputFormatCtx, AVMEDIA_TYPE_AUDIO, -1, -1, nullptr, 0);
 if (audioStreamIndex < 0)
 {
 spdlog::error("Could not find an audio stream");
 r(std::string{"ERROR-FIND-AUDIO-STREAM"});
 return;
 }

 AVCodecParameters *inputAudioCodecPar = inputFormatCtx->streams[audioStreamIndex]->codecpar;
 const AVCodec *inputAudioCodec = avcodec_find_decoder(inputAudioCodecPar->codec_id);
 AVCodecContext *inputAudioCodecCtx = avcodec_alloc_context3(inputAudioCodec);

 avcodec_parameters_to_context(inputAudioCodecCtx, inputAudioCodecPar);
 avcodec_open2(inputAudioCodecCtx, inputAudioCodec, nullptr);

 // initialize output video
 spdlog::debug("[Mapping :: callbackAddSubtitle] Initializing output video...");

 AVFormatContext *outputFormatCtx = nullptr;
 avformat_alloc_output_context2(&outputFormatCtx, nullptr, nullptr, output.c_str());
 AVStream *outputStream = avformat_new_stream(outputFormatCtx, nullptr);

 AVCodecContext *outputCodecCtx = avcodec_alloc_context3(inputCodec);
 avcodec_parameters_to_context(outputCodecCtx, inputCodecPar);
 int retOutVideo = avcodec_open2(outputCodecCtx, inputCodec, nullptr);

 if (retOutVideo < 0)
 {
 char err[AV_ERROR_MAX_STRING_SIZE];
 av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, retOutVideo);
 spdlog::error("Failed to initialize output video: {}", err);
 r(std::string{"ERROR-INIT-OUTPUT-VIDEO"});
 return;
 }

 outputStream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 outputStream->codecpar->codec_id = inputCodec->id;
 avcodec_parameters_from_context(outputStream->codecpar, outputCodecCtx);

 if (!(outputFormatCtx->oformat->flags & AVFMT_NOFILE))
 {
 avio_open(&outputFormatCtx->pb, output.c_str(), AVIO_FLAG_WRITE);
 }

 const char *pixelFormatName = getPixelFormatName(outputCodecCtx->pix_fmt);
 spdlog::debug("Pixel Format: {}", pixelFormatName);

 // initialize output audio
 spdlog::debug("[Mapping :: callbackAddSubtitle] Initializing output audio...");

 AVStream *outputAudioStream = avformat_new_stream(outputFormatCtx, nullptr);
 AVCodecContext *outputAudioCodecCtx = avcodec_alloc_context3(inputAudioCodec);
 avcodec_parameters_to_context(outputAudioCodecCtx, inputAudioCodecPar);
 int retOutAudio = avcodec_open2(outputAudioCodecCtx, inputAudioCodec, nullptr);

 if (retOutAudio < 0)
 {
 char err[AV_ERROR_MAX_STRING_SIZE];
 av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, retOutAudio);
 spdlog::error("Failed to initialize output audio: {}", err);
 r(std::string{"ERROR-INIT-OUTPUT-AUDIO"});
 return;
 }

 outputAudioStream->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
 outputAudioStream->codecpar->codec_id = inputAudioCodec->id;
 avcodec_parameters_from_context(outputAudioStream->codecpar, outputAudioCodecCtx);

 // initialize filters
 spdlog::debug("[Mapping :: callbackAddSubtitle] Initializing filters...");

 AVFilterGraph *filterGraph = avfilter_graph_alloc();
 if (!filterGraph)
 {
 spdlog::error("Failed to allocate filter graph");
 r(std::string{"ERROR-FILTER-GRAPH"});
 return;
 }

 AVFilterContext *bufferSinkCtx;
 AVFilterContext *bufferSrcCtx;

 const AVFilter *bufferSink = avfilter_get_by_name("buffersink");
 const AVFilter *bufferSrc = avfilter_get_by_name("buffer");

 // input filter
 char filterInArgs[512];
 snprintf(filterInArgs, sizeof(filterInArgs), "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", inputCodecPar->width, inputCodecPar->height, inputCodecCtx->pix_fmt, timeBase.num, timeBase.den, inputCodecCtx->sample_aspect_ratio.num, inputCodecCtx->sample_aspect_ratio.den);

 spdlog::debug("[Mapping :: callbackAddSubtitle] Buffer src args: {}", filterInArgs);

 int retFilterIn = avfilter_graph_create_filter(&bufferSrcCtx, bufferSrc, "in", filterInArgs, nullptr, filterGraph);
 if (retFilterIn < 0)
 {
 char err[AV_ERROR_MAX_STRING_SIZE];
 av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, retFilterIn);
 spdlog::error("Failed to create bufferSrcCtx: {}", err);
 r(std::string{"ERROR-CREATE-FILTER-SRC"});
 return;
 }

 // output filter
 int retFilterOut = avfilter_graph_create_filter(&bufferSinkCtx, bufferSink, "out", nullptr, nullptr, filterGraph);

 if (retFilterOut < 0)
 {
 char err[AV_ERROR_MAX_STRING_SIZE];
 av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, retFilterOut);
 spdlog::error("Failed to create bufferSinkCtx: {}", err);
 r(std::string{"ERROR-CREATE-FILTER-SINK"});
 return;
 }

 enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE};
 av_opt_set_int_list(bufferSinkCtx, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);

 // add filters to graph and link them
 const char *filterSpec = "drawtext=text='Legenda Adicionada Automaticamente Via FFMPEG e C++': fontcolor=yellow: bordercolor=black: fontfile='/Users/paulo/Downloads/roboto/Roboto-Black.ttf'";
 const AVFilter *filter = avfilter_get_by_name("drawtext");

 AVFilterInOut *outputs = avfilter_inout_alloc();
 AVFilterInOut *inputs = avfilter_inout_alloc();

 outputs->name = av_strdup("in");
 outputs->filter_ctx = bufferSrcCtx;
 outputs->pad_idx = 0;
 outputs->next = nullptr;
 inputs->name = av_strdup("out");
 inputs->filter_ctx = bufferSinkCtx;
 inputs->pad_idx = 0;
 inputs->next = nullptr;

 if (avfilter_graph_parse_ptr(filterGraph, filterSpec, &inputs, &outputs, nullptr) < 0)
 {
 spdlog::error("Failed to parse filter graph");
 r(std::string{"ERROR-PARSE-FILTER"});
 return;
 }

 if (avfilter_graph_config(filterGraph, nullptr) < 0)
 {
 spdlog::error("Failed to configure filter graph");
 r(std::string{"ERROR-CONFIG-FILTER"});
 return;
 }

 // header
 spdlog::debug("[Mapping :: callbackAddSubtitle] Writing header...");

 if (avformat_write_header(outputFormatCtx, nullptr) < 0)
 {
 spdlog::error("Error writing header");
 r(std::string{"ERROR-WRITE-HEADER"});
 return;
 }

 // read frames and write to output
 AVPacket *packet = av_packet_alloc();
 AVFrame *frame = av_frame_alloc();

 frame->format = inputCodecCtx->pix_fmt;
 frame->width = inputCodecCtx->width;
 frame->height = inputCodecCtx->height;

 AVFrame *filt_frame = av_frame_alloc();

 filt_frame->format = inputCodecCtx->pix_fmt;
 filt_frame->width = inputCodecCtx->width;
 filt_frame->height = inputCodecCtx->height;

 while (av_read_frame(inputFormatCtx, packet) >= 0)
 {
 if (packet->stream_index == videoStreamIndex)
 {
 if (avcodec_send_packet(inputCodecCtx, packet) < 0)
 {
 spdlog::error("Error sending packet for decoding");
 r(std::string{"ERROR-SEND-PACKET-DECODE"});
 return;
 }

 while (avcodec_receive_frame(inputCodecCtx, frame) == 0)
 {
 // Send the decoded frame to the filter graph
 if (av_buffersrc_add_frame_flags(bufferSrcCtx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0)
 {
 spdlog::error("Error while feeding the filtergraph");
 r(std::string{"ERROR-FEED-FILTERGRAPH"});
 return;
 }

 // Receive a frame from the filter graph
 if (av_buffersink_get_frame(bufferSinkCtx, filt_frame) < 0)
 {
 spdlog::error("Error while receiving the filtered frame");
 r(std::string{"ERROR-RECEIVE-FILTERED-FRAME"});
 return;
 }

 // Send the filtered frame for re-encoding
 int retSendFrame = avcodec_send_frame(outputCodecCtx, filt_frame);
 if (retSendFrame < 0)
 {
 char err[AV_ERROR_MAX_STRING_SIZE];
 av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, retSendFrame);
 spdlog::error("Error sending frame for encoding: {}", err);
 r(std::string{"ERROR-SEND-FRAME-ENCODE"});
 return;
 }

 AVPacket *output_packet = av_packet_alloc();
 output_packet->data = nullptr;
 output_packet->size = 0;

 // Re-encode filt_frame into a packet
 if (avcodec_receive_packet(outputCodecCtx, output_packet) == 0)
 {
 // Write the packet to the output stream
 av_write_frame(outputFormatCtx, output_packet);
 av_packet_unref(output_packet);
 }

 av_frame_unref(filt_frame);
 }

 // time
 packet->pts = av_rescale_q_rnd(packet->pts, inputFormatCtx->streams[videoStreamIndex]->time_base, outputFormatCtx->streams[videoStreamIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
 packet->dts = av_rescale_q_rnd(packet->dts, inputFormatCtx->streams[videoStreamIndex]->time_base, outputFormatCtx->streams[videoStreamIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
 packet->duration = av_rescale_q(packet->duration, inputFormatCtx->streams[videoStreamIndex]->time_base, outputFormatCtx->streams[videoStreamIndex]->time_base);
 packet->stream_index = videoStreamIndex;

 // write packet to output video stream
 av_interleaved_write_frame(outputFormatCtx, packet);
 }
 else if (packet->stream_index == audioStreamIndex)
 {
 // rescale timestamps
 packet->pts = av_rescale_q_rnd(packet->pts, inputFormatCtx->streams[audioStreamIndex]->time_base, outputFormatCtx->streams[audioStreamIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
 packet->dts = av_rescale_q_rnd(packet->dts, inputFormatCtx->streams[audioStreamIndex]->time_base, outputFormatCtx->streams[audioStreamIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
 packet->duration = av_rescale_q(packet->duration, inputFormatCtx->streams[audioStreamIndex]->time_base, outputFormatCtx->streams[audioStreamIndex]->time_base);
 packet->stream_index = audioStreamIndex;

 // write packet to output audio stream
 av_interleaved_write_frame(outputFormatCtx, packet);
 }

 av_packet_unref(packet);
 }

 av_packet_free(&packet);
 av_frame_free(&frame);
 av_frame_free(&filt_frame);

 spdlog::debug("[Mapping :: callbackAddSubtitle] Writing trailer...");

 if (av_write_trailer(outputFormatCtx) < 0)
 {
 spdlog::error("Error writing trailer");
 r(std::string{"ERROR-WRITE-TRAILER"});
 return;
 }

 // cleanup
 spdlog::debug("[Mapping :: callbackAddSubtitle] Cleaning...");

 if (!(outputFormatCtx->oformat->flags & AVFMT_NOFILE))
 {
 avio_closep(&outputFormatCtx->pb);
 }

 avcodec_free_context(&inputCodecCtx);
 avcodec_free_context(&inputAudioCodecCtx);
 avcodec_free_context(&outputCodecCtx);
 avcodec_free_context(&outputAudioCodecCtx);

 avformat_free_context(inputFormatCtx);
 avformat_free_context(outputFormatCtx);

 r(std::string{"OK"});
 }
 catch (const std::exception &e)
 {
 spdlog::error("Error: {}", e.what());
 r(std::string{"ERROR"});
 }
}



The error is:


[2023-10-17 06:30:16.936] [debug] [Mapping :: callbackAddSubtitle] Adding subtitle...
[2023-10-17 06:30:16.936] [debug] [Mapping :: callbackAddSubtitle] Initializing input video...
[NULL @ 0x153604a60] Opening '/Users/paulo/Downloads/movie.mp4' for reading
[file @ 0x6000001fd170] Setting default whitelist 'file,crypto,data'
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] Format mov,mp4,m4a,3gp,3g2,mj2 probed with size=2048 and score=100
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] ISO: File Type Major Brand: isom
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] Unknown dref type 0x206c7275 size 12
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] Processing st: 0, edit list 0 - media time: 0, duration: 2669670
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] Unknown dref type 0x206c7275 size 12
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] Processing st: 1, edit list 0 - media time: 1024, duration: 4272096
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] drop a frame at curr_cts: 0 @ 0
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] Before avformat_find_stream_info() pos: 113542488 bytes read:110788 seeks:1 nb_streams:2
[h264 @ 0x153604cd0] nal_unit_type: 7(SPS), nal_ref_idc: 3
[h264 @ 0x153604cd0] Decoding VUI
[h264 @ 0x153604cd0] nal_unit_type: 8(PPS), nal_ref_idc: 3
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] demuxer injecting skip 1024 / discard 0
[aac @ 0x1536056f0] skip 1024 / discard 0 samples due to side data
[h264 @ 0x153604cd0] nal_unit_type: 7(SPS), nal_ref_idc: 3
[h264 @ 0x153604cd0] Decoding VUI
[h264 @ 0x153604cd0] nal_unit_type: 8(PPS), nal_ref_idc: 3
[h264 @ 0x153604cd0] nal_unit_type: 6(SEI), nal_ref_idc: 0
[h264 @ 0x153604cd0] nal_unit_type: 5(IDR), nal_ref_idc: 3
[h264 @ 0x153604cd0] Format yuv420p chosen by get_format().
[h264 @ 0x153604cd0] Reinit context to 1088x1920, pix_fmt: yuv420p
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] All info found
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] After avformat_find_stream_info() pos: 195211 bytes read:305951 seeks:2 frames:2
[2023-10-17 06:30:18.160] [debug] [Mapping :: callbackAddSubtitle] Initializing input audio...
[h264 @ 0x143604330] nal_unit_type: 7(SPS), nal_ref_idc: 3
[h264 @ 0x143604330] Decoding VUI
[h264 @ 0x143604330] nal_unit_type: 8(PPS), nal_ref_idc: 3
[2023-10-17 06:30:18.160] [debug] [Mapping :: callbackAddSubtitle] Initializing output video...
[h264 @ 0x143611ec0] nal_unit_type: 7(SPS), nal_ref_idc: 3
[h264 @ 0x143611ec0] Decoding VUI
[h264 @ 0x143611ec0] nal_unit_type: 8(PPS), nal_ref_idc: 3
[file @ 0x6000001f4000] Setting default whitelist 'file,crypto,data'
[2023-10-17 06:30:18.167] [debug] Pixel Format: YUV420P
[2023-10-17 06:30:18.167] [debug] [Mapping :: callbackAddSubtitle] Initializing output audio...
[2023-10-17 06:30:18.167] [debug] [Mapping :: callbackAddSubtitle] Initializing filters...
[2023-10-17 06:30:18.168] [debug] [Mapping :: callbackAddSubtitle] Buffer src args: video_size=1080x1920:pix_fmt=0:time_base=1/30000:pixel_aspect=1/1
detected 10 logical cores
[in @ 0x6000004ec0b0] Setting 'video_size' to value '1080x1920'
[in @ 0x6000004ec0b0] Setting 'pix_fmt' to value '0'
[in @ 0x6000004ec0b0] Setting 'time_base' to value '1/30000'
[in @ 0x6000004ec0b0] Setting 'pixel_aspect' to value '1/1'
[in @ 0x6000004ec0b0] w:1080 h:1920 pixfmt:yuv420p tb:1/30000 fr:0/1 sar:1/1
[AVFilterGraph @ 0x6000017e8000] Setting 'text' to value 'Legenda Adicionada Automaticamente Via FFMPEG e C++'
[AVFilterGraph @ 0x6000017e8000] Setting 'fontcolor' to value 'yellow'
[AVFilterGraph @ 0x6000017e8000] Setting 'bordercolor' to value 'black'
[AVFilterGraph @ 0x6000017e8000] Setting 'fontfile' to value '/Users/paulo/Downloads/roboto/Roboto-Black.ttf'
[AVFilterGraph @ 0x6000017e8000] query_formats: 3 queried, 2 merged, 0 already done, 0 delayed
[2023-10-17 06:30:18.172] [debug] [Mapping :: callbackAddSubtitle] Writing header...
[h264 @ 0x143604330] nal_unit_type: 6(SEI), nal_ref_idc: 0
[h264 @ 0x143604330] nal_unit_type: 5(IDR), nal_ref_idc: 3
[h264 @ 0x143604330] Format yuv420p chosen by get_format().
[h264 @ 0x143604330] Reinit context to 1088x1920, pix_fmt: yuv420p
[Parsed_drawtext_0 @ 0x6000004f4160] Copying data in avfilter.
[Parsed_drawtext_0 @ 0x6000004f4160] n:0 t:0.000000 text_w:424 text_h:16 x:0 y:0
[2023-10-17 06:30:18.182] [error] Error sending frame for encoding: Invalid argument
Returned Value: ERROR-SEND-FRAME-ENCODE
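
Note: per the libavcodec documentation, avcodec_send_frame() returns AVERROR(EINVAL) when the context it is given was not opened, or was opened as a decoder rather than an encoder. In the code above, outputCodecCtx is allocated from inputCodec, which was obtained with avcodec_find_decoder(). A minimal sketch of an encoder-side setup follows; it is illustrative only, videoEncoder is a name invented for the sketch, and the settings assume the yuv420p output of the buffersink:

const AVCodec *videoEncoder = avcodec_find_encoder(inputCodecPar->codec_id);
AVCodecContext *outputCodecCtx = avcodec_alloc_context3(videoEncoder);
outputCodecCtx->width = inputCodecCtx->width; // same geometry as the decoded input
outputCodecCtx->height = inputCodecCtx->height;
outputCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P; // matches the buffersink pix_fmts list
outputCodecCtx->time_base = timeBase; // encoders require a valid time_base
if (outputFormatCtx->oformat->flags & AVFMT_GLOBALHEADER)
 outputCodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
avcodec_open2(outputCodecCtx, videoEncoder, nullptr);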



-
Read existing MP4 File and write into a new MP4 file using libavcodec
4 October 2023, by Tahfimul
I am new to the libavcodec space.


I am trying to read video and audio streams from an existing mp4 file, take the data from the two streams, mux them, and write the muxed data into a new mp4 file using libavcodec in C++. Essentially, I am aiming to split the original (existing) mp4 file into small chunks of 1-second clips that can then be played back by a video player. I would like to preserve the original mp4 file's video stream (i.e. its color, resolution, etc.) and its audio stream (i.e. its bit rate, etc.). I am trying to achieve this using libavcodec in C++, but there does not seem to be any tutorial or documentation online that points me in that direction.
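
(For comparison: the ffmpeg command-line tool already does this with its segment muxer, e.g. "ffmpeg -i input.mp4 -c copy -map 0 -f segment -segment_time 1 -reset_timestamps 1 out%03d.mp4". With -c copy it can only cut on keyframes, so chunk lengths are approximate; a libavformat sketch of the same idea appears after the question below.)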


So far, I have looked at and tried to implement a solution using this tutorial (tutorial#1): https://github.com/leandromoreira/ffmpeg-libav-tutorial/blob/master/0_hello_world.c


However, tutorial#1 aimed to save each video frame from the existing (original) mp4 video stream into individual .pgm files, which meant that the .pgm files would store a grayscale image of each video frame.


Since I want to preserve the colors of the original (existing) mp4 file, I looked at this tutorial (tutorial#2), which aimed to convert the grayscale video frame to color using the swscale library: https://www.youtube.com/watch?v=Y7SUm7Xf1sc&ab_channel=Bartholomew
However, in tutorial#2, they exported the output of the swscale library to a GUI library to be viewed in a GUI application, and did not show how to write the output data into a new mp4 file that can be played back by a video player.


So then I looked at this tutorial (tutorial#3), which showed how to create an MP4 file using libavcodec: C++ FFmpeg create mp4 file
However, the problem with that solution is that I was not able to take a video frame from the original mp4 video and store it in another mp4 file. I kept getting errors when attempting to do so, and I did not succeed in taking the data from the original (existing) mp4 file and storing it in a new mp4 file.


Here is the code that I have written so far:


#include <fstream>
#include <iostream>
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/mathematics.h>
#include <libswscale/swscale.h>
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
#include <libavutil/time.h>
#include <libavutil/opt.h>
}
#pragma comment(lib, "avfilter.lib")
#ifdef av_err2str
#undef av_err2str
#include <string>
av_always_inline std::string av_err2string(int errnum) {
 char str[AV_ERROR_MAX_STRING_SIZE];
 return av_make_error_string(str, AV_ERROR_MAX_STRING_SIZE, errnum);
}
#define av_err2str(err) av_err2string(err).c_str()
#endif // av_err2str

#include <chrono>
#include <thread>


// decode packets into frames
static int decode_packet(AVPacket *pPacket, AVCodecContext *pCodecContext, AVFrame *pFrame);

static void pushFrame(AVFrame* frame, AVCodecContext* outputCodecContext, AVPacket * outputPacket, AVFormatContext* outputFormatContext, AVCodec *outputCodec) {
 
 std::cout<<"outputCodecContext: "<format = AV_PIX_FMT_YUV420P;
 frame->width = 800;
 frame->height = 800;
 if ((err = av_frame_get_buffer(frame, 32)) < 0) {
 std::cout << "Failed to allocate picture" << err << std::endl;
 return;
 }
 }
 SwsContext* swsCtx = nullptr;
 if (!swsCtx) {
 swsCtx = sws_getContext(800, 800, AV_PIX_FMT_RGB24, 800, 
 800, AV_PIX_FMT_YUV420P, SWS_BICUBIC, 0, 0, 0);
 }
 int inLinesize[1] = { 3 * 800 };
 // From RGB to YUV
 // sws_scale(swsCtx, (const uint8_t* const*)&data, inLinesize, 0, 800, 
 // frame->data, frame->linesize);
 std::cout<<"frame "<pts = (1.0 / 30.0) * 90000 * (1);
 // std::cout << videoFrame->pts << " " << cctx->time_base.num << " " << 
 // cctx->time_base.den << " " << 1 << std::endl;
 if ((err = avcodec_send_frame(outputCodecContext, frame)) < 0) {
 std::cout << "Failed to send frame" << err << std::endl;
 return;
 }
 AV_TIME_BASE;
 AVPacket pkt;
 av_init_packet(&pkt);
 pkt.data = NULL;
 pkt.size = 0;
 pkt.flags |= AV_PKT_FLAG_KEY;
 std::cout<<"here\n";
 if (avcodec_receive_packet(outputCodecContext, outputPacket) == 0) {
 static int counter = 0;
 if (counter == 0) {
 FILE* fp = fopen("dump_first_frame1.dat", "wb");
 fwrite(outputPacket->data, outputPacket->size, 1, fp);
 fclose(fp);
 }
 // std::cout << "pkt key: " << (pkt.flags & AV_PKT_FLAG_KEY) << " " << 
 // pkt.size << " " << (counter++) << std::endl;
 // uint8_t* size = ((uint8_t*)pkt.data);
 // std::cout << "first: " << (int)size[0] << " " << (int)size[1] << 
 // " " << (int)size[2] << " " << (int)size[3] << " " << (int)size[4] << 
 // " " << (int)size[5] << " " << (int)size[6] << " " << (int)size[7] << 
 // std::endl;
 av_interleaved_write_frame(outputFormatContext, outputPacket);
 av_packet_unref(outputPacket);
 }
}

int main()
{

 const char* filename = "c++.mp4";

 AVFormatContext *pFormatContext = avformat_alloc_context();

 AVOutputFormat* outputFormat = NULL;

 AVFormatContext* outputFormatContext = nullptr;

 AVCodecContext* outputCodecContext = nullptr;

 if (!pFormatContext) {
 std::cerr<<"ERROR could not allocate memory for Format Context\n";
 return -1;
 }

 if (avformat_open_input(&pFormatContext, filename , NULL, NULL) != 0) {
 std::cerr<<"ERROR could not open the file\n";
 return -1;
 }

 std::cout<<"format: "<iformat->name<<" , duration:"<<(double)(pFormatContext->duration/AV_TIME_BASE)<<"seconds, bit_rate:"<bit_rate<video_codec);

 
 if (!outputCodec)
 {
 std::cout << "can't create output codec" << std::endl;
 return -1;
 } 
 

 AVStream* outputStream = avformat_new_stream(outputFormatContext, outputCodec);

 if (!outputStream)
 {
 std::cout << "can't find output format" << std::endl;
 return -1;
 }

 outputCodecContext = avcodec_alloc_context3(outputCodec);

 if (!outputCodecContext)
 {
 std::cout << "can't create output codec context" << std::endl;
 return -1;
 }

 AVCodec *pCodec = NULL;

 AVCodecParameters *pCodecParameters = NULL;

 int video_stream_index = -1;

 AVStream* stream = NULL;
 
 // loop though all the streams and print its main information
 for (int i = 0; i < pFormatContext->nb_streams; i++)
 {
 
 AVCodecParameters *pLocalCodecParameters = NULL;
 pLocalCodecParameters = pFormatContext->streams[i]->codecpar;

 AVCodec *pLocalCodec = NULL;
 pLocalCodec = avcodec_find_decoder(pLocalCodecParameters->codec_id);
 if (pLocalCodec==NULL) {
 std::cerr<<"ERROR unsupported codec!\n";
 // In this example if the codec is not found we just skip it
 continue;
 }


 if (pLocalCodecParameters->codec_type == AVMEDIA_TYPE_VIDEO) {
 if (video_stream_index == -1) {
 video_stream_index = i;
 pCodec = pLocalCodec;
 pCodecParameters = pLocalCodecParameters;
 stream = pFormatContext->streams[i];
 std::cout<<"codec id: "<codecpar->codec_id<codecpar->codec_type<codecpar->width<codecpar->height<codecpar->format<codecpar->bit_rate<codecpar->codec_id = outputFormat->video_codec;
 // outputStream->codecpar->codec_id = stream->codecpar->codec_id;
 outputStream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 outputStream->codecpar->width = stream->codecpar->width;
 outputStream->codecpar->height = stream->codecpar->height;
 outputStream->codecpar->format = AV_PIX_FMT_YUV420P;
 outputStream->codecpar->bit_rate = stream->codecpar->bit_rate;
 
 avcodec_parameters_to_context(outputCodecContext, outputStream->codecpar);
 } 

 std::cout<<"Video Codec: resolution " << pLocalCodecParameters->width << " x "<height<codec_type == AVMEDIA_TYPE_AUDIO) {
 std::cout<<"Audio Codec: "<channels<<" channels, sample rate "<sample_rate<name << " ID: " <id<< " bit_rate: "<bit_rate</ outputStream->codecpar->codec_id = outputFormat->video_codec;
 // outputStream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 // outputStream->codecpar->width = 300;
 // outputStream->codecpar->height = 300;
 // outputStream->codecpar->format = AV_PIX_FMT_YUV420P;
 // outputStream->codecpar->bit_rate = 200 * 1000;
 outputCodecContext->time_base = (AVRational){ 1, 1 };
 outputCodecContext->max_b_frames = 2;
 outputCodecContext->gop_size = 12;
 outputCodecContext->framerate = (AVRational){ 30, 1 };

 if (avcodec_parameters_to_context(pCodecContext, pCodecParameters) < 0)
 {
 std::cerr<<"failed to copy codec params to codec context\n";
 return -1;
 }

 // std::cout<<"pCodecContext->time_base: "<time_base)</ outputCodecContext->time_base = pCodecContext->time_base;
 // outputCodecContext->max_b_frames = pCodecContext->max_b_frames;
 // outputCodecContext->gop_size = pCodecContext->gop_size;
 // outputCodecContext->framerate = pCodecContext->framerate;

 if (outputStream->codecpar->codec_id == AV_CODEC_ID_H264) {
 // av_opt_set(pCodecContext, "preset", "ultrafast", 0);
 av_opt_set(outputCodecContext, "preset", "ultrafast", 0);
 }
 else if (outputStream->codecpar->codec_id == AV_CODEC_ID_H265)
 {
 // av_opt_set(pCodecContext, "preset", "ultrafast", 0);
 av_opt_set(outputCodecContext, "preset", "ultrafast", 0);
 }

 // avcodec_parameters_from_context(stream->codecpar, pCodecContext);
 avcodec_parameters_from_context(outputStream->codecpar, outputCodecContext);

 if (avcodec_open2(pCodecContext, pCodec, NULL) < 0)
 {
 std::cerr<<"failed to open codec through avcodec_open2\n";
 return -1;
 }

 if (avcodec_open2(outputCodecContext, outputCodec, NULL) < 0)
 {
 std::cerr<<"failed to open output codec through avcodec_open2\n";
 return -1;
 }


 if (!(outputFormat->flags & AVFMT_NOFILE)) {
 if (avio_open(&outputFormatContext->pb, "test.mp4", AVIO_FLAG_WRITE) < 0) {
 std::cout << "Failed to open file" << std::endl;
 return -1;
 }
 }

 if (avformat_write_header(outputFormatContext, NULL) < 0) {
 std::cout << "Failed to write header" << std::endl;
 return -1;
 }

 av_dump_format(outputFormatContext, 0, "test.mp4", 1);


 AVFrame *pFrame = av_frame_alloc();
 if (!pFrame)
 {
 std::cerr<<"failed to allocate memory for AVFrame\n";
 return -1;
 }
 
 // https://ffmpeg.org/doxygen/trunk/structAVPacket.html
 AVPacket *pPacket = av_packet_alloc();
 if (!pPacket)
 {
 std::cerr<<"failed to allocate memory for AVPacket\n";
 return -1;
 }

 int response = 0;
 int how_many_packets_to_process = 300;

 // fill the Packet with data from the Stream
 // https://ffmpeg.org/doxygen/trunk/group__lavf__decoding.html#ga4fdb3084415a82e3810de6ee60e46a61
 while (av_read_frame(pFormatContext, pPacket) >= 0)
 {
 // if it's the video stream
 if (pPacket->stream_index == video_stream_index) {
 std::cout<<"AVPacket->pts "<pts;
 // if(av_write_frame(outputFormatContext, pPacket)<0)
 // std::cout<<"error writing output frame\n";
 // pushFrame(pFrame, outputCodecContext, pPacket, outputFormatContext, outputCodec);
 response = decode_packet(pPacket, pCodecContext, pFrame);
 if (response < 0)
 break;
 // stop it, otherwise we'll be saving hundreds of frames
 if (--how_many_packets_to_process <= 0) break;
 }
 // https://ffmpeg.org/doxygen/trunk/group__lavc__packet.html#ga63d5a489b419bd5d45cfd09091cbcbc2
 av_packet_unref(pPacket);
 } 

 if(av_write_trailer(outputFormatContext)<0)
 std::cout <<"Error writing output trailer\n";


 return 0;
}

int save_frame_as_mpeg(AVCodecContext* pCodecCtx, AVFrame* pFrame, int FrameNo) {
 int ret = 0;

 const AVCodec* mpegCodec = avcodec_find_encoder(pCodecCtx->codec_id);
 if (!mpegCodec) {
 std::cout<<"failed to open mpegCodec\n";
 return -1;
 }
 AVCodecContext* mpegContext = avcodec_alloc_context3(mpegCodec);
 if (!mpegContext) {
 std::cout<<"failed to open mpegContext\n";
 return -1;
 }

 mpegContext->pix_fmt = pCodecCtx->pix_fmt;
 mpegContext->height = pFrame->height;
 mpegContext->width = pFrame->width;
 mpegContext->time_base = AVRational{ 1,10 };

 ret = avcodec_open2(mpegContext, mpegCodec, NULL);
 if (ret < 0) {
 return ret;
 }
 FILE* MPEGFile;
 char MPEGFName[256];

 AVPacket packet;
 packet.data = NULL;
 packet.size = 0;
 av_init_packet(&packet);

 int gotFrame;

 ret = avcodec_send_frame(mpegContext, pFrame);
 if (ret < 0) {
 std::cout<<"failed to send frame for mpegContext\n";
 return ret;
 }

 ret = avcodec_receive_packet(mpegContext, &packet);
 if (ret < 0) {
 std::cout<<"failed to receive packet for mpegContext\terrocode: "<pix_fmt = pCodecCtx->pix_fmt;
 jpegContext->height = pFrame->height;
 jpegContext->width = pFrame->width;
 jpegContext->time_base = AVRational{ 1,10 };

 ret = avcodec_open2(jpegContext, jpegCodec, NULL);
 if (ret < 0) {
 return ret;
 }
 FILE* JPEGFile;
 char JPEGFName[256];

 AVPacket packet;
 packet.data = NULL;
 packet.size = 0;
 av_init_packet(&packet);

 int gotFrame;

 ret = avcodec_send_frame(jpegContext, pFrame);
 if (ret < 0) {
 return ret;
 }

 ret = avcodec_receive_packet(jpegContext, &packet);
 if (ret < 0) {
 return ret;
 }

 sprintf(JPEGFName, "c:\\folder\\dvr-%06d.jpg", FrameNo);
 JPEGFile = fopen(JPEGFName, "wb");
 fwrite(packet.data, 1, packet.size, JPEGFile);
 fclose(JPEGFile);

 av_packet_unref(&packet);
 avcodec_close(jpegContext);
 return 0;
}

static int decode_packet(AVPacket *pPacket, AVCodecContext *pCodecContext, AVFrame *pFrame)
{
 // Supply raw packet data as input to a decoder
 // https://ffmpeg.org/doxygen/trunk/group__lavc__decoding.html#ga58bc4bf1e0ac59e27362597e467efff3
 int response = avcodec_send_packet(pCodecContext, pPacket);
 if (response < 0) {
 std::cerr<<"Error while sending a packet to the decoder: "<= 0)
 {
 // Return decoded output data (into a frame) from a decoder
 // https://ffmpeg.org/doxygen/trunk/group__lavc__decoding.html#ga11e6542c4e66d3028668788a1a74217c
 response = avcodec_receive_frame(pCodecContext, pFrame);
 if (response == AVERROR(EAGAIN) || response == AVERROR_EOF) {
 break;
 } else if (response < 0) {
 std::cerr<<"Error while receiving a frame from the decoder: "<= 0) {

 response = save_frame_as_jpeg(pCodecContext, pFrame, pCodecContext->frame_number);

 if(response<0)
 {
 std::cerr<<"Failed to save frame as jpeg\n";
 return -1;
 }

 response = save_frame_as_mpeg(pCodecContext, pFrame, pCodecContext->frame_number);

 if(response<0)
 {
 std::cerr<<"Failed to save frame as mpeg\n";
 return -1;
 }


 std::cout <<
 "Frame " << pCodecContext->frame_number <<
 " type= " << av_get_picture_type_char(pFrame->pict_type) <<
 " size= " << pFrame->pkt_size << " bytes, format= " << pFrame->format <<
 " " << pFrame->pts << " pts, key_frame " << pFrame->key_frame <<
 " [DTS " << pFrame->coded_picture_number << " ]\n";
 
 char frame_filename[1024];
 snprintf(frame_filename, sizeof(frame_filename), "%s-%d.pgm", "frame", pCodecContext->frame_number);
 // Check if the frame is a planar YUV 4:2:0, 12bpp
 // That is the format of the provided .mp4 file
 // RGB formats will definitely not give a gray image
 // Other YUV image may do so, but untested, so give a warning
 if (pFrame->format != AV_PIX_FMT_YUV420P)
 {
 std::cout<<"Warning: the generated file may not be a grayscale image, but could e.g. be just the R component if the video format is RGB\n";
 }
 
 }
 }
 return 0;
}


The question that I am seeking an answer to is: how can I use libavcodec to split an mp4 file into 1-second chunks (each chunk being an mp4 file that a video player can play back)?
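
Note: since the goal is to keep the original video and audio exactly as they are, the chunks can be produced by remuxing (copying packets) rather than decoding and re-encoding. The following is a minimal, untested sketch of that idea; the chunk file names and the 1-second threshold are assumptions made for the example. It copies stream parameters with avcodec_parameters_copy, rescales timestamps with av_packet_rescale_ts, and starts a new file only on a video keyframe so that every chunk begins with a decodable frame:

#include <cstdio>
extern "C" {
#include <libavformat/avformat.h>
}

// open "chunk-NNN.mp4" and mirror every input stream via parameter copy
static AVFormatContext* open_chunk(AVFormatContext* in, int index) {
 char name[64];
 snprintf(name, sizeof(name), "chunk-%03d.mp4", index);
 AVFormatContext* out = nullptr;
 avformat_alloc_output_context2(&out, nullptr, nullptr, name);
 for (unsigned i = 0; i < in->nb_streams; i++) {
 AVStream* s = avformat_new_stream(out, nullptr);
 avcodec_parameters_copy(s->codecpar, in->streams[i]->codecpar);
 s->codecpar->codec_tag = 0; // let the mp4 muxer choose its own tag
 }
 avio_open(&out->pb, name, AVIO_FLAG_WRITE);
 avformat_write_header(out, nullptr);
 return out;
}

static void close_chunk(AVFormatContext* out) {
 av_write_trailer(out);
 avio_closep(&out->pb);
 avformat_free_context(out);
}

int main(int argc, char** argv) {
 AVFormatContext* in = nullptr;
 if (argc < 2 || avformat_open_input(&in, argv[1], nullptr, nullptr) < 0)
 return -1;
 avformat_find_stream_info(in, nullptr);
 int video = av_find_best_stream(in, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0);

 int index = 0;
 AVFormatContext* out = open_chunk(in, index);
 int64_t chunkStart = AV_NOPTS_VALUE; // pts of the chunk's first video packet
 AVPacket pkt;

 while (av_read_frame(in, &pkt) >= 0) {
 AVStream* ist = in->streams[pkt.stream_index];
 if (pkt.stream_index == video) {
 if (chunkStart == AV_NOPTS_VALUE)
 chunkStart = pkt.pts;
 double elapsed = (pkt.pts - chunkStart) * av_q2d(ist->time_base);
 // cut only on keyframes, so chunks are ~1 s rather than exactly 1 s
 if (elapsed >= 1.0 && (pkt.flags & AV_PKT_FLAG_KEY)) {
 close_chunk(out);
 out = open_chunk(in, ++index);
 chunkStart = pkt.pts;
 }
 }
 av_packet_rescale_ts(&pkt, ist->time_base, out->streams[pkt.stream_index]->time_base);
 av_interleaved_write_frame(out, &pkt);
 av_packet_unref(&pkt);
 }
 close_chunk(out);
 avformat_close_input(&in);
 return 0;
}

A production version would additionally subtract each chunk's start time from pts/dts before writing, so that every file begins at timestamp zero.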


-
Streaming Anki Vector's camera
25 November 2023, by Brendan Goode
I am trying to stream my robot to Remo.tv with my Vector robot. The website recognizes that I am going live, but it does not stream what the robot's camera is seeing. I have confirmed the camera works via a local application that runs the SDK. The very end of the code is what is giving issues; it appears somebody ripped code from Cozmo and attempted to paste it into a Vector file. It seems the camera is taking pictures, and we reach the point where it attempts to send a photo, but it fails.


# This is a dummy file to allow the automatic loading of modules without error on none.
import anki_vector
import atexit
import time
import _thread as thread
import logging
import networking

log = logging.getLogger('RemoTV.vector')
vector = None
reserve_control = None
robotKey = None
volume = 100 #this is stupid, but who cares
annotated = False

def connect():
 global vector
 global reserve_control

 log.debug("Connecting to Vector")
 vector = anki_vector.AsyncRobot()
 vector.connect()
 #reserve_control = anki_vector.behavior.ReserveBehaviorControl()
 
 atexit.register(exit)

 return(vector)

def exit():
 log.debug("Vector exiting")
 vector.disconnect()
 
def setup(robot_config):
 global forward_speed
 global turn_speed
 global volume
 global vector
 global charge_high
 global charge_low
 global stay_on_dock

 global robotKey
 global server
 global no_mic
 global no_camera
 global ffmpeg_location
 global v4l2_ctl_location
 global x_res
 global y_res
 
 robotKey = robot_config.get('robot', 'robot_key')

 if robot_config.has_option('misc', 'video_server'):
 server = robot_config.get('misc', 'video_server')
 else:
 server = robot_config.get('misc', 'server')
 
 no_mic = robot_config.getboolean('camera', 'no_mic')
 no_camera = robot_config.getboolean('camera', 'no_camera')

 ffmpeg_location = robot_config.get('ffmpeg', 'ffmpeg_location')
 v4l2_ctl_location = robot_config.get('ffmpeg', 'v4l2-ctl_location')

 x_res = robot_config.getint('camera', 'x_res')
 y_res = robot_config.getint('camera', 'y_res')


 if vector == None:
 vector = connect()

 #x mod_utils.repeat_task(30, check_battery, coz)

 if robot_config.has_section('cozmo'):
 forward_speed = robot_config.getint('cozmo', 'forward_speed')
 turn_speed = robot_config.getint('cozmo', 'turn_speed')
 volume = robot_config.getint('cozmo', 'volume')
 charge_high = robot_config.getfloat('cozmo', 'charge_high')
 charge_low = robot_config.getfloat('cozmo', 'charge_low')
 stay_on_dock = robot_config.getboolean('cozmo', 'stay_on_dock')

# if robot_config.getboolean('tts', 'ext_chat'): #ext_chat enabled, add motor commands
# extended_command.add_command('.anim', play_anim)
# extended_command.add_command('.forward_speed', set_forward_speed)
# extended_command.add_command('.turn_speed', set_turn_speed)
# extended_command.add_command('.vol', set_volume)
# extended_command.add_command('.charge', set_charging)
# extended_command.add_command('.stay', set_stay_on_dock)

 vector.audio.set_master_volume(volume) # set volume

 return
 
def move(args):
 global charging
 global low_battery
 command = args['button']['command']

 try:
 if vector.status.is_on_charger and not charging:
 if low_battery:
 print("Started Charging")
 charging = 1
 else:
 if not stay_on_dock:
 vector.drive_off_charger_contacts().wait_for_completed()

 if command == 'f':
 vector.behavior.say_text("Moving {}".format(command))

 #causes delays #coz.drive_straight(distance_mm(10), speed_mmps(50), False, True).wait_for_completed()
 vector.motors.set_wheel_motors(forward_speed, forward_speed, forward_speed*4, forward_speed*4 )
 time.sleep(0.7)
 vector.motors.set_wheel_motors(0, 0)
 elif command == 'b':
 #causes delays #coz.drive_straight(distance_mm(-10), speed_mmps(50), False, True).wait_for_completed()
 vector.motors.set_wheel_motors(-forward_speed, -forward_speed, -forward_speed*4, -forward_speed*4 )
 time.sleep(0.7)
 vector.motors.set_wheel_motors(0, 0)
 elif command == 'l':
 #causes delays #coz.turn_in_place(degrees(15), False).wait_for_completed()
 vector.motors.set_wheel_motors(-turn_speed, turn_speed, -turn_speed*4, turn_speed*4 )
 time.sleep(0.5)
 vector.motors.set_wheel_motors(0, 0)
 elif command == 'r':
 #causes delays #coz.turn_in_place(degrees(-15), False).wait_for_completed()
 vector.motors.set_wheel_motors(turn_speed, -turn_speed, turn_speed*4, -turn_speed*4 )
 time.sleep(0.5)
 vector.motors.set_wheel_motors(0, 0)

 #move lift
 elif command == 'w':
 vector.behavior.say_text("w")
 vector.set_lift_height(height=1).wait_for_completed()
 elif command == 's':
 vector.behavior.say_text("s")
 vector.set_lift_height(height=0).wait_for_completed()

 #look up down
 #-25 (down) to 44.5 degrees (up)
 elif command == 'q':
 #head_angle_action = coz.set_head_angle(degrees(0))
 #clamped_head_angle = head_angle_action.angle.degrees
 #head_angle_action.wait_for_completed()
 vector.behaviour.set_head_angle(45)
 time.sleep(0.35)
 vector.behaviour.set_head_angle(0)
 elif command == 'a':
 #head_angle_action = coz.set_head_angle(degrees(44.5))
 #clamped_head_angle = head_angle_action.angle.degrees
 #head_angle_action.wait_for_completed()
 vector.behaviour.set_head_angle(-22.0)
 time.sleep(0.35)
 vector.behaviour.set_head_angle(0)
 
 #things to say with TTS disabled
 elif command == 'sayhi':
 tts.say( "hi! I'm cozmo!" )
 elif command == 'saywatch':
 tts.say( "watch this" )
 elif command == 'saylove':
 tts.say( "i love you" )
 elif command == 'saybye':
 tts.say( "bye" )
 elif command == 'sayhappy':
 tts.say( "I'm happy" )
 elif command == 'saysad':
 tts.say( "I'm sad" )
 elif command == 'sayhowru':
 tts.say( "how are you?" )
 except:
 return(False)
 return

def start():
 log.debug("Starting Vector Video Process")
 try:
 thread.start_new_thread(video, ())
 except KeyboardInterrupt as e:
 pass 
 return
 
def video():
 global vector
 # Turn on image receiving by the camera
 vector.camera.init_camera_feed()

 vector.behavior.say_text("hey everyone, lets robot!")

 while True:
 time.sleep(0.25)

 from subprocess import Popen, PIPE
 from sys import platform

 log.debug("ffmpeg location : {}".format(ffmpeg_location))

# import os
# if not os.path.isfile(ffmpeg_location):
# print("Error: cannot find " + str(ffmpeg_location) + " check ffmpeg is installed. Terminating controller")
# thread.interrupt_main()
# thread.exit()

 while not networking.authenticated:
 time.sleep(1)

 p = Popen([ffmpeg_location, '-y', '-f', 'image2pipe', '-vcodec', 'png', '-r', '25', '-i', '-', '-vcodec', 'mpeg1video', '-r', '25', "-f", "mpegts", "-headers", "\"Authorization: Bearer {}\"".format(robotKey), "http://{}:1567/transmit?name={}-video".format(server, networking.channel_id)], stdin=PIPE)
 #p = Popen([ffmpeg_location, '-nostats', '-y', '-f', 'image2pipe', '-vcodec', 'png', '-r', '25', '-i', '-', '-vcodec', 'mpeg1video', '-r', '25','-b:v', '400k', "-f","mpegts", "-headers", "\"Authorization: Bearer {}\"".format(robotKey), "http://{}/transmit?name=rbot-390ddbe0-f1cc-4710-b3f1-9f477f4875f9-video".format(server)], stdin=PIPE)
 #p = Popen([ffmpeg_location, '-y', '-f', 'image2pipe', '-vcodec', 'png', '-r', '25', '-i', '-', '-vcodec', 'mpeg1video', '-r', '25', "-f", "mpegts", "-headers", "\"Authorization: Bearer {}\"".format(robotKey), "http://{}/transmit?name=rbot-390ddbe0-f1cc-4710-b3f1-9f477f4875f9-video".format(server, networking.channel_id)], stdin=PIPE)
 print(vector)
 image = vector.camera.latest_image
 image.raw_image.save("test.png", 'PNG')
 try:
 while True:
 if vector:
 image = vector.camera.latest_image
 if image:
 if annotated:
 image = image.annotate_image()
 else:
 image = image.raw_image
 print("attempting to write image")
 image.save(p.stdin, 'PNG')

 else:
 time.sleep(.1)
 log.debug("Lost Vector object, terminating video stream")
 p.stdin.close()
 p.wait()
 except Exception as e:
 log.debug("Vector Video Exception! {}".format(e))
 p.stdin.close()
 p.wait()
 pass 
 



Here is the error we get:


[vost#0:0/mpeg1video @ 000001c7153c1cc0] Error submitting a packet to the muxer: Error number -10053 occurred
[out#0/mpegts @ 000001c713448480] Error muxing a packet
[out#0/mpegts @ 000001c713448480] Error writing trailer: Error number -10053 occurred
[http @ 000001c7134cab00] URL read error: Error number -10053 occurred
[out#0/mpegts @ 000001c713448480] Error closing file: Error number -10053 occurred
[out#0/mpegts @ 000001c713448480] video:56kB audio:0kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: unknown
frame= 25 fps=0.0 q=2.0 Lsize= 53kB time=00:00:01.32 bitrate= 325.9kbits/s speed=7.05x
Conversion failed!

attempting to write image



You can see our attempts at a fix in the commented-out #p lines at the bottom.
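
Note: error number 10053 is the WinSock code WSAECONNABORTED ("software caused connection abort"), i.e. the HTTP connection to the video server was dropped while ffmpeg was still writing to it. One detail worth checking (an assumption, not a confirmed fix): Popen passes the argument list to ffmpeg directly, with no shell in between, so the escaped quotes in "\"Authorization: Bearer {}\"" become part of the header value itself, and ffmpeg's -headers option expects each header line to end with CRLF. A cleaned-up version of the command would be:

p = Popen([ffmpeg_location, '-y',
 '-f', 'image2pipe', '-vcodec', 'png', '-r', '25', '-i', '-',
 '-vcodec', 'mpeg1video', '-r', '25', '-f', 'mpegts',
 # no literal quotes around the header value, and CRLF-terminated
 '-headers', 'Authorization: Bearer {}\r\n'.format(robotKey),
 'http://{}:1567/transmit?name={}-video'.format(server, networking.channel_id)],
 stdin=PIPE)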