
Recherche avancée
Autres articles (52)
-
Les autorisations surchargées par les plugins
27 avril 2010, par Mediaspip core
autoriser_auteur_modifier() afin que les visiteurs soient capables de modifier leurs informations sur la page d’auteurs -
Publier sur MédiaSpip
13 juin 2013 — Puis-je poster des contenus à partir d’une tablette Ipad ?
Oui, si votre Médiaspip installé est à la version 0.2 ou supérieure. Contacter au besoin l’administrateur de votre MédiaSpip pour le savoir -
Des sites réalisés avec MediaSPIP
2 mai 2011 — Cette page présente quelques-uns des sites fonctionnant sous MediaSPIP.
Vous pouvez bien entendu ajouter le votre grâce au formulaire en bas de page.
Sur d’autres sites (10155)
-
C++ ffmpeg lib version 7.0 - noice in exported audio
2 septembre 2024, par Chris P
I want to make a C++ lib named cppdub which will mimic the python module pydub.


One main function is to export the AudioSegment to a file with a specific format (example : mp3).


The code is :


AudioSegment AudioSegment::from_file(const std::string& file_path, const std::string& format, const std::string& codec,
                                     const std::map<std::string, std::string>& parameters, int start_second, int duration) {
    // Decode `file_path` with FFmpeg and resample everything to the segment's
    // canonical in-memory format: 48 kHz, stereo, interleaved signed 16-bit PCM.
    // Returns an empty AudioSegment on any failure.
    //
    // NOTE(review): `format`, `codec`, `parameters`, `start_second` and
    // `duration` are accepted for pydub API parity but are currently ignored —
    // the whole stream is always decoded. Confirm before relying on them.
    constexpr int kOutRate = 48000;     // output sample rate (Hz)
    constexpr int kOutChannels = 2;     // stereo
    constexpr int kBytesPerSample = 2;  // AV_SAMPLE_FMT_S16

    avformat_network_init();
    av_log_set_level(AV_LOG_ERROR); // Adjust logging level as needed

    AVFormatContext* format_ctx = nullptr;
    if (avformat_open_input(&format_ctx, file_path.c_str(), nullptr, nullptr) != 0) {
        std::cerr << "Error: Could not open audio file." << std::endl;
        return AudioSegment(); // Return an empty AudioSegment on failure
    }

    if (avformat_find_stream_info(format_ctx, nullptr) < 0) {
        std::cerr << "Error: Could not find stream information." << std::endl;
        avformat_close_input(&format_ctx);
        return AudioSegment();
    }

    // Pick the first audio stream in the container.
    int audio_stream_index = -1;
    for (unsigned int i = 0; i < format_ctx->nb_streams; i++) {
        if (format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            audio_stream_index = static_cast<int>(i);
            break;
        }
    }

    if (audio_stream_index == -1) {
        std::cerr << "Error: Could not find audio stream." << std::endl;
        avformat_close_input(&format_ctx);
        return AudioSegment();
    }

    AVCodecParameters* codec_par = format_ctx->streams[audio_stream_index]->codecpar;
    const AVCodec* my_codec = avcodec_find_decoder(codec_par->codec_id);
    AVCodecContext* codec_ctx = avcodec_alloc_context3(my_codec);

    if (avcodec_parameters_to_context(codec_ctx, codec_par) < 0) {
        std::cerr << "Error: Could not initialize codec context." << std::endl;
        avcodec_free_context(&codec_ctx); // BUG FIX: was leaked on this path
        avformat_close_input(&format_ctx);
        return AudioSegment();
    }

    if (avcodec_open2(codec_ctx, my_codec, nullptr) < 0) {
        std::cerr << "Error: Could not open codec." << std::endl;
        avcodec_free_context(&codec_ctx);
        avformat_close_input(&format_ctx);
        return AudioSegment();
    }

    // Resampler: decoder's native layout/rate/format -> stereo 48 kHz S16.
    SwrContext* swr_ctx = swr_alloc();
    if (!swr_ctx) {
        std::cerr << "Error: Could not allocate SwrContext." << std::endl;
        avcodec_free_context(&codec_ctx);
        avformat_close_input(&format_ctx);
        return AudioSegment();
    }

    av_opt_set_chlayout(swr_ctx, "in_chlayout", &codec_ctx->ch_layout, 0);
    av_opt_set_int(swr_ctx, "in_sample_rate", codec_ctx->sample_rate, 0);
    av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", codec_ctx->sample_fmt, 0);

    // Output channel layout: plain default stereo. (The original copied the
    // input layout, immediately uninitialized it, then overwrote it — the
    // copy/uninit pair was dead code.)
    AVChannelLayout dst_ch_layout;
    av_channel_layout_default(&dst_ch_layout, kOutChannels);

    av_opt_set_chlayout(swr_ctx, "out_chlayout", &dst_ch_layout, 0);
    av_opt_set_int(swr_ctx, "out_sample_rate", kOutRate, 0);
    av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);

    if (swr_init(swr_ctx) < 0) {
        std::cerr << "Error: Failed to initialize the resampling context" << std::endl;
        swr_free(&swr_ctx);
        avcodec_free_context(&codec_ctx);
        avformat_close_input(&format_ctx);
        return AudioSegment();
    }

    AVPacket packet;
    AVFrame* frame = av_frame_alloc();
    if (!frame) {
        std::cerr << "Error: Could not allocate frame." << std::endl;
        swr_free(&swr_ctx);
        avcodec_free_context(&codec_ctx);
        avformat_close_input(&format_ctx);
        return AudioSegment();
    }

    std::vector<char> output;
    while (av_read_frame(format_ctx, &packet) >= 0) {
        if (packet.stream_index == audio_stream_index) {
            if (avcodec_send_packet(codec_ctx, &packet) == 0) {
                while (avcodec_receive_frame(codec_ctx, frame) == 0) {
                    // Upper bound on the resampled sample count for this frame,
                    // including samples still buffered inside the resampler.
                    int max_out_samples = (int)av_rescale_rnd(
                        swr_get_delay(swr_ctx, codec_ctx->sample_rate) + frame->nb_samples,
                        kOutRate, codec_ctx->sample_rate, AV_ROUND_UP);

                    int buf_size = av_samples_get_buffer_size(
                        nullptr, kOutChannels, max_out_samples, AV_SAMPLE_FMT_S16, 1);

                    uint8_t* buf = (uint8_t*)av_malloc(buf_size);
                    if (!buf) {
                        std::cerr << "Error: Could not allocate output buffer." << std::endl;
                        continue;
                    }

                    int converted_samples = swr_convert(swr_ctx, &buf, max_out_samples,
                                                        (const uint8_t**)frame->extended_data, frame->nb_samples);

                    if (converted_samples >= 0) {
                        // BUG FIX (root cause of the reported noise): append only
                        // the bytes swr_convert actually produced. The original
                        // appended the whole upper-bound-sized buffer, splicing
                        // runs of padding bytes between every frame of real PCM.
                        output.insert(output.end(), buf,
                                      buf + (size_t)converted_samples * kOutChannels * kBytesPerSample);
                    }
                    else {
                        std::cerr << "Error: Failed to convert audio samples." << std::endl;
                    }

                    av_free(buf);
                }
            }
            else {
                std::cerr << "Error: Failed to send packet to codec context." << std::endl;
            }
        }
        av_packet_unref(&packet);
    }

    // Drain samples still buffered inside the resampler so the tail of the
    // audio is not silently dropped.
    {
        int delay = (int)swr_get_delay(swr_ctx, kOutRate);
        if (delay > 0) {
            int buf_size = av_samples_get_buffer_size(nullptr, kOutChannels, delay, AV_SAMPLE_FMT_S16, 1);
            uint8_t* buf = (uint8_t*)av_malloc(buf_size);
            if (buf) {
                int flushed = swr_convert(swr_ctx, &buf, delay, nullptr, 0);
                if (flushed > 0) {
                    output.insert(output.end(), buf,
                                  buf + (size_t)flushed * kOutChannels * kBytesPerSample);
                }
                av_free(buf);
            }
        }
    }

    av_channel_layout_uninit(&dst_ch_layout);
    av_frame_free(&frame);
    swr_free(&swr_ctx);
    avcodec_free_context(&codec_ctx);
    avformat_close_input(&format_ctx);

    // Metadata matching the fixed output format above.
    std::map<std::string, int> metadata = {
        {"sample_width", kBytesPerSample},
        {"frame_rate", kOutRate},
        {"channels", kOutChannels},
        {"frame_width", kOutChannels * kBytesPerSample}
    };

    return AudioSegment(static_cast<const char*>(output.data()), output.size(), metadata);
}


std::ofstream AudioSegment::export_segment(std::string& out_f,
 const std::string& format,
 const std::string& codec,
 const std::string& bitrate,
 const std::vector& parameters,
 const std::map& tags,
 const std::string& id3v2_version,
 const std::string& cover) {
 av_log_set_level(AV_LOG_DEBUG);
 AVCodecContext* codec_ctx = nullptr;
 AVFormatContext* format_ctx = nullptr;
 AVStream* stream = nullptr;
 AVFrame* frame = nullptr;
 AVPacket* pkt = nullptr;
 int ret;

 // Open output file
 std::ofstream out_file(out_f, std::ios::binary);
 if (!out_file) {
 throw std::runtime_error("Failed to open output file.");
 }

 // Initialize format context
 avformat_alloc_output_context2(&format_ctx, nullptr, format.c_str(), out_f.c_str());
 if (!format_ctx) {
 throw std::runtime_error("Could not allocate format context.");
 }

 // Find encoder
 const AVCodec* codec_ptr = avcodec_find_encoder_by_name(codec.c_str());
 if (!codec_ptr) {
 throw std::runtime_error("Codec not found.");
 }

 // Add stream
 stream = avformat_new_stream(format_ctx, codec_ptr);
 if (!stream) {
 throw std::runtime_error("Failed to create new stream.");
 }

 // Allocate codec context
 codec_ctx = avcodec_alloc_context3(codec_ptr);
 if (!codec_ctx) {
 throw std::runtime_error("Could not allocate audio codec context.");
 }

 // Set codec parameters
 codec_ctx->bit_rate = std::stoi(bitrate);
 codec_ctx->sample_fmt = codec_ptr->sample_fmts ? codec_ptr->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
 codec_ctx->sample_rate = frame_rate_;
 codec_ctx->ch_layout.nb_channels = this->get_channels();
 AVChannelLayout ch_layout_1;
 av_channel_layout_uninit(&ch_layout_1);
 av_channel_layout_default(&ch_layout_1, this->get_channels());
 codec_ctx->ch_layout = ch_layout_1;

 // Open codec
 ret = avcodec_open2(codec_ctx, codec_ptr, nullptr);
 if (ret < 0) {
 throw std::runtime_error("Could not open codec.");
 }

 // Initialize packet
 pkt = av_packet_alloc();
 if (!pkt) {
 throw std::runtime_error("Could not allocate AVPacket.");
 }

 // Initialize frame
 frame = av_frame_alloc();
 if (!frame) {
 throw std::runtime_error("Could not allocate AVFrame.");
 }

 frame->nb_samples = codec_ctx->frame_size;
 frame->format = codec_ctx->sample_fmt;
 frame->ch_layout = codec_ctx->ch_layout;
 frame->sample_rate = codec_ctx->sample_rate;

 // Allocate data buffer
 ret = av_frame_get_buffer(frame, 0);
 if (ret < 0) {
 throw std::runtime_error("Could not allocate audio data buffers.");
 }

 // Encode frames
 int samples_read = 0;
 while (samples_read < data_.size()) {
 ret = av_frame_make_writable(frame);
 if (ret < 0) {
 throw std::runtime_error("Frame not writable.");
 }

 // Determine the number of samples to copy into the frame
 int frame_size = std::min<int>(codec_ctx->frame_size, (data_.size() - samples_read) / frame_width_);
 int buffer_size = frame_size * frame_width_;

 // Clear the frame data to avoid artifacts from previous data
 std::memset(frame->data[0], 0, codec_ctx->frame_size * frame_width_);

 // Copy the actual audio data into the frame
 std::memcpy(frame->data[0], data_.data() + samples_read, buffer_size);
 samples_read += buffer_size;

 // If the frame is partially filled, pad the remaining part with zeros
 if (frame_size < codec_ctx->frame_size) {
 std::memset(frame->data[0] + buffer_size, 0, (codec_ctx->frame_size - frame_size) * frame_width_);
 }

 // Send the frame for encoding
 ret = avcodec_send_frame(codec_ctx, frame);
 if (ret < 0) {
 throw std::runtime_error("Error sending frame for encoding.");
 }

 // Receive and write packets
 while (ret >= 0) {
 ret = avcodec_receive_packet(codec_ctx, pkt);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
 break;
 }
 else if (ret < 0) {
 throw std::runtime_error("Error encoding frame.");
 }

 out_file.write(reinterpret_cast(pkt->data), pkt->size);
 av_packet_unref(pkt);
 }
 }

 // **Explicitly flush the encoder**
 ret = avcodec_send_frame(codec_ctx, nullptr);
 if (ret < 0) {
 throw std::runtime_error("Error flushing the encoder.");
 }

 // Receive and write remaining packets after flushing
 while (ret >= 0) {
 ret = avcodec_receive_packet(codec_ctx, pkt);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
 break;
 }
 else if (ret < 0) {
 throw std::runtime_error("Error encoding frame during flush.");
 }

 out_file.write(reinterpret_cast(pkt->data), pkt->size);
 av_packet_unref(pkt);
 }

 // Cleanup
 av_frame_free(&frame);
 av_packet_free(&pkt);
 avcodec_free_context(&codec_ctx);
 avformat_free_context(format_ctx);

 return out_file;
}


I have no run time error but i see this message in console :


[libmp3lame @ 000002d26b239ac0] Trying to remove 47 more samples than there are in the queue



I can play the exported mp3 file but there is background noise.


-
How to optimize this code that gets video frame as image
14 août 2024, par TSR
I am quite new to mp4 file. But here is my working attempt to extract image frame given video url and a timestamp.


In reality the input url is an 8K 10hours 200GB video, so I can't download it all, I can't load it to memory, and this is an API call so it has to be fast.


Is there anything else I can optimize ?


My doubts :


- 

-
This line
ffprobe -v error -select_streams v:0 -show_entries packet=pos,size,dts_time -read_intervals ${timestamp}%+5 -of csv=p=0 "${url}"
I chose this clingy 5s, in which case would this fail ?

-
Same line, I don't know what's going on under the hood of this
ffprobe
command, but I tested it with the big 8K video and it seems to complete fast. So is it safe to assume that the entire 200GB was not downloaded ? An explanation of how this ffprobe
command work would be appreciated

-
Based on trial and error, I concluded that the intervals returned is parsable by
ffmpeg
only if the first frame until the timestamp is included. If I include only that single frame interval, ffmpeg
says it is an invalid file. (Makes sense cuz I don't think I'll get an image from a 4byte data.) However, how can I be sure that I am selecting the least number of intervals.

-
Worse bottleneck : The function
extractFrame
takes 6seconds on the big video. It seems to read the entire video segment fetched (by the preceding subrange step). I couldn't find a way to jump to the last frame without computing. Is this how MP4 work ? I read somewhere that MP4 computes the current frame based on the previous. If that is true, does it mean there is no way to compute a specific frame without reading everything since the last keyframe ?

-
Finally, this
ffmpeg
line is fishy (I got it from SO Extract final frame from a video using ffmpeg) it says that it ovewrites the output at everyframe . Does it mean it is writing to the disk every time ? I experience severe degradation in performance when I used.png
instead of.jpg
. This tells me that the image is computed every
frame. Can we compute only the final image at the very end ?













Here is the working code to optimize.


import path from "path";
import axios from "axios";
import ffmpeg from "fluent-ffmpeg";
import fs from "fs";
import {promisify} from 'util';
import {exec} from 'child_process';

const execPromise = promisify(exec);


// URL of the video and desired timestamp (in seconds)

const videoUrl = 'https://raw.githubusercontent.com/tolotrasamuel/assets/main/videoplayback.mp4';

console.log(videoUrl);
const timestamp = 30; // Example: 30 seconds into the video


// Function to get the byte range using ffprobe
// Resolve the byte range that must be fetched so the frame at `timestamp`
// (seconds) can be decoded. Probes packet positions with ffprobe over a
// 5-second window starting at the timestamp and returns
// {startByte, endByte, startTime}, where startTime is the chosen frame's
// offset relative to the first fetched packet.
const getByteRangeForTimestamp = async (url, timestamp) => {
 // Use ffprobe to get the offset and size of the frame at the given timestamp
 const command = `ffprobe -v error -select_streams v:0 -show_entries packet=pos,size,dts_time -read_intervals ${timestamp}%+5 -of csv=p=0 "${url}"`;
 console.log('Running command:', command);
 const {stdout} = await execPromise(command);

 // Parse the CSV output: one "dts_time,size,pos" row per packet.
 // (Filter out blank lines so empty ffprobe output is reported as
 // "no frames" instead of producing a single NaN-filled entry.)
 const frames = stdout.trim().split("\n")
 .filter(row => row.length > 0)
 .map(row => {
 const [dts_time, size, offset] = row.split(',');
 return {dts_time: parseFloat(dts_time), size: parseInt(size), offset: parseInt(offset)};
 });

 if (frames.length === 0) {
 throw new Error('No frames found in the specified interval');
 }

 // Walk forward until we pass the requested timestamp, remembering the
 // closest packet seen. BUG FIX: `closest` is initialised to the first
 // frame — the original left it undefined, so if the very first packet
 // already sat at or past `timestamp`, `closest.dts_time` threw a
 // TypeError. (The in-loop `i === frames.length` check was unreachable
 // and has been dropped.)
 let closest = frames[0];
 for (const frame of frames) {
 if (frame.dts_time >= timestamp) {
 const oldDiff = Math.abs(closest.dts_time - timestamp);
 const newDiff = Math.abs(frame.dts_time - timestamp);
 if (newDiff < oldDiff) {
 closest = frame;
 }
 break;
 }
 closest = frame;
 }

 // I experimented with this, but it seems that the first frame is always at the beginning of a valid atom
 // anything after that will make the video unplayable
 const startByte = frames[0].offset;
 const endByte = closest.offset + closest.size - 1;

 const diff = Math.abs(closest.dts_time - timestamp);
 const size = endByte - startByte + 1;
 console.log("Start: ", startByte, "End: ", endByte, "Diff: ", diff, "Timestamp: ", timestamp, "Closest: ", closest.dts_time, "Size to fetch: ", size)

 const startTime = closest.dts_time - frames[0].dts_time;
 return {startByte, endByte, startTime};
};

// Download the specific segment
// Fetch the byte window [startByte, endByte] of `url` with an HTTP Range
// request and persist the raw bytes at `outputPath`.
const downloadSegment = async (url, startByte, endByte, outputPath) => {
 console.log(`Downloading bytes ${startByte}-${endByte}...`);

 const requestConfig = {
 responseType: 'arraybuffer',
 headers: {Range: `bytes=${startByte}-${endByte}`},
 };
 const response = await axios.get(url, requestConfig);

 console.log('Segment downloaded!', response.data.length, "Written to: ", outputPath);
 fs.writeFileSync(outputPath, response.data);
};

// Extract frame from the segment
// Shell out to ffmpeg directly: seek to 3 s before EOF and keep overwriting
// the single output image (-update 1) so the last decoded frame wins.
const extractFrameRaw = async (videoPath, timestamp, outputFramePath) => {
 const command = `ffmpeg -sseof -3 -i ${videoPath} -update 1 -q:v 1 ${outputFramePath} -y`;
 console.log('Running command:', command);

 const startedAt = new Date().getTime();
 await execPromise(command);
 const finishedAt = new Date().getTime();

 console.log('Processing time:', finishedAt - startedAt, 'ms');
 console.log('Frame extracted to:', outputFramePath);
};
// Extract the final frame of `videoPath` into `outputFramePath` using
// fluent-ffmpeg.
//
// BUG FIX: the original returned undefined immediately, so the caller's
// `await extractFrame(...)` resolved before ffmpeg had even started writing,
// and failures were only logged, never propagated. Wrapping the event-based
// API in a Promise makes completion awaitable and errors catchable.
const extractFrame = (videoPath, timestamp, outputFramePath) => {
 return new Promise((resolve, reject) => {
 ffmpeg(videoPath)
 .inputOptions(['-sseof -5']) // Seeks to 5 seconds before the end of the video
 .outputOptions([
 '-update 1', // Continuously update the output file with new frames
 '-q:v 1' // Set the highest JPEG quality
 ])
 .output(outputFramePath) // Set the output file path

 // log
 .on('start', function (commandLine) {
 console.log('Spawned Ffmpeg with command: ' + commandLine);
 })
 .on('progress', function (progress) {
 console.log('Processing: ' + progress.timemark + '% done', progress, 'frame: ', progress.frames);
 })
 .on('end', function () {
 console.log('Processing finished !');
 resolve();
 })
 .on('error', function (err, stdout, stderr) {
 console.error('Error:', err);
 console.error('ffmpeg stderr:', stderr);
 reject(err);
 })
 .run();
 });
};


const __dirname = path.resolve();

// Main function to orchestrate the process
// Entry point: probe for the byte range, download just that range, then
// extract the frame — logging (not rethrowing) any failure.
(async () => {
 try {
 // ffmpeg.setFfmpegPath('/usr/local/bin/ffmpeg');
 // 1. Ask ffprobe which byte window covers the frame at `timestamp`.
 const {startByte, endByte, startTime} = await getByteRangeForTimestamp(videoUrl, timestamp);
 const tmpVideoPath = path.resolve(__dirname, 'temp_video.mp4');
 const outputFramePath = path.resolve(__dirname, `frame_${timestamp}.jpg`);

 // 2. Ranged download of the segment, then 3. extract the frame from it.
 // NOTE(review): extractFrame returns undefined (not a Promise), so this
 // `await` does not actually wait for ffmpeg to finish — confirm before
 // relying on the output file existing after this line.
 await downloadSegment(videoUrl, startByte, endByte, tmpVideoPath);
 await extractFrame(tmpVideoPath, startTime, outputFramePath);
 } catch (err) {
 console.error('Error:', err);
 }
})();



-
-
How to extract the first frame from a video ?
10 août 2024, par Andrew
I am trying to extract the first frame from a video to save it as a preview.


Here's an almost working solution using ffmpeg :


// extractFirstFrame feeds videoBytes to ffmpeg over stdin (pipe:0), keeps
// frames with index >= 1 via the `select` filter, and returns one frame
// encoded as MJPEG read back from stdout (pipe:1).
//
// NOTE(review): despite the name, gte(n,1) selects the *second* frame
// (n is zero-based) — confirm which frame is intended.
// NOTE(review): the whole video is held in memory and piped through a
// non-seekable stdin. The attached failing log ("stream 0 ... partial file",
// "unspecified pixel format") is consistent with an MP4 whose moov atom sits
// at the end of the file, which cannot be parsed from a pipe — the same file
// works when ffmpeg reads it from disk. Verify by remuxing the failing files
// with `-movflags +faststart`.
func extractFirstFrame(videoBytes []byte) ([]byte, error) {
	input := bytes.NewReader(videoBytes)

	var output bytes.Buffer

	// vframes=1 stops after a single matched frame; image2/mjpeg produce a JPEG.
	err := ffmpeg.Input("pipe:0").
		Filter("select", ffmpeg.Args{"gte(n,1)"}).
		Output(
			"pipe:1",
			ffmpeg.KwArgs{
				"vframes": 1,
				"format":  "image2",
				"vcodec":  "mjpeg",
			}).
		WithInput(input).
		WithOutput(&output).
		Run()

	if err != nil {
		return nil, fmt.Errorf("error extracting frame: %v", err)
	}

	return output.Bytes(), nil
}



The problem with this is that it can only process horizontal videos. For some reason it will throw a
0xbebbb1b7
error for a vertical videos. I don't understand why is that, probably because this is my very first time with ffmpeg.

Also, I am concerned if this solution is optimal. My assumption is that the whole video will be loaded into the memory first, which is kinda bad and I would like to avoid that


I use
https://github.com/u2takey/ffmpeg-go
, but errors are the same even if I run it using exec
:

cmd := exec.Command(
 "ffmpeg",
 "-i", "pipe:0",
 "-vf", "select=eq(n\\,0)",
 "-q:v", "3",
 "-f", "image2",
 "pipe:1",
 )



Interestingly enough, I noticed that running command in the terminal will work as expected with any type of video
ffmpeg -i .\video_2024-08-10_00-03-00.mp4 -vf "select=eq(n\,0)" -q:v 3 output_image.jpg
, so the problem may be on how I send those videos to my server. I use Fiber and send videos as multipart/form-data
and then read it like this :

form_data, err := c.FormFile("image")
 file, err := form_data.Open()
 bytes, err := io.ReadAll(file)
 ....
 preview, err := extractFirstFrame(bytes)




I also managed to find logs for the failed attempt :



ffmpeg version 7.0.2-full_build-www.gyan.dev Copyright (c) 2000-2024 the FFmpeg developers
 built with gcc 13.2.0 (Rev5, Built by MSYS2 project)
 configuration: --enable-gpl --enable-version3 --enable-static --disable-w32threads --disable-autodetect --enable-fontconfig --enable-iconv --enable-gnutls --enable-libxml2 --enable-gmp --enable-bzlib --enable-lzma --enable-libsnappy --enable-zlib --enable-librist --enable-libsrt --enable-libssh --enable-libzmq --enable-avisynth --enable-libbluray --enable-libcaca --enable-sdl2 --enable-libaribb24 --enable-libaribcaption --enable-libdav1d --enable-libdavs2 --enable-libuavs3d --enable-libxevd --enable-libzvbi --enable-librav1e --enable-libsvtav1 --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxavs2 --enable-libxeve --enable-libxvid --enable-libaom --enable-libjxl --enable-libopenjpeg --enable-libvpx --enable-mediafoundation --enable-libass --enable-frei0r --enable-libfreetype --enable-libfribidi --enable-libharfbuzz --enable-liblensfun --enable-libvidstab --enable-libvmaf --enable-libzimg --enable-amf --enable-cuda-llvm --enable-cuvid --enable-dxva2 --enable-d3d11va --enable-d3d12va --enable-ffnvcodec --enable-libvpl --enable-nvdec --enable-nvenc --enable-vaapi --enable-libshaderc --enable-vulkan --enable-libplacebo --enable-opencl --enable-libcdio --enable-libgme --enable-libmodplug --enable-libopenmpt --enable-libopencore-amrwb --enable-libmp3lame --enable-libshine --enable-libtheora --enable-libtwolame --enable-libvo-amrwbenc --enable-libcodec2 --enable-libilbc --enable-libgsm --enable-libopencore-amrnb --enable-libopus --enable-libspeex --enable-libvorbis --enable-ladspa --enable-libbs2b --enable-libflite --enable-libmysofa --enable-librubberband --enable-libsoxr --enable-chromaprint
 libavutil 59. 8.100 / 59. 8.100
 libavcodec 61. 3.100 / 61. 3.100
 libavformat 61. 1.100 / 61. 1.100
 libavdevice 61. 1.100 / 61. 1.100
 libavfilter 10. 1.100 / 10. 1.100
 libswscale 8. 1.100 / 8. 1.100
 libswresample 5. 1.100 / 5. 1.100
 libpostproc 58. 1.100 / 58. 1.100
[mov,mp4,m4a,3gp,3g2,mj2 @ 000002a562ff0e80] stream 0, offset 0x30: partial file
[mov,mp4,m4a,3gp,3g2,mj2 @ 000002a562ff0e80] Could not find codec parameters for stream 1 (Video: h264 (avc1 / 0x31637661), none, 720x1280, 3093 kb/s): unspecified pixel format
Consider increasing the value for the 'analyzeduration' (0) and 'probesize' (5000000) options
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'pipe:0':
 Metadata:
 major_brand : isom
 minor_version : 512
 compatible_brands: isomiso2avc1mp41
 creation_time : 2024-08-09T20:00:29.000000Z
 Duration: 00:00:06.80, start: 0.000000, bitrate: N/A
 Stream #0:0[0x1](eng): Audio: aac (mp4a / 0x6134706D), 22050 Hz, stereo, fltp, 74 kb/s (default)
 Metadata:
 creation_time : 2024-08-09T20:00:23.000000Z
 handler_name : SoundHandle
 vendor_id : [0][0][0][0]
 Stream #0:1[0x2](eng): Video: h264 (avc1 / 0x31637661), none, 720x1280, 3093 kb/s, 30 fps, 30 tbr, 90k tbn (default)
 Metadata:
 creation_time : 2024-08-09T20:00:23.000000Z
 handler_name : VideoHandle
 vendor_id : [0][0][0][0]
Stream mapping:
 Stream #0:1 -> #0:0 (h264 (native) -> mjpeg (native))
[mov,mp4,m4a,3gp,3g2,mj2 @ 000002a562ff0e80] stream 1, offset 0x5d: partial file
[in#0/mov,mp4,m4a,3gp,3g2,mj2 @ 000002a562fda2c0] Error during demuxing: Invalid data found when processing input
Cannot determine format of input 0:1 after EOF
[vf#0:0 @ 000002a5636c0ec0] Task finished with error code: -1094995529 (Invalid data found when processing input)
[vf#0:0 @ 000002a5636c0ec0] Terminating thread with return code -1094995529 (Invalid data found when processing input)
[vost#0:0/mjpeg @ 000002a5636c0400] Could not open encoder before EOF
[vost#0:0/mjpeg @ 000002a5636c0400] Task finished with error code: -22 (Invalid argument)
[vost#0:0/mjpeg @ 000002a5636c0400] Terminating thread with return code -22 (Invalid argument)
[out#0/image2 @ 000002a562ff5640] Nothing was written into output file, because at least one of its streams received no packets.
frame= 0 fps=0.0 q=0.0 Lsize= 0KiB time=N/A bitrate=N/A speed=N/A
Conversion failed!



Logs for the same video, but ran via terminal :


ffmpeg version 7.0.2-full_build-www.gyan.dev Copyright (c) 2000-2024 the FFmpeg developers
 built with gcc 13.2.0 (Rev5, Built by MSYS2 project)
 configuration: --enable-gpl --enable-version3 --enable-static --disable-w32threads --disable-autodetect --enable-fontconfig --enable-iconv --enable-gnutls --enable-libxml2 --enable-gmp --enable-bzlib --enable-lzma --enable-libsnappy --enable-zlib --enable-librist --enable-libsrt --enable-libssh --enable-libzmq --enable-avisynth --enable-libbluray --enable-libcaca --enable-sdl2 --enable-libaribb24 --enable-libaribcaption --enable-libdav1d --enable-libdavs2 --enable-libuavs3d --enable-libxevd --enable-libzvbi --enable-librav1e --enable-libsvtav1 --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxavs2 --enable-libxeve --enable-libxvid --enable-libaom --enable-libjxl --enable-libopenjpeg --enable-libvpx --enable-mediafoundation --enable-libass --enable-frei0r --enable-libfreetype --enable-libfribidi --enable-libharfbuzz --enable-liblensfun --enable-libvidstab --enable-libvmaf --enable-libzimg --enable-amf --enable-cuda-llvm --enable-cuvid --enable-dxva2 --enable-d3d11va --enable-d3d12va --enable-ffnvcodec --enable-libvpl --enable-nvdec --enable-nvenc --enable-vaapi --enable-libshaderc --enable-vulkan --enable-libplacebo --enable-opencl --enable-libcdio --enable-libgme --enable-libmodplug --enable-libopenmpt --enable-libopencore-amrwb --enable-libmp3lame --enable-libshine --enable-libtheora --enable-libtwolame --enable-libvo-amrwbenc --enable-libcodec2 --enable-libilbc --enable-libgsm --enable-libopencore-amrnb --enable-libopus --enable-libspeex --enable-libvorbis --enable-ladspa --enable-libbs2b --enable-libflite --enable-libmysofa --enable-librubberband --enable-libsoxr --enable-chromaprint
 libavutil 59. 8.100 / 59. 8.100
 libavcodec 61. 3.100 / 61. 3.100
 libavformat 61. 1.100 / 61. 1.100
 libavdevice 61. 1.100 / 61. 1.100
 libavfilter 10. 1.100 / 10. 1.100
 libswscale 8. 1.100 / 8. 1.100
 libswresample 5. 1.100 / 5. 1.100
 libpostproc 58. 1.100 / 58. 1.100
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from '.\video_2024-08-10_00-03-00.mp4':
 Metadata:
 major_brand : isom
 minor_version : 512
 compatible_brands: isomiso2avc1mp41
 creation_time : 2024-08-09T20:00:29.000000Z
 Duration: 00:00:06.80, start: 0.000000, bitrate: 3175 kb/s
 Stream #0:0[0x1](eng): Audio: aac (HE-AAC) (mp4a / 0x6134706D), 44100 Hz, stereo, fltp, 74 kb/s (default)
 Metadata:
 creation_time : 2024-08-09T20:00:23.000000Z
 handler_name : SoundHandle
 vendor_id : [0][0][0][0]
 Stream #0:1[0x2](eng): Video: h264 (Baseline) (avc1 / 0x31637661), yuv420p(tv, bt709, progressive), 720x1280, 3093 kb/s, 30 fps, 30 tbr, 90k tbn (default)
 Metadata:
 creation_time : 2024-08-09T20:00:23.000000Z
 handler_name : VideoHandle
 vendor_id : [0][0][0][0]
Stream mapping:
 Stream #0:1 -> #0:0 (h264 (native) -> mjpeg (native))
Press [q] to stop, [?] for help
[swscaler @ 00000219eb4d0c00] deprecated pixel format used, make sure you did set range correctly
Output #0, image2, to 'output_image.jpg':
 Metadata:
 major_brand : isom
 minor_version : 512
 compatible_brands: isomiso2avc1mp41
 encoder : Lavf61.1.100
 Stream #0:0(eng): Video: mjpeg, yuvj420p(pc, unknown/bt709/bt709, progressive), 720x1280, q=2-31, 200 kb/s, 30 fps, 30 tbn (default)
 Metadata:
 creation_time : 2024-08-09T20:00:23.000000Z
 handler_name : VideoHandle
 vendor_id : [0][0][0][0]
 encoder : Lavc61.3.100 mjpeg
 Side data:
 cpb: bitrate max/min/avg: 0/0/200000 buffer size: 0 vbv_delay: N/A
[image2 @ 00000219e7aa51c0] The specified filename 'output_image.jpg' does not contain an image sequence pattern or a pattern is invalid.
[image2 @ 00000219e7aa51c0] Use a pattern such as %03d for an image sequence or use the -update option (with -frames:v 1 if needed) to write a single image.
[out#0/image2 @ 00000219e7b5e400] video:49KiB audio:0KiB subtitle:0KiB other streams:0KiB global headers:0KiB muxing overhead: unknown
frame= 1 fps=0.0 q=3.0 Lsize=N/A time=00:00:00.03 bitrate=N/A speed=0.322x



I also realized that the problem was not related to the video orientation, but to to the video from a specific source uploaded specifically on my server 🤡 Such a weird coincidence that I decided to test my video upload with those files, but anyway. And those are.... Downloaded instagram reels lol. But again, ffmpeg works just fine when ran in terminal