
Other articles (51)
-
Customising categories
21 June 2013
Form for creating a category
For those who know SPIP well, a category can be thought of as a rubrique (section).
For a document of type category, the fields offered by default are: Text
This form can be modified under:
Administration > Configuration des masques de formulaire.
For a document of type media, the fields not displayed by default are: Short description
It is also in this configuration section that you can specify the (...) -
Publishing on MediaSPIP
13 June 2013
Can I post content from an iPad tablet?
Yes, if your installed MediaSPIP is at version 0.2 or higher. If in doubt, contact the administrator of your MediaSPIP to find out. -
MediaSPIP 0.1 Beta version
25 April 2011
MediaSPIP 0.1 beta is the first version of MediaSPIP proclaimed as "usable".
The zip file provided here only contains the sources of MediaSPIP in its standalone version.
To get a working installation, you must manually install all software dependencies on the server.
If you want to use this archive for an installation in "farm mode", you will also need to proceed to other manual (...)
On other sites (6821)
-
FFMpeg Coding in C: Encoder returns EOF at first interaction. Encoder not opened correctly? [closed]
26 February, by Davidhohey
As I'm fairly new to FFmpeg programming and C in general, the code looks like a mess.


I have smashed my head against a wall trying to get this code to work for about a week.


int decode_encode_pipeline(AVFormatContext *Input_Format_Context, AVFormatContext *Output_Format_Context, int *streams_list){

 const AVCodec *DECodec, *ENCodec;
 AVCodecContext *DECodecContext = NULL, *ENCodecContext = NULL;
 AVCodecParameters *CodecParameters = NULL;
 AVDictionary *opts = NULL;
 AVPacket *Packet;
 AVFrame *Frame;
 int check;

 Packet = av_packet_alloc();
 if(!Packet){
 
 printf("\nFehler bei Allocating Packet");
 
 return 0;
 
 }

 Frame = av_frame_alloc();
 if(!Frame){
 
 printf("\nFehler bei Allocating Frame");
 
 return 0;
 
 }

 CodecParameters = Input_Format_Context->streams[Packet->stream_index]->codecpar;
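 /* Note: Packet was only just allocated, so stream_index still has its
 default value (0) here; these parameters come from stream 0, which is
 not necessarily the video stream. */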
 if(!CodecParameters){

 printf("\nCodecParameters konnte nicht erstellt oder zugewiesen werden.");

 }

 DECodec = avcodec_find_decoder(CodecParameters->codec_id);
 if(!DECodec){
 
 printf("\nCodec nicht gefunden");
 
 return 0;
 
 }

 DECodecContext = avcodec_alloc_context3(DECodec);
 if (!DECodecContext){
 
 printf("\nFehler bei Allocating CodecContext");
 
 return 0;
 
 }

 ENCodec = avcodec_find_encoder(CodecParameters->codec_id);
 if(!ENCodec){
 
 printf("\nCodec nicht gefunden");
 
 return 0;
 
 }

 ENCodecContext = avcodec_alloc_context3(ENCodec);
 if (!ENCodecContext){
 
 printf("\nFehler bei Allocating CodecContext");
 
 return 0;
 
 }

 check = avformat_write_header(Output_Format_Context, &opts);
 if(check < 0){

 printf("\nFehler beim Öffnen des Output Files.");
 
 return 1;

 }

 avcodec_parameters_to_context(DECodecContext, CodecParameters);
 avcodec_parameters_to_context(ENCodecContext, CodecParameters);

 ENCodecContext->width = DECodecContext->width;
 ENCodecContext->height = DECodecContext->height;
 ENCodecContext->bit_rate = DECodecContext->bit_rate;
 ENCodecContext->time_base = (AVRational){1, 30};
 ENCodecContext->framerate = DECodecContext->framerate;
 ENCodecContext->gop_size = DECodecContext->gop_size;
 ENCodecContext->max_b_frames = DECodecContext->max_b_frames;
 ENCodecContext->pix_fmt = DECodecContext->pix_fmt;
 if(ENCodec->id == AV_CODEC_ID_H264){

 av_opt_set(ENCodecContext->priv_data, "preset", "slow", 0);

 }

 check = avcodec_open2(DECodecContext, DECodec, NULL);
 if(check < 0){
 
 printf("\nFehler bei Öffnen von DECodec");
 
 return 1;
 
 }

 check = avcodec_open2(ENCodecContext, ENCodec, NULL);
 if(check < 0){
 
 printf("\nFehler bei Öffnen von ENCodec");
 
 return 1;
 
 }

 while(1){
 
 check = av_read_frame(Input_Format_Context, Packet);
 if(check < 0){
 
 break;
 
 }

 AVStream *in_stream, *out_stream;

 in_stream = Input_Format_Context->streams[Packet->stream_index];
 out_stream = Output_Format_Context->streams[Packet->stream_index];

 if(in_stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && Packet->stream_index == streams_list[Packet->stream_index]){

 check = avcodec_send_packet(DECodecContext, Packet);
 if(check < 0){

 printf("\nFehler bei Encoding");

 return 1;

 }

 AVPacket *EncodedPacket;
 EncodedPacket = av_packet_alloc();
 if(!EncodedPacket){
 
 printf("\nFehler bei Allocating Packet");
 
 return 1;
 
 }

 /*While Loop Decoding*/
 while(check >= 0){
 
 check = avcodec_receive_frame(DECodecContext, Frame);
 if(check == AVERROR(EAGAIN)){
 
 continue;
 
 }else if(check == AVERROR_EOF){
 
 break;
 
 }else if(check < 0){
 
 printf("\nFehler bei Decoding");
 
 return 1;
 
 }

 /*Convert Colorspace*/
 struct SwsContext *SwsContexttoRGB = sws_getContext(Frame->width, Frame->height, Frame->format, Frame->width, Frame->height, AV_PIX_FMT_RGB24, SWS_BILINEAR, NULL, NULL, NULL);
 struct SwsContext *SwsContexttoOriginal = sws_getContext(Frame->width, Frame->height, AV_PIX_FMT_RGB24, Frame->width, Frame->height, Frame->format, SWS_BILINEAR, NULL, NULL, NULL);
 if(!SwsContexttoRGB || !SwsContexttoOriginal){

 printf("\nSwsContext konnte nicht befüllt werden.");

 return 1;

 } 

 if(Frame->linesize[0] < 0){

 printf("\nFehler: linesize ist negativ und nicht kompatibel\n");

 return 1;

 }

 AVFrame *RGBFrame;
 RGBFrame = av_frame_alloc();
 if(!RGBFrame){

 printf("\nFehler bei der Reservierung für den RGBFrame");

 return 1;

 }
 /*
 int number_bytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, Frame->width, Frame->height, 1);
 if(number_bytes < 0){

 printf("\nFehler bei der Berechnung der benoetigten Bytes fuer Konvertierung");

 return 1;

 }
 
 uint8_t *rgb_buffer = (uint8_t *)av_malloc(number_bytes*sizeof(uint8_t));
 if(rgb_buffer == NULL){

 printf("\nFehler bei der Reservierung für den RGBBuffer");

 return 1;

 }

 check = av_image_fill_arrays(RGBFrame->data, RGBFrame->linesize, rgb_buffer, AV_PIX_FMT_RGB24, Frame->width, Frame->height, 1);
 if(check < 0){

 printf("\nFehler bei der Zuweisung der RGB Daten");

 return 1;

 }*/

 //sws_scale(SwsContexttoRGB, (const uint8_t * const *)Frame->data, Frame->linesize, 0, Frame->height, RGBFrame->data, RGBFrame->linesize);
 sws_scale_frame(SwsContexttoRGB, Frame, RGBFrame);
 printf("\nIch habe die Daten zu RGB konvertiert.");

 //sws_scale(SwsContexttoOriginal, (const uint8_t * const *)RGBFrame->data, RGBFrame->linesize, 0, Frame->height, Frame->data, Frame->linesize);
 sws_scale_frame(SwsContexttoOriginal, RGBFrame, Frame);
 printf("\nIch habe die Daten zurück ins Original konvertiert.");

 Frame->format = ENCodecContext->pix_fmt;
 Frame->width = ENCodecContext->width;
 Frame->height = ENCodecContext->height;
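 /* Note: av_frame_get_buffer() below allocates fresh, uninitialized data
 buffers for Frame, so the picture that was just converted back from RGB
 is discarded before encoding. */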
 
 check = av_frame_get_buffer(Frame, 0);
 if(check < 0){
 
 printf("\nFehler bei Allocating Frame Buffer");
 
 return 1;
 
 }

 /* Encoding */
 check = av_frame_make_writable(Frame);
 if(check < 0){

 printf("\nFehler bei Make Frame Writable");

 return 1;

 }

 encode(ENCodecContext, Frame, EncodedPacket, Output_Format_Context);

 sws_freeContext(SwsContexttoRGB);
 sws_freeContext(SwsContexttoOriginal);
 av_frame_free(&RGBFrame);
 //av_free(rgb_buffer);

 }

 /* Flushing Encoder */
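 /* Note (per the FFmpeg docs): sending NULL puts the encoder into draining
 mode, and every later avcodec_send_frame() call returns AVERROR_EOF
 unless avcodec_flush_buffers() is called first. This block runs inside
 the av_read_frame() loop, i.e. once per video packet. */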
 encode(ENCodecContext, NULL, EncodedPacket, Output_Format_Context);

 //avcodec_flush_buffers(DECodecContext);
 //avcodec_flush_buffers(ENCodecContext);

 av_packet_free(&EncodedPacket);

 }else{

 av_interleaved_write_frame(Output_Format_Context, Packet);

 }

 }

 av_write_trailer(Output_Format_Context); 

 /* Memory Free */
 avcodec_free_context(&DECodecContext);
 avcodec_free_context(&ENCodecContext);
 avcodec_parameters_free(&CodecParameters);
 av_frame_free(&Frame);
 av_packet_free(&Packet);

 return 0;

}




The function encode looks as follows:


static void encode(AVCodecContext *ENCodecContext, AVFrame *Frame, AVPacket *EncodedPacket, AVFormatContext *Output_Format_Context){

 int check;



 check = avcodec_send_frame(ENCodecContext, Frame);
 if(check == AVERROR(EAGAIN)){
 printf("\nEAGAIN");
 } 
 if(check == AVERROR_EOF){
 printf("\nEOF");
 }
 if(check == AVERROR(EINVAL)){
 printf("\nEINVAL");
 }
 if(check == AVERROR(ENOMEM)){
 printf("\nENOMEM");
 }
 if(check < 0){

 printf("\nFehler bei Encoding Send Frame. Check = %d", check);

 return;

 }

 while(check >= 0){

 check = avcodec_receive_packet(ENCodecContext, EncodedPacket);
 if(check == AVERROR(EAGAIN) || check == AVERROR_EOF){

 return;

 }else if(check < 0){

 printf("\nFehler bei Encoding");

 return;

 }

 if (av_interleaved_write_frame(Output_Format_Context, EncodedPacket) < 0) {

 printf("\nFehler beim Muxen des Paketes.");
 break;

 }

 av_packet_unref(EncodedPacket);

 }

 return;

}



The program should decode a video into individual frames, convert them to RGB24 so that I can work with the raw data of each frame, then convert them back to the original format and encode the frames.
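For comparison, here is a minimal sketch of the send/receive transcoding loop as the FFmpeg documentation lays it out, with the decoder and encoder drained exactly once, after the demuxing loop. It assumes the contexts and streams are already set up as above; timestamp rescaling and most error handling are omitted, and transcode_sketch is a hypothetical helper, not part of my code:

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

static int transcode_sketch(AVFormatContext *in, AVFormatContext *out,
                            AVCodecContext *dec, AVCodecContext *enc,
                            int video_index)
{
    AVPacket *pkt = av_packet_alloc();
    AVPacket *out_pkt = av_packet_alloc();
    AVFrame *frame = av_frame_alloc();

    if (!pkt || !out_pkt || !frame)
        return AVERROR(ENOMEM);

    while (av_read_frame(in, pkt) >= 0) {
        if (pkt->stream_index == video_index) {
            avcodec_send_packet(dec, pkt);
            while (avcodec_receive_frame(dec, frame) >= 0) {
                /* per-frame processing (e.g. the RGB round trip) goes here */
                avcodec_send_frame(enc, frame);
                while (avcodec_receive_packet(enc, out_pkt) >= 0) {
                    out_pkt->stream_index = video_index;
                    av_interleaved_write_frame(out, out_pkt);
                    av_packet_unref(out_pkt);
                }
            }
        } else {
            av_interleaved_write_frame(out, pkt);
        }
        av_packet_unref(pkt);
    }

    /* Drain decoder and encoder exactly once, after all input has been read.
     * Sending NULL is the documented way to enter draining mode. */
    avcodec_send_packet(dec, NULL);
    while (avcodec_receive_frame(dec, frame) >= 0)
        avcodec_send_frame(enc, frame);
    avcodec_send_frame(enc, NULL);
    while (avcodec_receive_packet(enc, out_pkt) >= 0) {
        out_pkt->stream_index = video_index;
        av_interleaved_write_frame(out, out_pkt);
        av_packet_unref(out_pkt);
    }

    av_frame_free(&frame);
    av_packet_free(&pkt);
    av_packet_free(&out_pkt);
    return 0;
}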


The encoder doesn't play nice: I get an EOF error at avcodec_send_frame().
But I couldn't figure out why the encoder behaves like this.
And yes, I have read the docs and the example files, but either I'm massively missing a crucial detail or I'm just ****.


Any and all help will be and is massively appreciated.


PS: The libraries used are libavutil, libavformat, libavcodec and libswscale, all installed with the "-dev" suffix from the Linux command line. They should all be the version 7.0 libraries.


Thanks in advance.
With best regards.


Things I have already tried:

- Read the docs
- Shifting the encoding step out of the decoding while loop






-
Grabbing a single image from a MS VS .NET 6.0 C# WPF process always returns the same image
12 May 2023, by Wolfgang Kurz
I am trying to develop an MS VS .NET 6.0 C# WPF application to digitize my Super 8 cine films. To grab the frames of the film I want to use FFmpeg, because Accord.NET 6.0 DirectShow does not work under MS VS 2022.
I use the FFmpeg 64-bit static Windows build from www.gyan.dev,
version: 2022-09-29-git-8089fe072e-full_build-www.gyan.dev.


FFmpeg is invoked from a Process in my application, which sets the FFmpeg start parameters for the process. The webcam parameter webCam is "USB Webcam" (a digital Celestron handheld microscope connected via USB; it is DirectShow compatible).
The video resolution parameter camRes is "1280x960".


public void UpdateImage(string aString)
 {
 string startPath = "C:" + MainWindow.bSl + "ffmpeg" + MainWindow.bSl + "bin" + MainWindow.bSl + "ffmpeg.exe ";

 MainWindow.aResult = "";
 System.Drawing.Bitmap result;
 ProcessStartInfo psi;
 psi = new ProcessStartInfo();
 psi.FileName = startPath;
 string targPath;
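 // dshow syntax: the capture resolution belongs after -video_size and the
 // device name after video=; -frames:v 1 grabs a single frame, and
 // -update 1 tells the image2 muxer to keep overwriting one output file.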
 psi.Arguments = "-f dshow -video_size " + camRes +
 " -framerate 10 -i video=" + MainWindow.dQuote + webCam + MainWindow.dQuote +
 " -frames:v 1 test%3d.bmp -update 1";
 string errors = "";
 string results = "";
 psi.CreateNoWindow = false;
 psi.RedirectStandardOutput = true;
 psi.RedirectStandardError = true;
 psi.WindowStyle = ProcessWindowStyle.Normal;
 psi.WorkingDirectory = "C:" + MainWindow.bSl + "ffmpeg" + MainWindow.bSl + "bin" + MainWindow.bSl;

 using (Process theProcess = new Process())
 {
 theProcess.StartInfo = psi;
 theProcess.Start();

 theProcess.WaitForExit();
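 // WaitForExit() blocks until the process has ended, so the polling
 // loop below is effectively redundant.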
 while (theProcess.HasExited == false)
 {
 Thread.Sleep(50);
 }
 Thread.Sleep(50);

 try
 {
 if (File.Exists(pathCI) == true)
 {
 DefineImage.ffmpegRes = new Bitmap(pathCI);
 MainWindow.actMWInstance.UpdateMessage(DateTime.Now +
 "- Old image disposed, new image grabbed");
 File.Delete(pathCI);
 }
 }
 catch (System.Exception ex)
 {
 MainWindow.actMWInstance.UpdateMessage(DateTime.Now + "Image grabbing failed");
 }
 theProcess.Close();
 theProcess.Dispose();
 }

 if (aString.Length == 0)
 {
 File.Delete(pathCI);
 MainWindow.actMWInstance.DoMove("200", "0", true);
 }
 if (ffmpegRes != null)
 {
 BitmapSource aBMPSrc = BitmapConversion.ToWpfBitmap(ffmpegRes);
 IMGFrame.Source = aBMPSrc;
 }
 Show();
 }



The attached screenshot shows the expected GUI content.
https://www.wkurz.com/wkurz/images/FFMPEGTEST.jpg


My problem now is: when I try to refresh the image in the DefineDialog window, I always get the same image, although the film has been moved on by one frame.

Is the first image cached by FFmpeg and always used again with the provided parameters?


How can I force FFmpeg to replace the image with a new one?



This is additional information to the question.
I have managed to grab the film frame, but I still always get the same image during a session:




It is obvious that the cine film frame has been successfully grabbed.
The next step should be to work on the image (correct brightness, contrast, gamma and colours) and then store the evaluated correction values. These values later have to be applied to the frames during a batch process, which automatically transports the film, grabs the frames, corrects the colours, and stores the frames as consecutively numbered images, which can then be used to generate a video with FFmpeg:


Now the problem I cannot overcome:


In the definition dialog there are 2 buttons (Refresh and New Image). The intention is to reread/regrab a frame if the current frame is not optimal for evaluating the correction values.


Whenever I click one of the buttons, FFmpeg seems to grab an image, but it always provides the same one (identical to the first already available image) to the caller (a .NET C# Process in my program).


The output stream is a single JPG image (test%04d.jpg) in a specified target directory. Here follows the generated output, which seems to be OK to the best of my knowledge.


I want to ask all interested persons to look over the provided information; maybe someone has an idea what causes FFmpeg 6.0 "Von Neumann" to always deliver the first image grabbed in a session.
It seems that something is cached and reused over and over again.


DefImg 147: UpdateImage started (lines starting with DefImg or DefineImage are test output statements provided by the C# program)



DefineImage 155 11.05.2023 17:53:20 C:\ffmpeg\bin\ffmpeg.exe -y -nostdin -f dshow -an -video_size 1280x960 -framerate 10 -i video="USB Microscope" -filter:v "smartblur=luma_radius=0.9:luma_strength=0.7:luma_threshold=0" -frames:v 1 C:\FilmProjList\TEST\TEST%%04d.jpg -update 1
DefineImage 180 ffmpeg version 6.0-full_build-www.gyan.dev Copyright (c) 2000-2023 the FFmpeg developers
built with gcc 12.2.0 (Rev10, Built by MSYS2 project)
configuration: --enable-gpl --enable-version3 --enable-static --disable-w32threads --disable-autodetect --enable-fontconfig --enable-iconv --enable-gnutls --enable-libxml2 --enable-gmp --enable-bzlib --enable-lzma --enable-libsnappy --enable-zlib --enable-librist --enable-libsrt --enable-libssh --enable-libzmq --enable-avisynth --enable-libbluray --enable-libcaca --enable-sdl2 --enable-libaribb24 --enable-libdav1d --enable-libdavs2 --enable-libuavs3d --enable-libzvbi --enable-librav1e --enable-libsvtav1 --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxavs2 --enable-libxvid --enable-libaom --enable-libjxl --enable-libopenjpeg --enable-libvpx --enable-mediafoundation --enable-libass --enable-frei0r --enable-libfreetype --enable-libfribidi --enable-liblensfun --enable-libvidstab --enable-libvmaf --enable-libzimg --enable-amf --enable-cuda-llvm --enable-cuvid --enable-ffnvcodec --enable-nvdec --enable-nvenc --enable-d3d11va --enable-dxva2 --enable-libvpl --enable-libshaderc --enable-vulkan --enable-libplacebo --enable-opencl --enable-libcdio --enable-libgme --enable-libmodplug --enable-libopenmpt --enable-libopencore-amrwb --enable-libmp3lame --enable-libshine --enable-libtheora --enable-libtwolame --enable-libvo-amrwbenc --enable-libilbc --enable-libgsm --enable-libopencore-amrnb --enable-libopus --enable-libspeex --enable-libvorbis --enable-ladspa --enable-libbs2b --enable-libflite --enable-libmysofa --enable-librubberband --enable-libsoxr --enable-chromaprint
libavutil 58. 2.100 / 58. 2.100
libavcodec 60. 3.100 / 60. 3.100
libavformat 60. 3.100 / 60. 3.100
libavdevice 60. 1.100 / 60. 1.100
libavfilter 9. 3.100 / 9. 3.100
libswscale 7. 1.100 / 7. 1.100
libswresample 4. 10.100 / 4. 10.100
libpostproc 57. 1.100 / 57. 1.100
Trailing option(s) found in the command: may be ignored.
Input #0, dshow, from 'video=USB Microscope':
Duration: N/A, start: 5368.514437, bitrate: N/A
Stream #0:0: Video: rawvideo (YUY2 / 0x32595559), yuyv422(tv, bt470bg/bt709/unknown), 1280x960, 10 fps, 10 tbr, 10000k tbn
Stream mapping:
Stream #0:0 -> #0:0 (rawvideo (native) -> mjpeg (native))
[swscaler @ 000001442fdcebc0] deprecated pixel format used, make sure you did set range correctly (that I do not understand)
Last message repeated 3 times
[mjpeg @ 00000144278b0d80] removing common factors from framerate
Output #0, image2, to 'C:\FilmProjList\TEST\TEST%04d.jpg':
Metadata:
encoder: Lavf60.3.100
Stream #0:0: Video: mjpeg, yuvj422p(pc, bt470bg/bt709/unknown, progressive), 1280x960, q=2-31, 200 kb/s, 10 fps, 10 tbn
Metadata:
encoder: Lavc60.3.100 mjpeg
Side data:
cpb: bitrate max/min/avg: 0/0/200000 buffer size: 0 vbv_delay: N/A
frame= 0 fps=0.0 q=3.6 size= 0kB time=00:00:00.00 bitrate=N/A speed=N/A

frame= 1 fps=0.0 q=3.6 Lsize=N/A time=00:00:00.00 bitrate=N/A speed= 0x

video:45kB audio:0kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: unknown

DefineImage 248 11.05.2023 17:53:23 (test output from the C# program)
The program "[11724] Cine2Video.exe" exited with code 0 (0x0).








-
Python asyncio subprocess code returns "pipe closed by peer or os.write(pipe, data) raised exception."
4 November 2022, by Duke Dougal
I am trying to convert a synchronous Python process to asyncio. Any ideas what I am doing wrong?


This is the synchronous code which successfully starts ffmpeg and converts a directory of webp files into a video.


import subprocess
import shlex
from os import listdir
from os.path import isfile, join

output_filename = 'output.mp4'
process = subprocess.Popen(shlex.split(f'ffmpeg -y -framerate 60 -i pipe: -vcodec libx265 -pix_fmt yuv420p -crf 24 output.mp4'), stdin=subprocess.PIPE)

thepath = '/home/ubuntu/webpfiles/'
thefiles = [f for f in listdir(thepath) if isfile(join(thepath, f))]
for filename in thefiles:
    absolute_path = f'{thepath}{filename}'
    with open(absolute_path, 'rb') as f:
        process.stdin.write(f.read())

process.stdin.close()
process.wait()
process.terminate()
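# Note: wait() has already returned at this point, so terminate() does
# nothing on the exited process.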



This async code fails:


from os import listdir
from os.path import isfile, join
import shlex
import asyncio

outputfilename = 'output.mp4'

async def write_stdin(proc):
    thepath = '/home/ubuntu/webpfiles/'
    thefiles = [f for f in listdir(thepath) if isfile(join(thepath, f))]
    thefiles.sort()
    for filename in thefiles:
        absolute_path = f'{thepath}{filename}'
        with open(absolute_path, 'rb') as f:
            await proc.communicate(input=f.read())
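            # Note: communicate() writes its input, closes stdin, and waits
            # for the process to exit, so after the first file the pipe is
            # already closed; the synchronous version above keeps stdin open
            # and calls process.stdin.write() repeatedly instead.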

async def create_ffmpeg_subprocess():
    bin = f'/home/ubuntu/bin/ffmpeg'
    params = f'-y -framerate 60 -i pipe: -vcodec libx265 -pix_fmt yuv420p -crf 24 {outputfilename}'
    proc = await asyncio.create_subprocess_exec(
        bin,
        *shlex.split(params),
        stdin=asyncio.subprocess.PIPE,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    return proc

async def start():
    loop = asyncio.get_event_loop()
    proc = await create_ffmpeg_subprocess()
    task_stdout = loop.create_task(write_stdin(proc))
    await asyncio.gather(task_stdout)

if __name__ == '__main__':
    asyncio.run(start())



The output for the async code is:


pipe closed by peer or os.write(pipe, data) raised exception.
pipe closed by peer or os.write(pipe, data) raised exception.
pipe closed by peer or os.write(pipe, data) raised exception.
pipe closed by peer or os.write(pipe, data) raised exception.
pipe closed by peer or os.write(pipe, data) raised exception.
pipe closed by peer or os.write(pipe, data) raised exception.
pipe closed by peer or os.write(pipe, data) raised exception.
pipe closed by peer or os.write(pipe, data) raised exception.
pipe closed by peer or os.write(pipe, data) raised exception.
pipe closed by peer or os.write(pipe, data) raised exception.
pipe closed by peer or os.write(pipe, data) raised exception.
pipe closed by peer or os.write(pipe, data) raised exception.
pipe closed by peer or os.write(pipe, data) raised exception.
pipe closed by peer or os.write(pipe, data) raised exception.
pipe closed by peer or os.write(pipe, data) raised exception.
pipe closed by peer or os.write(pipe, data) raised exception.



etc.; one line for each webp file