
Media (1)
-
Video of a bee in portrait orientation
14 May 2011
Updated: February 2012
Language: French
Type: Video
Other articles (104)
-
Automatic backup of SPIP channels
1 April 2010. As part of setting up an open platform, it is important for hosting providers to have fairly regular backups available in order to cope with any potential problem.
To carry out this task, two SPIP plugins are used: Saveauto, which performs a regular backup of the database in the form of a MySQL dump (usable in phpMyAdmin), and mes_fichiers_2, which builds a zip archive of the site's important data (the documents, the elements (...) -
Libraries and binaries specific to video and audio processing
31 January 2010. The following software and libraries are used by SPIPmotion in one way or another.
Required binaries FFMpeg: the main encoder, able to transcode almost every type of video and audio file into formats readable on the Internet. See this tutorial for its installation; Oggz-tools: inspection tools for ogg files; Mediainfo: retrieves information from most video and audio formats;
Optional additional binaries flvtool2: (...) -
HTML5 audio and video support
10 April 2011. MediaSPIP uses the HTML5 video and audio tags to play multimedia documents, taking advantage of the latest W3C innovations supported by modern browsers.
For older browsers, the Flowplayer Flash player is used.
The HTML5 player in use was created specifically for MediaSPIP: its appearance is fully customisable to match a chosen theme.
These technologies make it possible to deliver video and sound both to conventional computers (...)
On other sites (7911)
-
Why is there no AVFrame->data[2] data when decoding h264 with ffmpeg using "h264_cuvid"
27 July 2017, by Wu NL. Environment: Ubuntu 16.04 64-bit; ffmpeg 3.3.2 built with cuda, cuvid, libnpp...
Using the ffmpeg command: ffmpeg -vsync 0 -c:v h264_cuvid -i test.264 -f rawvideo test.yuv
works fine; the generated yuv file is OK.
But when I decode this 264 file with my own code using the ’h264_cuvid’ decoder, something goes wrong. This is my code:
#include <stdio.h>
#define __STDC_CONSTANT_MACROS
#ifdef _WIN32
//Windows
extern "C"
{
#include "libavcodec/avcodec.h"
};
#else
//Linux...
#ifdef __cplusplus
extern "C"
{
#endif
#include <libavcodec/avcodec.h>
#ifdef __cplusplus
};
#endif
#endif
//test different codec
#define TEST_H264 1
#define TEST_HEVC 0
int main(int argc, char* argv[])
{
AVCodec *pCodec;
AVCodecContext *pCodecCtx= NULL;
AVCodecParserContext *pCodecParserCtx=NULL;
FILE *fp_in;
FILE *fp_out;
AVFrame *pFrame;
const int in_buffer_size=4096;
unsigned char in_buffer[in_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE]= {0};
unsigned char *cur_ptr;
int cur_size;
AVPacket packet;
int ret, got_picture;
#if TEST_HEVC
enum AVCodecID codec_id=AV_CODEC_ID_HEVC;
char filepath_in[]="bigbuckbunny_480x272.hevc";
#elif TEST_H264
AVCodecID codec_id=AV_CODEC_ID_H264;
char filepath_in[]="2_60_265to264.264";
#else
AVCodecID codec_id=AV_CODEC_ID_MPEG2VIDEO;
char filepath_in[]="bigbuckbunny_480x272.m2v";
#endif
char filepath_out[]="mainSend.yuv";
int first_time=1;
//av_log_set_level(AV_LOG_DEBUG);
avcodec_register_all();
// pCodec = avcodec_find_decoder(codec_id);
pCodec = avcodec_find_decoder_by_name("h264_cuvid");
if (!pCodec)
{
printf("Codec not found\n");
return -1;
}
pCodecCtx = avcodec_alloc_context3(pCodec);
if (!pCodecCtx)
{
printf("Could not allocate video codec context\n");
return -1;
}
pCodecParserCtx=av_parser_init(pCodec->id);
if (!pCodecParserCtx)
{
printf("Could not allocate video parser context\n");
return -1;
}
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
{
printf("Could not open codec\n");
return -1;
}
//Input File
fp_in = fopen(filepath_in, "rb");
if (!fp_in)
{
printf("Could not open input stream\n");
return -1;
}
//Output File
fp_out = fopen(filepath_out, "wb");
if (!fp_out)
{
printf("Could not open output YUV file\n");
return -1;
}
pFrame = av_frame_alloc();
av_init_packet(&packet);
while (1)
{
cur_size = fread(in_buffer, 1, in_buffer_size, fp_in);
if (cur_size == 0)
break;
cur_ptr=in_buffer;
while (cur_size>0)
{
int len = av_parser_parse2(
pCodecParserCtx, pCodecCtx,
&packet.data, &packet.size,
cur_ptr, cur_size,
AV_NOPTS_VALUE, AV_NOPTS_VALUE, AV_NOPTS_VALUE);
cur_ptr += len;
cur_size -= len;
if(packet.size==0)
continue;
//Some Info from AVCodecParserContext
printf("[Packet]Size:%6d\t",packet.size);
switch(pCodecParserCtx->pict_type)
{
case AV_PICTURE_TYPE_I:
printf("Type:I\tNumber:%4d\n",pCodecParserCtx->output_picture_number);
break;
case AV_PICTURE_TYPE_P:
printf("Type:P\t");
break;
case AV_PICTURE_TYPE_B:
printf("Type:B\t");
break;
default:
printf("Type:Other\t");
break;
}
printf("Number:%4d\n",pCodecParserCtx->output_picture_number);
AVFrame* myFrame = av_frame_alloc();
ret = avcodec_decode_video2(pCodecCtx, myFrame, &got_picture, &packet);
if (ret < 0)
{
printf("Decode Error.\n");
return ret;
}
if (got_picture)
{
if(first_time)
{
printf("\nCodec Full Name:%s\n",pCodecCtx->codec->long_name);
printf("width:%d\nheight:%d\n\n",pCodecCtx->width,pCodecCtx->height);
first_time=0;
}
//Y, U, V
for(int i=0; i<myFrame->height; i++)
{
fwrite(myFrame->data[0]+myFrame->linesize[0]*i,1,myFrame->width,fp_out);
}
for(int i=0; i<myFrame->height/2; i++)
{
fwrite(myFrame->data[1]+myFrame->linesize[1]*i,1,myFrame->width/2,fp_out);
}
for(int i=0; i<myFrame->height/2; i++)
{
fwrite(myFrame->data[2]+myFrame->linesize[2]*i,1,myFrame->width/2,fp_out);
}
// printf("pframe's width height %d %d\t key frame %d\n",myFrame->width,myFrame->height,myFrame->key_frame);
printf("Succeed to decode 1 frame!\n");
av_frame_free(&myFrame);
}
}
}
fclose(fp_in);
fclose(fp_out);
av_parser_close(pCodecParserCtx);
av_frame_free(&pFrame);
avcodec_close(pCodecCtx);
av_free(pCodecCtx);
return 0;
}
In this demo code, I call h264_cuvid with
avcodec_find_decoder_by_name("h264_cuvid");
But the code crashes at
fwrite(myFrame->data[2]+myFrame->linesize[2]*i,1,myFrame->width/2,fp_out);
After debugging with Code::Blocks, I found in the watch window that there is no data in myFrame->data[2]. Any suggestion ? Thanks !
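As an aside, here is a minimal sketch (not part of the original post) of format-aware plane writing, assuming the cuvid decoder returns semi-planar NV12 frames (Y plane in data[0], interleaved UV in data[1], data[2] left NULL) while the software h264 decoder returns planar YUV420P:
//Sketch only: write the decoded frame according to its actual pixel format
if (myFrame->format == AV_PIX_FMT_YUV420P)
{
    //planar: Y, U and V each have their own plane
    for (int i = 0; i < myFrame->height; i++)
        fwrite(myFrame->data[0] + myFrame->linesize[0]*i, 1, myFrame->width, fp_out);
    for (int i = 0; i < myFrame->height/2; i++)
        fwrite(myFrame->data[1] + myFrame->linesize[1]*i, 1, myFrame->width/2, fp_out);
    for (int i = 0; i < myFrame->height/2; i++)
        fwrite(myFrame->data[2] + myFrame->linesize[2]*i, 1, myFrame->width/2, fp_out);
}
else if (myFrame->format == AV_PIX_FMT_NV12)
{
    //semi-planar: Y plane, then one interleaved UV plane; data[2] is not used
    for (int i = 0; i < myFrame->height; i++)
        fwrite(myFrame->data[0] + myFrame->linesize[0]*i, 1, myFrame->width, fp_out);
    for (int i = 0; i < myFrame->height/2; i++)
        fwrite(myFrame->data[1] + myFrame->linesize[1]*i, 1, myFrame->width, fp_out);
}
Note that the NV12 branch writes NV12-ordered raw output rather than I420; producing planar YUV420P from such frames would need sws_scale or splitting the interleaved UV plane.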
-
How to stop ffmpeg when recording the desktop to save the file to the hard disk ?
27 June 2022, by Eliot Shein. I'm trying to record the desktop with ffmpeg and save a video file to the hard disk.


using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace Testings
{
 internal class FFmpeg_Capture
 {
 Process process;

 public FFmpeg_Capture()
 {
 process = new Process();
 }

 public void Start(string FileName, int Framerate)
 {
 process.StartInfo.FileName = @"D:\Captured Videos\ffmpeg.exe"; // Change the directory where ffmpeg.exe is. 
 process.EnableRaisingEvents = false;
 process.StartInfo.WorkingDirectory = @"D:\Captured Videos"; // The output directory 
 process.StartInfo.Arguments = @"-f gdigrab -framerate " + Framerate +
 " -i desktop -preset ultrafast - pix_fmt yuv420p " + FileName;
 process.Start();
 process.StartInfo.UseShellExecute = false;
 process.StartInfo.CreateNoWindow = false;
 Stop();
 }

 public void Stop()
 {
 process.Close();
 }
 }
}



And using it in form1 :


private void btnRecord_Click(object sender, EventArgs e)
 {
 recordToggle = !recordToggle;

 if (recordToggle)
 {
 btnRecord.Text = "Stop";
 record.Start("Testing", 60);
 }
 else
 {
 btnRecord.Text = "Record";
 record.Stop();
 }
 }



But the file Testing is never saved to the hard disk. My guess is that


process.Close();



is not like Ctrl+C, and Ctrl+C is what stops ffmpeg and saves the file.


This is working, but how do I remove the black console window of ffmpeg ?




using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Drawing;
using System.IO;
using System.IO.Pipes;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;

namespace Testings
{
 internal class FFmpeg_Capture
 {
 Process process;

 public FFmpeg_Capture()
 {
 process = new Process();
 }

 public void Start(string FileName, int Framerate)
 {
 process.StartInfo.FileName = @"D:\Captured Videos\ffmpeg.exe"; // Change the directory where ffmpeg.exe is. 
 process.EnableRaisingEvents = false;
 process.StartInfo.WorkingDirectory = @"D:\Captured Videos\"; // The output directory 
 process.StartInfo.Arguments = @"-y -f gdigrab -framerate " + Framerate +
 " -i desktop -preset ultrafast -pix_fmt yuv420p " + FileName;
 process.StartInfo.UseShellExecute = false;
 process.StartInfo.CreateNoWindow = false;
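 // Hint: CreateNoWindow only takes effect when UseShellExecute is false (set above);
 // with CreateNoWindow = true the black ffmpeg console window would be hidden.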
 process.StartInfo.RedirectStandardInput = true; //Redirect stdin
 process.Start();
 }

 public void Stop()
 {
 byte[] qKey = Encoding.GetEncoding("gbk").GetBytes("q"); //Get encoding of 'q' key
 process.StandardInput.BaseStream.Write(qKey, 0, 1); //Write 'q' key to stdin of FFmpeg sub-processs
 process.StandardInput.BaseStream.Flush(); //Flush stdin (just in case).
 process.Close();
 }
 }
}



-
Save video from rtsp and play in exoplayer simultaneously [closed]
25 January 2024, by Julian Peña Gallego. I am trying to receive a live stream via RTSP from an IP camera in Android Kotlin; I am recording the video to a local file with ffmpegkit, but I must additionally view the live stream.


When I record the live stream with ffmpeg without playing the stream through ExoPlayer, it works fine, but when both processes are running there is packet loss in the video recording or in ExoPlayer.


Then I tried to get ExoPlayer to play the video file that ffmpeg was still recording, but it gave an error since the file had not been closed yet.


Could you provide me with a solution ? I have found on the internet that it is possible with server sockets, but the sources do not explain how to do it.