
Recherche avancée
Autres articles (95)
-
MediaSPIP 0.1 Beta version
25 avril 2011, parMediaSPIP 0.1 beta is the first version of MediaSPIP proclaimed as "usable".
The zip file provided here only contains the sources of MediaSPIP in its standalone version.
To get a working installation, you must manually install all-software dependencies on the server.
If you want to use this archive for an installation in "farm mode", you will also need to proceed to other manual (...) -
Multilang : améliorer l’interface pour les blocs multilingues
18 février 2011, parMultilang est un plugin supplémentaire qui n’est pas activé par défaut lors de l’initialisation de MediaSPIP.
Après son activation, une préconfiguration est mise en place automatiquement par MediaSPIP init permettant à la nouvelle fonctionnalité d’être automatiquement opérationnelle. Il n’est donc pas obligatoire de passer par une étape de configuration pour cela. -
ANNEXE : Les plugins utilisés spécifiquement pour la ferme
5 mars 2010, parLe site central/maître de la ferme a besoin d’utiliser plusieurs plugins supplémentaires vis à vis des canaux pour son bon fonctionnement. le plugin Gestion de la mutualisation ; le plugin inscription3 pour gérer les inscriptions et les demandes de création d’instance de mutualisation dès l’inscription des utilisateurs ; le plugin verifier qui fournit une API de vérification des champs (utilisé par inscription3) ; le plugin champs extras v2 nécessité par inscription3 (...)
Sur d’autres sites (10147)
-
Writing image to RTP with ffmpeg
22 septembre 2017, par Gaulois94 — I am actually trying to send real time images via the network efficiently. For this, I thought that the RTP protocol in video streaming could be a good way to achieve this.
I actually tried this :
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/opt.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
}
#include <iostream>
#include
#include
//Mainly based on https://stackoverflow.com/questions/40825300/ffmpeg-create-rtp-stream
int main()
{
//Init ffmpeg
avcodec_register_all();
av_register_all();
avformat_network_init();
//Init the codec used to encode our given image
AVCodecID codecID = AV_CODEC_ID_MPEG4;
AVCodec* codec;
AVCodecContext* codecCtx;
codec = avcodec_find_encoder(codecID);
codecCtx = avcodec_alloc_context3(codec);
//codecCtx->bit_rate = 400000;
codecCtx->width = 352;
codecCtx->height = 288;
codecCtx->time_base.num = 1;
codecCtx->time_base.den = 25;
codecCtx->gop_size = 25;
codecCtx->max_b_frames = 1;
codecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
if (codecID == AV_CODEC_ID_H264)
{
av_opt_set(codecCtx->priv_data, "preset", "ultrafast", 0);
av_opt_set(codecCtx->priv_data, "tune", "zerolatency", 0);
}
avcodec_open2(codecCtx, codec, NULL);
//Init the Frame containing our raw data
AVFrame* frame;
frame = av_frame_alloc();
frame->format = codecCtx->pix_fmt;
frame->width = codecCtx->width;
frame->height = codecCtx->height;
av_image_alloc(frame->data, frame->linesize, frame->width, frame->height, codecCtx->pix_fmt, 32);
//Init the format context
AVFormatContext* fmtCtx = avformat_alloc_context();
AVOutputFormat* format = av_guess_format("rtp", NULL, NULL);
avformat_alloc_output_context2(&fmtCtx, format, format->name, "rtp://127.0.0.1:49990");
avio_open(&fmtCtx->pb, fmtCtx->filename, AVIO_FLAG_WRITE);
//Configure the AVStream for the output format context
struct AVStream* stream = avformat_new_stream(fmtCtx, codec);
avcodec_parameters_from_context(stream->codecpar, codecCtx);
stream->time_base.num = 1;
stream->time_base.den = 25;
/* Rewrite the header */
avformat_write_header(fmtCtx, NULL);
/* Write a file for VLC */
char buf[200000];
AVFormatContext *ac[] = { fmtCtx };
av_sdp_create(ac, 1, buf, 20000);
printf("sdp:\n%s\n", buf);
FILE* fsdp = fopen("test.sdp", "w");
fprintf(fsdp, "%s", buf);
fclose(fsdp);
AVPacket pkt;
int j = 0;
for(int i = 0; i < 10000; i++)
{
fflush(stdout);
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
int R, G, B;
R = G = B = i % 255;
int Y = 0.257 * R + 0.504 * G + 0.098 * B + 16;
int U = -0.148 * R - 0.291 * G + 0.439 * B + 128;
int V = 0.439 * R - 0.368 * G - 0.071 * B + 128;
/* prepare a dummy image */
/* Y */
for (int y = 0; y < codecCtx->height; y++)
for (int x = 0; x < codecCtx->width; x++)
frame->data[0][y * codecCtx->width + x] = Y;
for (int y = 0; y < codecCtx->height/2; y++)
for (int x=0; x < codecCtx->width / 2; x++)
{
frame->data[1][y * frame->linesize[1] + x] = U;
frame->data[2][y * frame->linesize[2] + x] = V;
}
/* Which frame is it ? */
frame->pts = i;
/* Send the frame to the codec */
avcodec_send_frame(codecCtx, frame);
/* Use the data in the codec to the AVPacket */
switch(avcodec_receive_packet(codecCtx, &pkt))
{
case AVERROR_EOF:
printf("Stream EOF\n");
break;
case AVERROR(EAGAIN):
printf("Stream EAGAIN\n");
break;
default:
printf("Write frame %3d (size=%5d)\n", j++, pkt.size);
/* Write the data on the packet to the output format */
av_interleaved_write_frame(fmtCtx, &pkt);
/* Reset the packet */
av_packet_unref(&pkt);
break;
}
usleep(1e6/25);
}
// end
avcodec_send_frame(codecCtx, NULL);
//Free everything
av_free(codecCtx);
av_free(fmtCtx);
return 0;
}
And with VLC I can see one image, but not a video (I have to reload it to see another image in grayscale).
Does someone know why vlc don’t play my video well ? Thank you !
-
Encoding frames to video with ffmpeg
5 septembre 2017, par Mher Didaryan — I am trying to encode a video in Unreal Engine 4 with C++. I have access to the separate frames. Below is the code which reads
viewport's
displayed pixels and stores in buffer.//Safely get render target resource.
FRenderTarget* RenderTarget = TextureRenderTarget->GameThread_GetRenderTargetResource();
FIntPoint Size = RenderTarget->GetSizeXY();
auto ImageBytes = Size.X* Size.Y * static_cast<int32>(sizeof(FColor));
TArray<uint8> RawData;
RawData.AddUninitialized(ImageBytes);
//Get image raw data.
if (!RenderTarget->ReadPixelsPtr((FColor*)RawData.GetData()))
{
RawData.Empty();
UE_LOG(ExportRenderTargetBPFLibrary, Error, TEXT("ExportRenderTargetAsImage: Failed to get raw data."));
return false;
}
Buffer::getInstance().add(RawData);
Unreal Engine has
IImageWrapperModule
with which you can get an image from frame, but noting for video encoding. What I want is to encode frames in real time basis for live streaming service.I found this post Encoding a screenshot into a video using FFMPEG which is kind of what I want, but I have problems adapting this solution for my case. The code is outdated (for example
avcodec_encode_video
changed toavcodec_encode_video2
with different parameters).Bellow is the code of encoder.
// NOTE(review): quoted question code. The actual scale/encode step is
// commented out below, so as written this worker loops forever allocating
// frames without encoding, writing, or exiting.
void Compressor::DoWork()
{
AVCodec* codec;
AVCodecContext* c = NULL;
//uint8_t* outbuf;
//int /*i, out_size,*/ outbuf_size;
UE_LOG(LogTemp, Warning, TEXT("encoding"));
codec = avcodec_find_encoder(AV_CODEC_ID_MPEG1VIDEO); // find the encoder (NOTE(review): original comment said "H264" but the ID requested is MPEG-1)
if (!codec) {
UE_LOG(LogTemp, Warning, TEXT("codec not found"));
exit(1);
}
else UE_LOG(LogTemp, Warning, TEXT("codec found"));
c = avcodec_alloc_context3(codec);
c->bit_rate = 400000;
c->width = 1280; // resolution must be a multiple of two (1280x720),(1900x1080),(720x480)
c->height = 720;
c->time_base.num = 1; // framerate numerator
c->time_base.den = 25; // framerate denominator
c->gop_size = 10; // emit one intra frame every ten frames
c->max_b_frames = 1; // maximum number of b-frames between non b-frames
c->keyint_min = 1; // minimum GOP size
c->i_quant_factor = (float)0.71; // qscale factor between P and I frames
//c->b_frame_strategy = 20; ///// find out exactly what this does
c->qcompress = (float)0.6; ///// find out exactly what this does
c->qmin = 20; // minimum quantizer
c->qmax = 51; // maximum quantizer
c->max_qdiff = 4; // maximum quantizer difference between frames
c->refs = 4; // number of reference frames
c->trellis = 1; // trellis RD Quantization
c->pix_fmt = AV_PIX_FMT_YUV420P; // universal pixel format for video encoding
c->codec_id = AV_CODEC_ID_MPEG1VIDEO;
c->codec_type = AVMEDIA_TYPE_VIDEO;
if (avcodec_open2(c, codec, NULL) < 0) {
// NOTE(review): open failure is logged but execution continues with an
// unopened codec context because exit(1) is commented out.
UE_LOG(LogTemp, Warning, TEXT("could not open codec")); // opening the codec
//exit(1);
}
else UE_LOG(LogTemp, Warning, TEXT("codec oppened"));
FString FinalFilename = FString("C:/Screen/sample.mpg");
auto &PlatformFile = FPlatformFileManager::Get().GetPlatformFile();
auto FileHandle = PlatformFile.OpenWrite(*FinalFilename, true);
if (FileHandle)
{
delete FileHandle; // remove when ready
UE_LOG(LogTemp, Warning, TEXT("file opened"));
// NOTE(review): infinite loop — every iteration leaks outbuffer, inpic,
// outpic, and the av_image_alloc'd planes; nothing is ever encoded.
while (true)
{
UE_LOG(LogTemp, Warning, TEXT("removing from buffer"));
int nbytes = avpicture_get_size(AV_PIX_FMT_YUV420P, c->width, c->height); // allocating outbuffer
uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes * sizeof(uint8_t));
AVFrame* inpic = av_frame_alloc();
AVFrame* outpic = av_frame_alloc();
outpic->pts = (int64_t)((float)1 * (1000.0 / ((float)(c->time_base.den))) * 90); // setting frame pts
// NOTE(review): AV_PIX_FMT_PAL8 looks wrong for 32-bit FColor viewport
// data — the commented sws_getContext below expects RGB32; confirm.
avpicture_fill((AVPicture*)inpic, (uint8_t*)Buffer::getInstance().remove().GetData(),
AV_PIX_FMT_PAL8, c->width, c->height); // fill image with input screenshot
avpicture_fill((AVPicture*)outpic, outbuffer, AV_PIX_FMT_YUV420P, c->width, c->height); // clear output picture for buffer copy
av_image_alloc(outpic->data, outpic->linesize, c->width, c->height, c->pix_fmt, 1);
/*
inpic->data[0] += inpic->linesize[0]*(screenHeight-1);
// flipping frame
inpic->linesize[0] = -inpic->linesize[0];
// flipping frame
struct SwsContext* fooContext = sws_getContext(screenWidth, screenHeight, PIX_FMT_RGB32, c->width, c->height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
sws_scale(fooContext, inpic->data, inpic->linesize, 0, c->height, outpic->data, outpic->linesize); // converting frame size and format
out_size = avcodec_encode_video(c, outbuf, outbuf_size, outpic);
// save in file
*/
}
delete FileHandle;
}
else
{
UE_LOG(LogTemp, Warning, TEXT("Can't open file"));
}
}Can someone explain flipping frame part (why it’s done ?) and how to use
avcodec_encode_video2
function instead ofavcodec_encode_video
? -
Delphi, TBitmap (rgb) to YCbCr colors format
18 octobre 2019, par Alexander M. — I have a video encoding example from http://www.delphiffmpeg.com — I need to convert a set of TBitmaps to YCbCr (YUV). How should we do it? The example contains dummy colors:
(* encode 1 second of video *)
idx := 1;
for i := 0 to 25 - 1 do
begin
av_init_packet(@pkt);
pkt.data := nil; // packet data will be allocated by the encoder
pkt.size := 0;
//fflush(stdout);
(* prepare a dummy image *)
(* Y *)
for y := 0 to c.height - 1 do
for x := 0 to c.width - 1 do
PByte(@PAnsiChar(frame.data[0])[y * frame.linesize[0] + x])^ := x + y + i * 3;
(* Cb and Cr *)
for y := 0 to c.height div 2 - 1 do
for x := 0 to c.width div 2 - 1 do
begin
PByte(@PAnsiChar(frame.data[1])[y * frame.linesize[1] + x])^ := 128 + y + i * 2;
PByte(@PAnsiChar(frame.data[2])[y * frame.linesize[2] + x])^ := 64 + x + i * 5;
end;
frame.pts := i;
(* encode the image *)
ret := avcodec_encode_video2(c, @pkt, frame, @got_output);
if ret < 0 then
begin
Writeln(ErrOutput, 'Error encoding frame');
ExitCode := 1;
Exit;
end;
if got_output <> 0 then
begin
Writeln(Format('Write frame %d (size=%d)', [idx, pkt.size]));
FileWrite(f, pkt.data^, pkt.size);
av_packet_unref(@pkt);
Inc(idx);
end;
end;But we need to convert bitmaps to YCbCr..instead of filling pixels with dummy image. Here is a full source code :
(*
* Video encoding example
*)
// Encodes 25 synthetic YUV420P frames (one second at 25 fps) with the given
// codec and writes the raw packets plus an MPEG end-of-sequence code to
// 'filename'. NOTE(review): avcodec_encode_video2 is the deprecated
// pre-send/receive FFmpeg encoding API; this matches the library bindings
// the question uses.
procedure video_encode_example(const filename: string; codec_id: TAVCodecID);
const
endcode: array[0..3] of Byte = ( 0, 0, 1, $b7 );
var
codec: PAVCodec;
c: PAVCodecContext;
idx, i, ret, x, y, got_output: Integer;
f: THandle;
frame: PAVFrame;
pkt: TAVPacket;
begin
Writeln(Format('Encode video file %s', [filename]));
(* find the mpeg1 video encoder *)
codec := avcodec_find_encoder(codec_id);
if not Assigned(codec) then
begin
Writeln(ErrOutput, 'Codec not found');
ExitCode := 1;
Exit;
end;
c := avcodec_alloc_context3(codec);
if not Assigned(c) then
begin
Writeln(ErrOutput, 'Could not allocate video codec context');
ExitCode := 1;
Exit;
end;
(* put sample parameters *)
c.bit_rate := 400000;
(* resolution must be a multiple of two *)
c.width := 352;
c.height := 288;
(* frames per second *)
c.time_base.num := 1;
c.time_base.den := 25;
(* emit one intra frame every ten frames
* check frame pict_type before passing frame
* to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
* then gop_size is ignored and the output of encoder
* will always be I frame irrespective to gop_size
*)
c.gop_size := 10;
c.max_b_frames := 1;
c.pix_fmt := AV_PIX_FMT_YUV420P;
if codec_id = AV_CODEC_ID_H264 then
av_opt_set(c.priv_data, 'preset', 'slow', 0);
(* open it *)
if avcodec_open2(c, codec, nil) < 0 then
begin
Writeln(ErrOutput, 'Could not open codec');
ExitCode := 1;
Exit;
end;
f := FileCreate(filename);
if f = INVALID_HANDLE_VALUE then
begin
Writeln(ErrOutput, Format('Could not open %s', [filename]));
ExitCode := 1;
Exit;
end;
frame := av_frame_alloc();
if not Assigned(frame) then
begin
Writeln(ErrOutput, 'Could not allocate video frame');
ExitCode := 1;
Exit;
end;
frame.format := Ord(c.pix_fmt);
frame.width := c.width;
frame.height := c.height;
(* the image can be allocated by any means and av_image_alloc() is
* just the most convenient way if av_malloc() is to be used *)
ret := av_image_alloc(@frame.data[0], @frame.linesize[0], c.width, c.height,
c.pix_fmt, 32);
if ret < 0 then
begin
Writeln(ErrOutput, 'Could not allocate raw picture buffer');
ExitCode := 1;
Exit;
end;
(* encode 1 second of video *)
idx := 1;
for i := 0 to 25 - 1 do
begin
av_init_packet(@pkt);
pkt.data := nil; // packet data will be allocated by the encoder
pkt.size := 0;
//fflush(stdout);
(* prepare a dummy image *)
(* Y *)
// Full-resolution luma plane: a moving diagonal gradient.
for y := 0 to c.height - 1 do
for x := 0 to c.width - 1 do
PByte(@PAnsiChar(frame.data[0])[y * frame.linesize[0] + x])^ := x + y + i * 3;
(* Cb and Cr *)
// Chroma planes are quarter size (4:2:0), hence the div 2 bounds.
for y := 0 to c.height div 2 - 1 do
for x := 0 to c.width div 2 - 1 do
begin
PByte(@PAnsiChar(frame.data[1])[y * frame.linesize[1] + x])^ := 128 + y + i * 2;
PByte(@PAnsiChar(frame.data[2])[y * frame.linesize[2] + x])^ := 64 + x + i * 5;
end;
frame.pts := i;
(* encode the image *)
ret := avcodec_encode_video2(c, @pkt, frame, @got_output);
if ret < 0 then
begin
Writeln(ErrOutput, 'Error encoding frame');
ExitCode := 1;
Exit;
end;
if got_output <> 0 then
begin
Writeln(Format('Write frame %d (size=%d)', [idx, pkt.size]));
FileWrite(f, pkt.data^, pkt.size);
av_packet_unref(@pkt);
Inc(idx);
end;
end;
(* get the delayed frames *)
// Flush the encoder by passing nil frames until no more output arrives.
repeat
//fflush(stdout);
ret := avcodec_encode_video2(c, @pkt, nil, @got_output);
if ret < 0 then
begin
Writeln(ErrOutput, 'Error encoding frame');
ExitCode := 1;
Exit;
end;
if got_output <> 0 then
begin
Writeln(Format('Write frame %d (size=%d)', [idx, pkt.size]));
FileWrite(f, pkt.data^, pkt.size);
av_packet_unref(@pkt);
Inc(idx);
end;
until got_output = 0;
(* add sequence end code to have a real mpeg file *)
FileWrite(f, endcode[0], SizeOf(endcode));
FileClose(f);
avcodec_close(c);
av_free(c);
av_freep(@frame.data[0]);
av_frame_free(@frame);
Writeln('');
end;yes we know this formula, but what should we do with (* Cb and Cr *) loop that goes up to c.height div 2 - 1 and c.width div 2 - 1 ? all our experiments make correct image geometry but incorrect colors... Here is what we have :
(* Y *)
for y := 0 to c.height - 1 do
begin
Line := image.ScanLine[y];
for x := 0 to c.width - 1 do
begin
Yy := Round(Line[x].R*0.29900 + Line[x].G*0.58700 + Line[x].B*0.11400);
PByte(@PAnsiChar(frame.data[0])[y * frame.linesize[0] + x])^ := Yy;
end;
end;
(* Cb and Cr *)
for y := 0 to c.height div 2 - 1 do
begin
Pixels := image.ScanLine[y];
for x := 0 to c.width div 2 - 1 do
begin
Cb := Round(Line[x].R * (-0.16874) - Line[x].G * 0.33126 + Line[x].B * 0.50000) + 128;
Cr := Round(Line[x].R * 0.50000 - Line[x].G * 0.41869 - Line[x].B * 0.08131) + 64;
PByte(@PAnsiChar(frame.data[1])[y * frame.linesize[1] + x])^ := Cr;
PByte(@PAnsiChar(frame.data[2])[y * frame.linesize[2] + x])^ := Cb;
//PByte(@PAnsiChar(frame.data[1])[y * frame.linesize[1] + x])^ := 128 + y + i * 2;
//PByte(@PAnsiChar(frame.data[2])[y * frame.linesize[2] + x])^ := 64 + x + i * 5;
end;
end;
end;How this should be fixed ?