
Recherche avancée
Autres articles (38)
-
La sauvegarde automatique de canaux SPIP
1er avril 2010 — Dans le cadre de la mise en place d’une plateforme ouverte, il est important pour les hébergeurs de pouvoir disposer de sauvegardes assez régulières pour parer à tout problème éventuel.
Pour réaliser cette tâche on se base sur deux plugins SPIP : Saveauto qui permet une sauvegarde régulière de la base de donnée sous la forme d’un dump mysql (utilisable dans phpmyadmin) mes_fichiers_2 qui permet de réaliser une archive au format zip des données importantes du site (les documents, les éléments (...) -
MediaSPIP v0.2
21 juin 2013 — MediaSPIP 0.2 est la première version de MediaSPIP stable.
Sa date de sortie officielle est le 21 juin 2013 et est annoncée ici.
Le fichier zip ici présent contient uniquement les sources de MediaSPIP en version standalone.
Comme pour la version précédente, il est nécessaire d’installer manuellement l’ensemble des dépendances logicielles sur le serveur.
Si vous souhaitez utiliser cette archive pour une installation en mode ferme, il vous faudra également procéder à d’autres modifications (...) -
Mise à disposition des fichiers
14 avril 2011 — Par défaut, lors de son initialisation, MediaSPIP ne permet pas aux visiteurs de télécharger les fichiers qu’ils soient originaux ou le résultat de leur transformation ou encodage. Il permet uniquement de les visualiser.
Cependant, il est possible et facile d’autoriser les visiteurs à avoir accès à ces documents et ce sous différentes formes.
Tout cela se passe dans la page de configuration du squelette. Il vous faut aller dans l’espace d’administration du canal, et choisir dans la navigation (...)
Sur d’autres sites (4896)
-
Android h264 decode non-existing PPS 0 referenced
8 juin 2023, par nmxprime — In Android JNI, using ffmpeg with libx264,
I use the code below to encode and decode raw RGB data. I should use swscale to convert RGB565 to YUV420P as required by H.264, but I am not clear about this conversion. Please help me find where I am wrong, with regard to the log I get.


Code for Encoding



codecinit()- called once(JNI wrapper function)



/*
 * JNI entry: one-time H.264 encoder initialization.
 * Uses file-scope globals (codec, c, picture, outbuf_size, size, ret)
 * shared with the per-frame encode function.
 * Returns 0 on success, -1 on any initialization failure.
 */
int Java_com_my_package_codecinit (JNIEnv *env, jobject thiz) {
avcodec_register_all();

codec = avcodec_find_encoder(AV_CODEC_ID_H264);//AV_CODEC_ID_MPEG1VIDEO);
/* BUG FIX: the NULL check must come BEFORE any dereference of `codec`.
 * The original read codec->id first, which is undefined behavior when
 * the H.264 encoder is not compiled into this FFmpeg build. */
if (!codec) {
    fprintf(stderr, "codec not found\n");
    __android_log_write(ANDROID_LOG_ERROR, "codec", "not found");
    return -1;
}
if (codec->id == AV_CODEC_ID_H264)
    __android_log_write(ANDROID_LOG_ERROR, "set", "h264_encoder");

__android_log_write(ANDROID_LOG_ERROR, "codec", "alloc-contest3");
c = avcodec_alloc_context3(codec);
if (c == NULL) {
    __android_log_write(ANDROID_LOG_ERROR, "avcodec", "context-null");
    return -1; /* BUG FIX: do not keep configuring a NULL context */
}

picture = av_frame_alloc();
if (picture == NULL) {
    __android_log_write(ANDROID_LOG_ERROR, "picture", "context-null");
    return -1;
}

/* Encoder parameters: 480x800 @ 25 fps, YUV420P (required by libx264
 * for the common profiles). */
c->bit_rate = 400000;
c->width = 480;
c->height = 800;
c->time_base = (AVRational){1, 25};
c->gop_size = 10;      /* one intra frame every 10 frames */
c->max_b_frames = 1;
c->pix_fmt = AV_PIX_FMT_YUV420P;
outbuf_size = 768000;

size = (c->width * c->height);

if (avcodec_open2(c, codec, NULL) < 0) {
    __android_log_write(ANDROID_LOG_ERROR, "codec", "could not open");
    return -1; /* BUG FIX: encoding with an unopened context crashes later */
}

/* Allocate the YUV420P planes; 32-byte alignment for SIMD access. */
ret = av_image_alloc(picture->data, picture->linesize, c->width, c->height,
    c->pix_fmt, 32);
if (ret < 0) {
    __android_log_write(ANDROID_LOG_ERROR, "image", "alloc-failed");
    fprintf(stderr, "could not alloc raw picture buffer\n");
    return -1;
}

picture->format = c->pix_fmt;
picture->width = c->width;
picture->height = c->height;
return 0;
}




encodeframe()-called in a while loop



/*
 * Per-frame JNI entry point: encode one captured buffer from Java.
 * `buffer` holds raw RGB565 pixels (2 bytes/pixel — see inLinesize).
 * Relies on file-scope globals set up by codecinit(): c, picture, pkt,
 * ret, got_output, Output, y.
 * Returns the avcodec_encode_video2() result (>= 0 success, negative
 * AVERROR on failure).
 */
int Java_com_my_package_encodeframe (JNIEnv *env, jobject thiz,jbyteArray buffer) {
jbyte *temp= (*env)->GetByteArrayElements(env, buffer, 0);
Output = (char *)temp;
/* Prepared for the (missing) sws_scale() call: one source plane of
 * RGB565 data, 2 bytes per pixel. Currently unused. */
const uint8_t * const inData[1] = { Output }; 
const int inLinesize[1] = { 2*c->width };

//swscale should implement here
/* NOTE(review): the RGB565 -> YUV420P conversion is indeed missing;
 * without it the encoder receives RGB bytes interpreted as YUV420P. */

 av_init_packet(&pkt);
 pkt.data = NULL; // packet data will be allocated by the encoder
 pkt.size = 0;

 fflush(stdout);
/* NOTE(review): this overwrites the Y-plane pointer that
 * av_image_alloc() filled in during codecinit() (leaking that buffer)
 * and feeds raw RGB565 bytes as if they were luma; data[1]/data[2]
 * still point at the old U/V planes. sws_scale() should write INTO the
 * allocated planes instead. Also: picture->pts is never set, so every
 * frame is submitted with the same timestamp. */
picture->data[0] = Output;
ret = avcodec_encode_video2(c, &pkt, picture,&got_output);

 fprintf(stderr,"ret = %d, got-out = %d \n",ret,got_output);
 if (ret < 0) {
 __android_log_write(ANDROID_LOG_ERROR, "error","encoding");
 if(got_output > 0)
 __android_log_write(ANDROID_LOG_ERROR, "got_output","is non-zero");

 }

 if (got_output) {
 fprintf(stderr,"encoding frame %3d (size=%5d): (ret=%d)\n", 1, pkt.size,ret);
 fprintf(stderr,"before caling decode");
 /* NOTE(review): decode_inline() advances pkt.data/pkt.size as it
  * consumes the packet, so av_free_packet() below may be handed an
  * advanced pointer — decode_inline must restore (or copy) the
  * packet before returning. */
 decode_inline(&pkt); //function that decodes right after the encode
 fprintf(stderr,"after caling decode");


 av_free_packet(&pkt);
 }


fprintf(stderr,"y val: %d \n",y); /* `y` is a global; its meaning is not visible in this file */


/* NOTE(review): the third argument should be `temp` (the jbyte* JNI
 * handed out); passing `Output` works only because it is the same
 * pointer cast to char*. */
(*env)->ReleaseByteArrayElements(env, buffer, Output, 0);
return ((ret));
}




decode_inline() function



/*
 * Debug helper: immediately decode the packet the encoder just produced.
 * Returns 0 on success, -1 on failure (the original called exit(1),
 * which would kill the whole Android process).
 *
 * NOTE(review): a brand-new decoder is created for every packet, so it
 * has never seen the stream's SPS/PPS unless this very packet carries
 * them. That is consistent with the "non-existing PPS 0 referenced"
 * log lines — keep ONE persistent decoder context across calls instead.
 */
int decode_inline(AVPacket *avpkt)
{
    AVCodec *dec_codec;
    AVCodecContext *dec_ctx = NULL;
    AVFrame *rawFrame, *rgbFrame;
    int frame_no = 0, got_picture = 0, len = 0, remaining, rc = 0;
    /* BUG FIX: these were 768000-byte automatic arrays — 1.5 MB of
     * stack easily overflows an Android thread stack. `static` moves
     * them off the stack (this debug helper is not re-entrant anyway). */
    static char rawBuf[768000], rgbBuf[768000];
    /* Remember the caller's packet fields: we advance avpkt->data
     * below, but the caller still owns the packet and will call
     * av_free_packet() on it, which must see the original pointer. */
    uint8_t *orig_data = avpkt->data;
    int orig_size = avpkt->size;

    avcodec_register_all();

    /* BUG FIX: the original passed an UNINITIALIZED `codec` pointer to
     * avcodec_alloc_context3() and only looked the decoder up
     * afterwards — undefined behavior. Find the decoder first. */
    dec_codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!dec_codec) {
        fprintf(stderr, "codec not found\n");
        return -1;
    }

    dec_ctx = avcodec_alloc_context3(dec_codec);
    if (dec_ctx == NULL) {
        __android_log_write(ANDROID_LOG_ERROR, "avcodec", "context-null");
        return -1;
    }
    dec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
    dec_ctx->width = 480;
    dec_ctx->height = 800;

    rawFrame = av_frame_alloc();
    rgbFrame = av_frame_alloc();

    if (avcodec_open2(dec_ctx, dec_codec, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        rc = -1;
        goto cleanup;
    }

    /* NOTE(review): the SwsContext the original created here was never
     * used and was leaked; recreate it (and sws_freeContext() it) when
     * the YUV420P -> RGB565 conversion below is actually implemented. */

    rawFrame->data[0] = (uint8_t *)rawBuf; /* BUG FIX: was &rawBuf, a char (*)[768000] */
    rgbFrame->data[0] = (uint8_t *)rgbBuf;

    fprintf(stderr, "size of avpkt %d \n", avpkt->size);
    remaining = avpkt->size;
    while (remaining > 0) {
        len = avcodec_decode_video2(dec_ctx, rawFrame, &got_picture, avpkt);
        if (len < 0) {
            fprintf(stderr, "Error while decoding frame %d\n", frame_no);
            rc = -1;
            break;
        }
        remaining -= len;
        avpkt->data += len;
        avpkt->size = remaining; /* BUG FIX: size must track the advanced data pointer */

        if (got_picture) {
            printf("saving frame %3d\n", frame_no);
            fflush(stdout);
            /* TODO: copy/convert rawFrame into rgbBuf once swscale is wired up */
            frame_no++;
        }
    }

cleanup:
    /* Restore the packet so the caller can free it safely. */
    avpkt->data = orig_data;
    avpkt->size = orig_size;

    avcodec_close(dec_ctx);
    av_free(dec_ctx);
    av_frame_free(&rawFrame);
    av_frame_free(&rgbFrame);

    return rc;
}



The log i get



For the decode_inline() function :





01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] non-existing PPS 0 referenced
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] decode_slice_header error
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] non-existing PPS 0 referenced
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] decode_slice_header error
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] non-existing PPS 0 referenced
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] decode_slice_header error
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] non-existing PPS 0 referenced
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] decode_slice_header error
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] non-existing PPS 0 referenced
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] decode_slice_header error
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] non-existing PPS 0 referenced
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] decode_slice_header error
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] non-existing PPS 0 referenced
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] decode_slice_header error
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] non-existing PPS 0 referenced
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] decode_slice_header error
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] non-existing PPS 0 referenced
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] decode_slice_header error
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] Invalid mix of idr and non-idr slices
01-02 14:50:50.160: I/stderr(3407): Error while decoding frame 0




Edit : Changing GOP value :



If i change
c->gop_size = 3;
as expected it emits one I-frame
every three frames. The “non-existing PPS 0 referenced”
message is then absent on every third execution, but all the other executions still show this message.

-
How to publish selfmade stream with ffmpeg and c++ to rtmp server ?
10 janvier 2024, par rLino — Have a nice day to you, people !



I am writing an application for Windows that will capture the screen and send the stream to Wowza server by rtmp (for broadcasting). My application use ffmpeg and Qt.
I capture the screen with WinApi, convert a buffer to YUV444(because it's simplest) and encode frame as described at the file decoding_encoding.c (from FFmpeg examples) :



///////////////////////////
//Encoder initialization
///////////////////////////
/* Encoder setup for screen-capture -> H.264.
 * NOTE(review): `codec`, `c`, `frame`, `pkt`, `got_output`, `ret`,
 * `shotbuf`, `scr_width`, `scr_height` are declared elsewhere and are
 * not visible in this snippet. */
avcodec_register_all();
codec=avcodec_find_encoder(AV_CODEC_ID_H264);
/* NOTE(review): `codec` is not NULL-checked; if this FFmpeg build lacks
 * libx264, avcodec_alloc_context3(NULL) below misbehaves. */
c = avcodec_alloc_context3(codec);
c->width=scr_width;
c->height=scr_height;
c->bit_rate = 400000;
int base_num=1;
int base_den=1;//for one frame per second
c->time_base= (AVRational){base_num,base_den};
c->gop_size = 10;
c->max_b_frames=1;
/* NOTE(review): YUV444P requires x264's High 4:4:4 profile; most
 * RTMP/FLV consumers expect YUV420P — confirm the target supports it. */
c->pix_fmt = AV_PIX_FMT_YUV444P;
av_opt_set(c->priv_data, "preset", "slow", 0);

/* NOTE(review): avcodec_alloc_frame()/avcodec_free_frame() are the
 * deprecated names for av_frame_alloc()/av_frame_free(). Also, no
 * buffer is visibly allocated for frame->data before RGBtoYUV() writes
 * into it — presumably RGBtoYUV() or code outside this snippet
 * allocates the planes; verify (the usual step is av_image_alloc or
 * avpicture_alloc). */
frame = avcodec_alloc_frame();
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;

for(int counter=0;counter<10;counter++)
{
///////////////////////////
//Capturing Screen
///////////////////////////
 GetCapScr(shotbuf,scr_width,scr_height);//result: shotbuf is filled by screendata from HBITMAP
///////////////////////////
//Convert buffer to YUV444 (standard formula)
//It's handmade function because of problems with prepare buffer to swscale from HBITMAP
///////////////////////////
 RGBtoYUV(shotbuf,frame->linesize,frame->data,scr_width,scr_height);//result in frame->data
///////////////////////////
//Encode Screenshot
///////////////////////////
 av_init_packet(&pkt);
 pkt.data = NULL; // packet data will be allocated by the encoder
 pkt.size = 0;
 frame->pts = counter;
 avcodec_encode_video2(c, &pkt, frame, &got_output);
 if (got_output) 
 {
 //I think that sending packet by rtmp must be here!
 /* NOTE(review): yes — this is the spot. The usual sequence is:
  * create an AVFormatContext with avformat_alloc_output_context2
  * (format "flv", URL "rtmp://..."), avio_open() it, add a video
  * stream, write the header once, then here set pkt.stream_index,
  * rescale pkt timestamps to the stream time_base, and call
  * av_interleaved_write_frame(ofmt_ctx, &pkt). */
 av_free_packet(&pkt); 

 }

}
// Get the delayed frames
/* NOTE(review): av_init_packet() is not re-called before reusing `pkt`
 * in this flush loop; the encoder overwrites data/size, but flags from
 * the previous packet may linger — safer to re-init per iteration. */
for (int got_output = 1,i=0; got_output; i++)
{
 ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
 if (ret < 0)
 {
 fprintf(stderr, "Error encoding frame\n");
 exit(1);
 }
 if (got_output)
 {
 //I think that sending packet by rtmp must be here!
 /* NOTE(review): same muxer call as above; after this loop, write
  * the trailer with av_write_trailer() before closing. */
 av_free_packet(&pkt); 
 }
}

///////////////////////////
//Deinitialize encoder
///////////////////////////
avcodec_close(c);
av_free(c);
/* NOTE(review): av_freep(&frame->data[0]) assumes data[0] owns a single
 * contiguous allocation — confirm that matches how RGBtoYUV's buffers
 * were allocated. */
av_freep(&frame->data[0]);
avcodec_free_frame(&frame);




I need to send video stream generated by this code to RTMP server.
In other words, I need c++/c analog for this command :



ffmpeg -re -i "sample.h264" -f flv rtmp://sample.url.com/screen/test_stream




It's useful, but I don't want to save stream to file, I want to use ffmpeg libraries for realtime encoding screen capture and sending encoded frames to RTMP server inside my own application.
Please give me a little example how to initialize AVFormatContext properly and to send my encoded video AVPackets to server.



Thanks.


-
avcodec_open2 returns -22 "Invalid argument" trying to encode AV_CODEC_ID_H264
26 mai 2023, par Fries of Doom — I'm trying to use libavcodec to encode h264 video, but avcodec_open2 returns -22 "Invalid argument" and I can't figure out why. Here is my code, which is mostly a copy of the encode example from libavcodec.


/* Look up the H.264 encoder (the original comment said "mpeg1video",
 * but the code requests AV_CODEC_ID_H264, i.e. libx264). */
const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!codec) {
    fprintf(stderr, "Codec '%s' not found\n", "h.264");
    exit(1);
}

AVCodecContext* codecContext = avcodec_alloc_context3(codec);
if (!codecContext) {
    fprintf(stderr, "Could not allocate video codec context\n");
    exit(1);
}

AVPacket* pkt = av_packet_alloc();
if (!pkt)
    exit(1);

/* Sample parameters: 1080p @ 25 fps. */
codecContext->bit_rate = 400000;
/* resolution must be a multiple of two */
codecContext->width = 1920;
codecContext->height = 1080;
/* frames per second */
codecContext->time_base = { 1, 25 };
codecContext->framerate = { 25, 1 };

/* Emit one intra frame every ten frames.
 * (gop_size is ignored for frames whose pict_type is forced to
 * AV_PICTURE_TYPE_I before encoding.) */
codecContext->gop_size = 10;
/* BUG FIX: the H.264 *baseline* profile forbids B-frames. With
 * max_b_frames = 1 plus profile "baseline" below, libx264 rejects the
 * configuration in x264_param_apply_profile(), and avcodec_open2()
 * surfaces that as -22 (AVERROR(EINVAL), "Invalid argument") — the
 * exact failure observed. Request zero B-frames for baseline. */
codecContext->max_b_frames = 0;
codecContext->codec_type = AVMEDIA_TYPE_VIDEO;
codecContext->pix_fmt = AV_PIX_FMT_YUV420P;

if (codec->id == AV_CODEC_ID_H264)
    av_opt_set(codecContext->priv_data, "profile", "baseline", 0);

/* open it */
int ret = avcodec_open2(codecContext, codec, nullptr);
if (ret < 0) {
    char eb[AV_ERROR_MAX_STRING_SIZE];
    fprintf(stderr, "Could not open codec: %s\n", av_make_error_string(eb, AV_ERROR_MAX_STRING_SIZE, ret));
    exit(1);
}



Does anyone know what I'm doing wrong ?