Advanced search

Media (1)

Keyword: - Tags -/sintel

Other articles (63)

  • Customize by adding your logo, banner, or background image

    5 September 2013, by

    Some themes support three customization elements: adding a logo; adding a banner; adding a background image.

  • Writing a news item

    21 June 2013, by

    Present the changes to your MédiaSPIP, or news about your projects hosted on it, using the news section.
    In spipeo, MédiaSPIP's default theme, news items are displayed at the bottom of the main page, below the editorials.
    You can customize the form used to create a news item.
    News-item creation form: for a document of type "news item", the fields offered by default are: Publication date (customize the publication date) (...)

  • Publishing on MédiaSPIP

    13 June 2013

    Can I post content from an iPad tablet?
    Yes, if your installed MédiaSPIP is at version 0.2 or higher. If needed, contact your MédiaSPIP administrator to find out.

On other sites (8230)

  • Pushing video with the FFmpeg API is terminated with "Error occurred: Broken pipe"

    1 June 2021, by guoyanzhang

    I have a problem: I used the FFmpeg API to push video to the RTMP server address PushVideoInfo.VideoServer.
    In my code, the value of PushVideoInfo.VideoServer is rtmp://192.168.128.29:1935/live/SP_20210531180743756wExIOPLvAjK2. The streaming process was interrupted, and I don't know why. The following error is printed:

    


    Send        0 video frames to output URL
Send        1 video frames to output URL
Send        2 video frames to output URL
Send        3 video frames to output URL
Send        4 video frames to output URL
Send        5 video frames to output URL
Send        6 video frames to output URL
Send        7 video frames to output URL
Send        8 video frames to output URL
Send        9 video frames to output URL
Send       10 video frames to output URL
---------------------------------stop push video -------------------------------------------
Error occurred: Broken pipe


    


    What are the possible causes of this error, on the RTMP client side or on the RTMP server side?
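
    One way to narrow this down from the client side is to log the exact error code instead of the generic message. A minimal sketch, assuming an output context octx opened as in the code below (the helper name is made up for illustration): av_strerror() reports the precise failure, and EPIPE or ECONNRESET on the write path generally means the RTMP server, or something between client and server, closed the TCP connection first.

    #include <errno.h>
    #include <stdio.h>
    #include <libavutil/error.h>

    /* Hypothetical helper: classify a failure returned by av_interleaved_write_frame(). */
    static void report_write_error(int err)
    {
        char msg[AV_ERROR_MAX_STRING_SIZE] = {0};
        av_strerror(err, msg, sizeof(msg));
        if (err == AVERROR(EPIPE) || err == AVERROR(ECONNRESET))
            /* The peer closed the TCP socket first: look at the RTMP server (or a proxy). */
            printf("connection closed by the server: %s\n", msg);
        else
            /* Anything else points at the client-side muxing/IO path. */
            printf("client-side write failed: %s\n", msg);
    }

    Calling this where the loop below does goto end after av_interleaved_write_frame() fails would show whether the pipe broke on the socket or elsewhere.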

    


#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfilter.h>
#include <libavutil/mathematics.h>
#include <libavutil/time.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>

extern VideoDataStruct *VideoDataListHeader;
extern PushVideoStruct PushVideoInfo;
extern enum IsPushingVideo IsPushingVideoFlag;
extern UCHAR ChangeAnotherVideo;
typedef long long int64;


#define READ_BUF_LEN       1024*8

extern enum IsStopPushVideo StopPushVideoFlag;  

/* Custom AVIO read callback: blocks until enough buffered data is available
 * in the VideoDataListHeader list, then copies it into buf. */
static int read_packet(void *opaque, uint8_t *buf, int buf_size)
{
    int64 dataLen = 0;

    while (dataLen < buf_size)
    {
        if ((VideoDataListHeader != NULL) && (VideoDataListHeader->flag == 1))
        {
            /* Copy one list node's payload and mark the node as consumed. */
            memcpy(&buf[dataLen], VideoDataListHeader->buf, sizeof(VideoDataListHeader->buf));
            dataLen += sizeof(VideoDataListHeader->buf);

            VideoDataListHeader->flag = 0;
            VideoDataListHeader = VideoDataListHeader->next;
        }
        else
        {
            /* No data ready yet: wait 10 ms for the producer thread. */
            usleep(10000);
        }
    }
    return buf_size;
}

void *PushVideoFunction(void *arg)
{
    AVFormatContext *m_pFmtCtx = NULL;
    AVPacket pkt;   
    AVIOContext *m_pIOCtx = NULL;
    AVInputFormat *in_fmt = NULL;
    int ret = 0;
    unsigned int i = 0;
    int vid_idx =-1;
    unsigned char *m_pIOBuf = NULL;
    int m_pIOBuf_size = READ_BUF_LEN;
    int64 start_time = 0;
    int frame_index = 0;
    //const char *rtmp_url = "rtmp://192.168.1.108/mytv/01";
    char rtmp_url[140] = {0};
    memset(rtmp_url, 0, sizeof(rtmp_url));
    strcpy(rtmp_url, PushVideoInfo.VideoServer);
    CHAR fileName[64] = {0};

    avformat_network_init(); 
    if (strcmp(PushVideoInfo.VideoType, REAL_VIDEO) == 0) 
    {
        m_pIOBuf = (unsigned char*)av_malloc(m_pIOBuf_size);
        if(m_pIOBuf == NULL)
        {
            printf("av malloc failed!\n");
            goto end;
        }

    
        m_pIOCtx = avio_alloc_context(m_pIOBuf, m_pIOBuf_size, 0, NULL, read_packet, NULL, NULL);       
        if (!m_pIOCtx) 
        {
            printf("avio alloc context failed!\n");
            goto end;
        }

    
        m_pFmtCtx = avformat_alloc_context();
        if (!m_pFmtCtx)  
        {
            printf("avformat alloc context failed!\n");
            goto end;
        }


        //m_pFmtCtx->probesize = BYTES_PER_FRAME * 8;
        m_pFmtCtx->pb = m_pIOCtx;  
        ret = avformat_open_input(&m_pFmtCtx, "", in_fmt, NULL);
    }
    else if (strcmp(PushVideoInfo.VideoType, HISTORY_VIDEO) == 0) 
    {
        sprintf(fileName, "%s", VIDEO_FILE_FOLDER);
        sprintf(fileName+strlen(fileName), "%s", PushVideoInfo.VideoFile);
        ret = avformat_open_input(&m_pFmtCtx, fileName, NULL, NULL);
    }
    if (ret < 0)
    {
        printf("avformat open failed!\n");
        goto end;           
    }

    ret = avformat_find_stream_info(m_pFmtCtx, 0);
    if (ret < 0)
    {
        printf("could not find stream info!\n");
        goto end;           
    }       
    for(i = 0; i < m_pFmtCtx->nb_streams; i++)
    {
        if((m_pFmtCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) && (vid_idx < 0))
        {
            vid_idx = i;
        }
    }

    AVFormatContext *octx = NULL;

    ret = avformat_alloc_output_context2(&octx, 0, "flv", rtmp_url);
    if (ret < 0)
    {
        printf("avformat alloc output context2 failed!\n");
        goto end;
    }   

    av_init_packet(&pkt);    

    
    for (i = 0;i < m_pFmtCtx->nb_streams; i++)
    {
        AVCodec *codec = avcodec_find_decoder(m_pFmtCtx->streams[i]->codecpar->codec_id);
        AVStream *out = avformat_new_stream(octx, codec);
        ret = avcodec_parameters_copy(out->codecpar, m_pFmtCtx->streams[i]->codecpar);
        out->codecpar->codec_tag = 0;
    }

    ret = avio_open(&octx->pb, rtmp_url, AVIO_FLAG_WRITE);
    if (ret < 0)   /* check the return code, not just octx->pb */
    {
        printf("avio open failed!\n");
        goto end;
    }

    ret = avformat_write_header(octx, 0);
    if (ret < 0)
    {
        printf("avformat write header failed!\n");
        goto end;           
    }

    start_time = av_gettime();
    AVStream *in_stream, *out_stream;
    AVRational time_base1;
    AVRational time_base;
    AVRational time_base_q;
    int64 calc_duration;
    int64 pts_time;
    int64 now_time;
    
    ChangeAnotherVideo = 0;
    while((!StopPushVideoFlag) && (ChangeAnotherVideo == 0))
    {
        ret = av_read_frame(m_pFmtCtx, &pkt);
        if (ret < 0)
        {
            break;
        }
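        /* Some raw inputs carry no timestamps: synthesize pts/dts from the frame rate. */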
        if (pkt.pts == AV_NOPTS_VALUE)
        {
            time_base1 = m_pFmtCtx->streams[vid_idx]->time_base;
            calc_duration = (double)AV_TIME_BASE/av_q2d(m_pFmtCtx->streams[vid_idx]->r_frame_rate);
            
            pkt.pts = (double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
            pkt.dts = pkt.pts;
            pkt.duration = (double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
        }
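        /* Throttle to real time: wait until the packet's timestamp catches up with
         * the wall clock so the push does not outrun the live source. */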
        if (pkt.stream_index == vid_idx)
        {
            time_base = m_pFmtCtx->streams[vid_idx]->time_base;
            time_base_q = (AVRational){1, AV_TIME_BASE};            
            pts_time = av_rescale_q(pkt.dts, time_base, time_base_q);
            now_time = av_gettime() - start_time;
            if (pts_time > now_time)
            {
                av_usleep(pts_time - now_time);
            }
        }
        in_stream  = m_pFmtCtx->streams[pkt.stream_index];
        out_stream = octx->streams[pkt.stream_index];
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        if(pkt.stream_index == vid_idx)
        {
            printf("Send %8d video frames to output URL\n",frame_index);
            frame_index++;
        }
        ret = av_interleaved_write_frame(octx, &pkt);
        if (ret < 0)
        {
            goto end;
        }
        av_packet_unref(&pkt);
    }
    
end:
    printf("---------------------------------stop push video -------------------------------------------\n");
    StopPushVideoFlag = NO_STOP_PUSH;
    IsPushingVideoFlag = NO_PUSHING;    
    ChangeAnotherVideo = 0;
    avformat_close_input(&m_pFmtCtx);
    if (octx)
    {
        avio_closep(&octx->pb);
        avformat_free_context(octx);
    }
    /* note: the internal buffer could have changed, and be != avio_ctx_buffer */
    if (m_pIOCtx) 
    {
        av_freep(&m_pIOCtx->buffer);
        av_freep(&m_pIOCtx);
    }

    if (ret < 0) 
    {
        printf("Error occured : %s\n", av_err2str(ret));
        //return 1;
    }
    pthread_exit((void*)"push video end!"); 
    
}


void PushVideo(void)
{
    int ret = 0;
    pthread_t pushVideoThread;

    ret = pthread_create(&pushVideoThread, NULL, PushVideoFunction, NULL);
    if(ret != 0)
    {
        printf("error : push video thread create failed!\n");
        exit(-1);
    }
    else
    {
        printf("(debug) push video thread create success!\n");
    } 
} 


    


    I grabbed a pcap file with tcpdump, analyzed it in Wireshark, and got this message:

    


    37  0.400350    172.17.4.58 192.168.11.240  RTMP    411 @setDataFrame()|Video Data|FCUnpublish()|deleteStream()


    


  • Android javacv FFMpeg overlay animated gif over video

    5 April 2019, by Diego Perez

    I’m developing an Android app that creates a video (from an image plus an mp3 file) with javacv and FFMpeg, and the video-generation part is working fine. My problem comes now that I would like to overlay a transparent-background animated gif on the video; I’m really stuck, and every attempt so far has failed.

    To be honest, this is my first experience with javacv and FFMpeg, so I have little knowledge of how to work with filters.

    I’ll paste my complete video-creation method below; let’s see if anyone can help me. Again, my problem is overlaying the animated gif; the video (with the filter part removed) is created just fine. The most I have been able to achieve is overlaying a small static (one-frame) gif with no transparency in the top-left corner of the video, but what I’d like is a transparent gif with the same dimensions as the video, over it.

    Maybe you will see nonsense code, but remember this is due to my lack of knowledge of javacv and FFMpeg filters.

    I hope anyone can help me.

    Thanks.

    PS. I have read this post: javaCV Android, Strange colors on overlay while streaming to rtmp server

    by @schw4ndi, but I wasn’t able to do anything with it.

    static String recordVideo(JSONObject objJSON) {

       try {
           fileName = objJSON.has("file_name") ? String.valueOf(objJSON.getString("file_name")) : "";
           videoPath = objJSON.has("video_path") ? String.valueOf(objJSON.getString("video_path")) : "";
       } catch (JSONException e) {
           ExceptionHandler.logException(e);
       }

       String strReturn = Enum.Result.OK;

       if (!fileName.equals("") && !videoPath.equals("")) {
           try {
               String outputFilename = videoPath + "/" + fileName;
               //video grabber
               FrameGrabber grabber1 = new FFmpegFrameGrabber(outputFilename + ".jpg");
               //audio grabber
               FrameGrabber grabber2 = new FFmpegFrameGrabber(outputFilename + ".mp3");

               grabber1.start();
               grabber2.start();

               FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(outputFilename + ".mp4", 1080, 1920,2);

               //video
               recorder.setVideoCodec(avcodec.AV_CODEC_ID_H264);
               recorder.setVideoOption("tune", "zerolatency");
               recorder.setFrameRate(30);
               recorder.setVideoBitrate(128000);
               recorder.setVideoOption("crf", "28");
               recorder.setVideoQuality(0); //highest quality
               recorder.setVideoOption("preset", "fast");
               recorder.setFormat("mp4");

               //audio
               recorder.setAudioCodec(avcodec.AV_CODEC_ID_AAC);
               recorder.setSampleRate(44100);
               recorder.setAudioBitrate(128000);
               recorder.setAudioOption("crf", "0"); //no variable bitrate audio (constant rate factor)
               recorder.setAudioQuality(0); //highest quality

               FFmpegFrameFilter filter = null;
               try{
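                   // Note (hedged suggestion, not part of the original post): by
                   // default the movie source reads the GIF only once;
                   // "movie=...:loop=0" (a loop value below 1 repeats the input
                   // indefinitely) is one way to keep the animation running for
                   // the whole output video.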
                   filter = new FFmpegFrameFilter("movie=" + videoPath + "/anim01.gif [logo];[in][logo]overlay=0:0:format=yuv420 [out]",1080, 1920);
                   filter.start();
               }catch (FrameFilter.Exception e){
                   e.printStackTrace();
               }

               recorder.start();

               Frame frame1, frame2 = null, frame3 = null;

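               // Note (hedged): because of the short-circuit ||, grabber2 is only
               // polled once grabber1 stops returning frames, so a stale audio
               // frame can be recorded repeatedly; grabbing both streams
               // independently in each iteration avoids that.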
               while ((frame1 = grabber1.grabFrame()) != null ||
                       (frame2 = grabber2.grabFrame()) != null) {
                   if (frame1 != null) filter.push(frame1);
                   frame3 = filter.pull();
                   //recorder.record(frame1);
                   recorder.record(frame3, avutil.AV_PIX_FMT_YUV420P);
                   recorder.record(frame2, avutil.AV_PIX_FMT_YUV420P);
               }

               recorder.stop();
               grabber1.stop();
               grabber2.stop();
               filter.stop();

           } catch (Exception e) {
               strReturn = Enum.Result.KO;
               ExceptionHandler.logException(e);
           }
       }

       return strReturn;
    }
  • The hardware decoding was successful, but the hw_frames_ctx in the received frame is empty

    15 July 2024, by mercuric taylor

    I tried to use QSV hardware decoding in FFmpeg, using the integrated graphics 730 in my computer. Here's the code I used to initialize the decoder:

    


const AVCodec* codec = NULL;
int ret;
int err = 0;

// Create the QSV hardware device.
ret = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_QSV, "auto", NULL, 0);
if (ret < 0)
{
    char error_string[AV_ERROR_MAX_STRING_SIZE];
    av_make_error_string(error_string, AV_ERROR_MAX_STRING_SIZE, ret);
    LError("Error creating QSV device: {}", error_string);
    return NULL;
}

// Search for a QSV decoder, either H.264 or H.265.
codec = avcodec_find_decoder_by_name(codec_name);
if (!codec)
{
    LError("Failed to find QSV decoder.");
    return NULL;
}

// Create a decoder context and associate it with the hardware device.
decoder_ctx = avcodec_alloc_context3(codec);
if (!decoder_ctx)
{
    ret = AVERROR(ENOMEM);
    LError("Failed to allocate decoder context.\n");
    return NULL;
}
decoder_ctx->codec_id = AV_CODEC_ID_H264;
decoder_ctx->opaque = &hw_device_ctx;
decoder_ctx->get_format = get_format;

// Open the decoder.
if ((ret = avcodec_open2(decoder_ctx, NULL, NULL)) < 0)
{
    LError("Failed to open decoder: %d\n", ret);
    return NULL;
}

parser_ctx = av_parser_init(avcodec_find_encoder_by_name(codec_name)->id);
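/* Hedged note: avcodec_find_encoder_by_name() is used here only to obtain a
 * codec id for the parser; the id matches the decoder's, but this line would
 * crash if no encoder with that exact name exists. */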


    


    Here is the decoding loop that uses the decoder:

    


    AVFrame* frame = av_frame_alloc();
    AVFrame* dstFrame = av_frame_alloc();
    res = avcodec_send_packet(decoder_ctx, pkt);
    if (res < 0)
    {
        return;
    }
    int num = 0;
    while (res >= 0)
    {
        res = avcodec_receive_frame(decoder_ctx, frame);

        if (res == AVERROR(EAGAIN) || res == AVERROR_EOF)
        {
            //if (res == AVERROR(EAGAIN)) 
            //{
            //   LInfo("AVERROR(EAGAIN):");
            //}
            //if (res == AVERROR_EOF) 
            //{
            //  //  LInfo("AVERROR_EOF");
            //}
           // av_frame_unref(frame);
            break;
        }
        else if (res < 0)
        {
          //  av_frame_unref(frame);
            return;
        }


        frameNumbers_++;
        if (frame->hw_frames_ctx == NULL)
        {
            LError("hw_frames_ctx is null");
            LError("avcodec_receive_frame return is {}", res);
        }
    } /* end of the receive loop (the posted snippet breaks off here) */


    


    My issue is that I've successfully decoded the video: the return value of avcodec_receive_frame is 0, and the width and height of the AVFrame match the input video stream.

    


    However, the hw_frames_ctx field of the AVFrame is empty. Why would this happen in a successful hardware decoding scenario?

    


    Could it be due to some incorrect configuration? I've set up a get_format function like this:

    


    static enum AVPixelFormat get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
{
    while (*pix_fmts != AV_PIX_FMT_NONE) {
        if (*pix_fmts == AV_PIX_FMT_QSV) {
            DecodeContext *decode = (DecodeContext*)avctx->opaque;
            AVHWFramesContext  *frames_ctx;
            AVQSVFramesContext *frames_hwctx;
            int ret;
            /* create a pool of surfaces to be used by the decoder */
            avctx->hw_frames_ctx = av_hwframe_ctx_alloc(decode->hw_device_ref);
            if (!avctx->hw_frames_ctx)
                return AV_PIX_FMT_NONE;
            frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
            frames_hwctx = (AVQSVFramesContext*)frames_ctx->hwctx;
            frames_ctx->format = AV_PIX_FMT_QSV;
            frames_ctx->sw_format = avctx->sw_pix_fmt;
            frames_ctx->width = FFALIGN(avctx->coded_width, 32);
            frames_ctx->height = FFALIGN(avctx->coded_height, 32);
            frames_ctx->initial_pool_size = 32;
            frames_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
            ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
            if (ret < 0)
                return AV_PIX_FMT_NONE;
            return AV_PIX_FMT_QSV;
        }
        pix_fmts++;
    }
    fprintf(stderr, "The QSV pixel format not offered in get_format()\n");
    return AV_PIX_FMT_NONE;
}


    


    But I also noticed that even though I set decoder_ctx->get_format = get_format;, this callback is never actually executed (one possible explanation is sketched after the questions below).

    


    I observed that my GPU is busy while the program runs, which suggests hardware decoding really happens. My next goal is to render frames from the decoded AVFrame. It seems the hw_frames_ctx of the AVFrame is a texture handle on the GPU, and I would like to use this field directly for D3D11 rendering and display it on the screen.
    My questions are:

    


    1. Is the hw_frames_ctx field empty even in the case of successful hardware decoding?
    2. Does it represent a texture handle on the GPU?
    3. If my rendering approach is wrong, how can I correctly render this AVFrame using D3D11?
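
    One possible explanation, sketched below under stated assumptions (hw_device_ctx and decoder_ctx are the ones from the initialization code above; this is an illustration, not a confirmed fix): if codec_name resolves to a software decoder rather than a QSV one such as h264_qsv, decoding succeeds in system memory and hw_frames_ctx stays NULL; and attaching the device via decoder_ctx->hw_device_ctx is the usual way to let a QSV decoder hand out AV_PIX_FMT_QSV frames with a valid hw_frames_ctx.

    /* Before avcodec_open2(): attach the device so AV_PIX_FMT_QSV can be negotiated. */
    decoder_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
    if (!decoder_ctx->hw_device_ctx)
        return NULL;

    /* After a successful avcodec_receive_frame(): check what actually came out. */
    if (frame->format == AV_PIX_FMT_QSV && frame->hw_frames_ctx)
    {
        /* GPU-side frame: it can be downloaded to system memory for inspection... */
        AVFrame *sw_frame = av_frame_alloc();
        if (av_hwframe_transfer_data(sw_frame, frame, 0) < 0)  /* GPU -> CPU copy */
            LError("failed to download hw frame");
        av_frame_free(&sw_frame);
        /* ...or, for zero-copy D3D11 rendering, the decoder can be opened on an
         * AV_HWDEVICE_TYPE_D3D11VA device instead: it then yields AV_PIX_FMT_D3D11
         * frames whose data[0] is an ID3D11Texture2D* (data[1] is the array slice). */
    }
    else
    {
        /* System-memory frame: the decoder fell back to (or was) software output. */
    }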