Advanced search

Media (0)

Keyword: - Tags -/performance

No media matching your criteria is available on the site.

Other articles (61)

  • Permissions overridden by plugins

    27 April 2010, by

    Mediaspip core
    autoriser_auteur_modifier() so that visitors are able to modify their information on the authors page

  • Use, discuss, criticize

    13 April 2011, by

    Talk to people directly involved in MediaSPIP’s development, or to people around you who could use MediaSPIP to share, enhance or develop their creative projects.
    The bigger the community, the more MediaSPIP’s potential will be explored and the faster the software will evolve.
    A discussion list is available for all exchanges between users.

  • Sites built with MediaSPIP

    2 May 2011, by

    This page presents some of the sites running on MediaSPIP.
    You can of course add your own using the form at the bottom of the page.

On other sites (7812)

  • Display ffmpeg frames on OpenGL texture

    21 March 2018, by naki

    I am using Dranger's tutorial01 (ffmpeg) to decode the video and get AVFrames. I want to use OpenGL to display the video.

    http://dranger.com/ffmpeg/tutorial01.html

    The main function is as follows:

    int main (int argc, char** argv) {
    // opengl stuff
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGBA);
    glutInitWindowSize(800, 600);
    glutCreateWindow("Hello GL");

    glutReshapeFunc(changeViewport);
    glutDisplayFunc(render);

    GLenum err = glewInit();
    if(GLEW_OK !=err){
       fprintf(stderr, "GLEW error");
       return 1;
    }

    glClear(GL_COLOR_BUFFER_BIT);


    glEnable(GL_TEXTURE_2D);
    GLuint texture;
    glGenTextures(1, &texture); //Make room for our texture
    glBindTexture(GL_TEXTURE_2D, texture);

    //ffmpeg stuff

    AVFormatContext *pFormatCtx = NULL;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx = NULL;
    AVCodec         *pCodec = NULL;
    AVFrame         *pFrame = NULL;
    AVFrame         *pFrameRGB = NULL;
    AVPacket        packet;
    int             frameFinished;
    int             numBytes;
    uint8_t         *buffer = NULL;

    AVDictionary    *optionsDict = NULL;


    if(argc < 2) {
    printf("Please provide a movie file\n");
    return -1;
    }
    // Register all formats and codecs

    av_register_all();

    // Open video file
    if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
      return -1; // Couldn't open file

    // Retrieve stream information

    if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream

    videoStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
     videoStream=i;
     break;
    }
    if(videoStream==-1)
    return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx=pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL) {
      fprintf(stderr, "Unsupported codec!\n");
      return -1; // Codec not found
    }
    // Open codec
    if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
      return -1; // Could not open codec

    // Allocate video frame
    pFrame=av_frame_alloc();

    // Allocate an AVFrame structure
    pFrameRGB=av_frame_alloc();
    if(pFrameRGB==NULL)
    return -1;

    // Determine required buffer size and allocate buffer
    numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
                 pCodecCtx->height);
    buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

    struct SwsContext      *sws_ctx = sws_getContext(pCodecCtx->width,
              pCodecCtx->height, pCodecCtx->pix_fmt, 800,
              600, PIX_FMT_RGB24, SWS_BICUBIC, NULL,
              NULL, NULL);


    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
        pCodecCtx->width, pCodecCtx->height);

    // Read frames and save first five frames to disk
    i=0;
    while(av_read_frame(pFormatCtx, &packet)>=0) {


    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
     // Decode video frame
     avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
              &packet);

     // Did we get a video frame?
     if(frameFinished) {
    // Convert the image from its native format to RGB
     /*  sws_scale
       (
           sws_ctx,
           (uint8_t const * const *)pFrame->data,
           pFrame->linesize,
           0,
           pCodecCtx->height,
           pFrameRGB->data,
           pFrameRGB->linesize
       );
      */
    sws_scale(sws_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
     // additional opengl
       glBindTexture(GL_TEXTURE_2D, texture);

           //gluBuild2DMipmaps(GL_TEXTURE_2D, 3, pCodecCtx->width, pCodecCtx->height, GL_RGB, GL_UNSIGNED_INT, pFrameRGB->data[0]);
      // glTexSubImage2D(GL_TEXTURE_2D, 0, 0,0, 840, 460, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);

           glTexImage2D(GL_TEXTURE_2D,                //Always GL_TEXTURE_2D
               0,                            //0 for now
               GL_RGB,                       //Format OpenGL uses for image
               pCodecCtx->width, pCodecCtx->height,  //Width and height
               0,                            //The border of the image
               GL_RGB, //GL_RGB, because pixels are stored in RGB format
               GL_UNSIGNED_BYTE, //GL_UNSIGNED_BYTE, because pixels are stored
                               //as unsigned numbers
               pFrameRGB->data[0]);               //The actual pixel data
     // additional opengl end  

    // Save the frame to disk
    if(++i<=5)
     SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height,
           i);
     }
    }

    glColor3f(1,1,1);
    glBindTexture(GL_TEXTURE_2D, texture);
    glBegin(GL_QUADS);
       glTexCoord2f(0,1);
       glVertex3f(0,0,0);

       glTexCoord2f(1,1);
       glVertex3f(pCodecCtx->width,0,0);

       glTexCoord2f(1,0);
       glVertex3f(pCodecCtx->width, pCodecCtx->height,0);

       glTexCoord2f(0,0);
       glVertex3f(0,pCodecCtx->height,0);

    glEnd();
    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);
    }


     // Free the RGB image
    av_free(buffer);
    av_free(pFrameRGB);

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    return 0;
    }

    Unfortunately I could not find my solution here:

    ffmpeg video to opengl texture

    The program compiles but does not show any video on the texture. Just an empty OpenGL window is created.
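
    A note appended here for readers hitting the same wall: as posted, main() uploads and draws the texture inside the packet-reading loop, never calls glutSwapBuffers() and never reaches glutMainLoop(), so no frame is ever presented; the default GL_TEXTURE_MIN_FILTER also expects mipmaps that are never generated, and sws_ctx scales to a hard-coded 800x600 destination while pFrameRGB's buffer is sized for pCodecCtx->width x pCodecCtx->height. Below is a minimal sketch of how the display path could be restructured. It is not from the original post: decode_next_frame() is a hypothetical helper wrapping the question's av_read_frame/avcodec_decode_video2 loop, and texture, width, height and pFrameRGB are assumed to be set up as in the question.

    /* Hypothetical sketch (not the original poster's code): decode one
       frame per display callback and present it with a buffer swap. */
    static void render(void) {
        if (decode_next_frame()) {   /* assumed helper: fills pFrameRGB */
            glBindTexture(GL_TEXTURE_2D, texture);
            /* Re-upload into the texture allocated once by glTexImage2D */
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height,
                            GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);
        }
        glClear(GL_COLOR_BUFFER_BIT);
        glBegin(GL_QUADS);   /* vertices in the default [-1,1] clip space */
            glTexCoord2f(0, 1); glVertex2f(-1, -1);
            glTexCoord2f(1, 1); glVertex2f( 1, -1);
            glTexCoord2f(1, 0); glVertex2f( 1,  1);
            glTexCoord2f(0, 0); glVertex2f(-1,  1);
        glEnd();
        glutSwapBuffers();    /* needs GLUT_DOUBLE in glutInitDisplayMode */
        glutPostRedisplay();  /* ask GLUT to call render() again */
    }

    With that shape, main() keeps the ffmpeg setup, allocates the texture once with glTexImage2D plus glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR), registers render() with glutDisplayFunc(), and ends with glutMainLoop() instead of looping itself.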

  • WebRTC Detect Orientation from Peer Video Stream

    30 March 2018, by Daryl

    Using RecordRTC and WebRTC I am able to save a Peer MediaStream as a webm video file. The orientation of the file differs based on the peer device, meaning a 640x480 video may be rotated clockwise or counterclockwise.

    To re-orient the recorded video, some of the videos need to be rotated "clockwise" and others "counterclockwise". I need a way to determine which direction to rotate each video; otherwise, some videos will end up right side up and others upside down.

    I attempted to look up the orientation using ffprobe (see the command sketch below). However, the videos created do not have the "rotate" tag in their metadata.

    I have also been unable to find the orientation in the Peer video stream of the WebRTC object.

    I really thought that, since the Video Element displays the WebRTC Peer Stream correctly, I should be able to get the orientation from it.
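
    For reference, the ffprobe lookup mentioned above would normally be a command along these lines (shown only to illustrate the check; as the question says, the files recorded this way come back without any rotate tag):

    ffprobe -v error -select_streams v:0 \
            -show_entries stream_tags=rotate \
            -of default=noprint_wrappers=1 input.webm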

  • Image to MPEG on Linux works, same code on Android = green video

    5 April 2018, by JScoobyCed

    EDIT
    I have checked the execution and found that the error is not (yet) at the swscale point. My current issue is that the JPG image is not found:
    No such file or directory
    when calling avformat_open_input(&pFormatCtx, imageFileName, NULL, NULL);
    Before you tell me I need to register anything: I already did (I updated the code below).
    I also added the Android permission to access external storage (I don’t think it is related to Android, since I can already write to /mnt/sdcard/, where the image is also located).
    END EDIT
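
    As an aside on the error reporting: the comments in the code below suggest strerror(AVERROR(ret)), but the return value of avformat_open_input() is already a (negative) AVERROR code, so libavutil's av_strerror() is the direct way to decode it. A minimal sketch, assuming libavutil/error.h is included:

    char errbuf[AV_ERROR_MAX_STRING_SIZE];
    int ret = avformat_open_input(&pFormatCtx, imageFileName, NULL, NULL);
    if (ret < 0) {
        /* Decodes AVERROR codes, e.g. "No such file or directory" */
        av_strerror(ret, errbuf, sizeof(errbuf));
        fprintf(stderr, "avformat_open_input(%s) failed: %s\n",
                imageFileName, errbuf);
    }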

    I have been through several tutorials (including the ones posted on SO, i.e. http://dranger.com/ffmpeg/, how to compile ffmpeg for Android..., and the dolphin-player source code). Here is what I have:
    - compiled ffmpeg for Android
    - ran basic tutorials using the NDK to create a dummy video on my Android device
    - been able to generate an MPEG2 video from images on Ubuntu, using a modified version of the dummy-video code above and a lot of Googling
    - running the new code on an Android device gives a green-screen video (duration 1 second, whatever the number of frames I encode)

    I saw another post about an iPhone in a similar situation which mentioned that ARM processor optimizations could be the culprit. I tried a few extra ldflags (-arch armv7-a and similar) with no success.

    I include at the end the code that loads the image. Is there something that needs to be done differently on Android compared to Linux? Is my ffmpeg build not correct for Android video encoding?

    void copyFrame(AVCodecContext *destContext, AVFrame* dest,
               AVCodecContext *srcContext, AVFrame* source) {
    struct SwsContext *swsContext;
    swsContext = sws_getContext(srcContext->width, srcContext->height, srcContext->pix_fmt,
                   destContext->width, destContext->height, destContext->pix_fmt,
                   SWS_FAST_BILINEAR, NULL, NULL, NULL);
    sws_scale(swsContext, source->data, source->linesize, 0, srcContext->height, dest->data, dest->linesize);
    sws_freeContext(swsContext);
    }

    int loadFromFile(const char* imageFileName, AVFrame* realPicture, AVCodecContext* videoContext) {
    AVFormatContext *pFormatCtx = NULL;
    avcodec_register_all();
    av_register_all();

    int ret = avformat_open_input(&pFormatCtx, imageFileName, NULL, NULL);
    if (ret != 0) {
       // ERROR hapening here
       // Can't open image file. Use strerror(AVERROR(ret))) for details
       return ERR_CANNOT_OPEN_IMAGE;
    }

    AVCodecContext *pCodecCtx;

    pCodecCtx = pFormatCtx->streams[0]->codec;
    pCodecCtx->width = W_VIDEO;
    pCodecCtx->height = H_VIDEO;
    pCodecCtx->pix_fmt = PIX_FMT_YUV420P;

    // Find the decoder for the video stream
    AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (!pCodec) {
       // Codec not found
       return ERR_CODEC_NOT_FOUND;
    }

    // Open codec
    if (avcodec_open(pCodecCtx, pCodec) < 0) {
       // Could not open codec
       return ERR_CANNOT_OPEN_CODEC;
    }

    //
    AVFrame *pFrame;

    pFrame = avcodec_alloc_frame();

    if (!pFrame) {
       // Can't allocate memory for AVFrame
       return ERR_CANNOT_ALLOC_MEM;
    }

    int frameFinished;
    int numBytes;

    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
    uint8_t *buffer = (uint8_t *) av_malloc(numBytes * sizeof (uint8_t));

    avpicture_fill((AVPicture *) pFrame, buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
    AVPacket packet;
    int res = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
       if (packet.stream_index != 0)
           continue;

       ret = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
       if (ret > 0) {
           // now, load the useful info into realPicture
           copyFrame(videoContext, realPicture, pCodecCtx, pFrame);
           // Free the packet that was allocated by av_read_frame
           av_free_packet(&packet);
           return 0;
       } else {
           // Error decoding frame. Use strerror(AVERROR(ret))) for details
           res = ERR_DECODE_FRAME;
       }
    }
    av_free(pFrame);

    // close codec
    avcodec_close(pCodecCtx);

    // Close the image file
    av_close_input_file(pFormatCtx);

    return res;
    }

    Some ./configure options:
    --extra-cflags="-O3 -fpic -DANDROID -DHAVE_SYS_UIO_H=1 -Dipv6mr_interface=ipv6mr_ifindex -fasm -Wno-psabi -fno-short-enums -fno-strict-aliasing -finline-limit=300 -mfloat-abi=softfp -mfpu=vfp -marm -march=armv7-a -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE"

    --extra-ldflags="-Wl,-rpath-link=$PLATFORM/usr/lib -L$PLATFORM/usr/lib -nostdlib -lc -lm -ldl -llog"

    --arch=armv7-a --enable-armv5te --enable-armv6 --enable-armvfp --enable-memalign-hack