Advanced search

Media (1)

Word: - Tags -/Rennes

Other articles (68)

  • User profiles

    12 April 2011, by

    Each user has a profile page where they can edit their personal information. In the default top-of-page menu, a menu item is created automatically when MediaSPIP is initialized, visible only when the visitor is logged in to the site.
    The user can edit their profile from their author page; a "Modifier votre profil" (edit your profile) link in the navigation is (...)

  • Configuring language support

    15 November 2010, by

    Accessing the configuration and adding supported languages
    To configure support for new languages, you need to go to the "Administrer" (administration) area of the site.
    From there, in the navigation menu, you can reach a "Gestion des langues" (language management) section that lets you enable support for new languages.
    Each newly added language can still be deactivated as long as no object has been created in that language. After that, it becomes greyed out in the configuration and (...)

  • XMP PHP

    13 May 2011, by

    According to Wikipedia, XMP stands for:
    Extensible Metadata Platform (XMP), an XML-based metadata format used in PDF, photography, and graphics applications. It was launched by Adobe Systems in April 2001, integrated into version 5.0 of Adobe Acrobat.
    Being based on XML, it manages a set of dynamic tags for use in the context of the Semantic Web.
    XMP makes it possible to record, as an XML document, information about a file: title, author, history (...)
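
    To give a feel for what the excerpt describes, here is a simplified sketch of such an XMP packet, embedded as a C++ string literal for illustration (the title and creator values are made up; real XMP wraps these fields in RDF containers, which is elided here):

    // Illustrative only: a tiny, simplified XMP packet as a C++ string,
    // showing the kind of XML document XMP stores about a file.
    #include <iostream>

    int main() {
        const char *xmp =
            "<x:xmpmeta xmlns:x=\"adobe:ns:meta/\">\n"
            "  <rdf:RDF xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\"\n"
            "           xmlns:dc=\"http://purl.org/dc/elements/1.1/\">\n"
            "    <rdf:Description rdf:about=\"\">\n"
            "      <dc:title>My photo</dc:title>\n"      // hypothetical title
            "      <dc:creator>Jane Doe</dc:creator>\n"  // hypothetical author
            "    </rdf:Description>\n"
            "  </rdf:RDF>\n"
            "</x:xmpmeta>\n";
        std::cout << xmp; // print the packet
        return 0;
    }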

On other sites (6734)

  • Display ffmpeg frames on OpenGL texture

    24 October 2014, by naki

    I am using Dranger tutorial01 (ffmpeg) to decode the video and get the AVFrames. I want to use OpenGL to display the video.

    http://dranger.com/ffmpeg/tutorial01.html

    The main function is as follows:

    int main (int argc, char** argv) {
    // opengl stuff
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGBA);
    glutInitWindowSize(800, 600);
    glutCreateWindow("Hello GL");

    glutReshapeFunc(changeViewport);
    glutDisplayFunc(render);

    GLenum err = glewInit();
    if(GLEW_OK !=err){
       fprintf(stderr, "GLEW error");
       return 1;
    }

    glClear(GL_COLOR_BUFFER_BIT);


    glEnable(GL_TEXTURE_2D);
    GLuint texture;
    glGenTextures(1, &texture); //Make room for our texture
    glBindTexture(GL_TEXTURE_2D, texture);

    //ffmpeg stuff

    AVFormatContext *pFormatCtx = NULL;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx = NULL;
    AVCodec         *pCodec = NULL;
    AVFrame         *pFrame = NULL;
    AVFrame         *pFrameRGB = NULL;
    AVPacket        packet;
    int             frameFinished;
    int             numBytes;
    uint8_t         *buffer = NULL;

    AVDictionary    *optionsDict = NULL;


    if(argc < 2) {
    printf("Please provide a movie file\n");
    return -1;
    }
    // Register all formats and codecs

    av_register_all();

    // Open video file
    if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
      return -1; // Couldn't open file

    // Retrieve stream information

    if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream

    videoStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
     videoStream=i;
     break;
    }
    if(videoStream==-1)
    return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx=pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL) {
      fprintf(stderr, "Unsupported codec!\n");
      return -1; // Codec not found
    }
    // Open codec
    if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
      return -1; // Could not open codec

    // Allocate video frame
    pFrame=av_frame_alloc();

    // Allocate an AVFrame structure
    pFrameRGB=av_frame_alloc();
    if(pFrameRGB==NULL)
    return -1;

    // Determine required buffer size and allocate buffer
    numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
                 pCodecCtx->height);
    buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

    struct SwsContext      *sws_ctx = sws_getContext(pCodecCtx->width,
              pCodecCtx->height, pCodecCtx->pix_fmt, 800,
              600, PIX_FMT_RGB24, SWS_BICUBIC, NULL,
              NULL, NULL);


    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
        pCodecCtx->width, pCodecCtx->height);

    // Read frames and save first five frames to disk
    i=0;
    while(av_read_frame(pFormatCtx, &packet)>=0) {


    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
     // Decode video frame
     avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
              &packet);

     // Did we get a video frame?
     if(frameFinished) {
    // Convert the image from its native format to RGB
     /*  sws_scale
       (
           sws_ctx,
           (uint8_t const * const *)pFrame->data,
           pFrame->linesize,
           0,
           pCodecCtx->height,
           pFrameRGB->data,
           pFrameRGB->linesize
       );
      */
    sws_scale(sws_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
     // additional opengl
       glBindTexture(GL_TEXTURE_2D, texture);

           //gluBuild2DMipmaps(GL_TEXTURE_2D, 3, pCodecCtx->width, pCodecCtx->height, GL_RGB, GL_UNSIGNED_INT, pFrameRGB->data[0]);
      // glTexSubImage2D(GL_TEXTURE_2D, 0, 0,0, 840, 460, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);

           glTexImage2D(GL_TEXTURE_2D,                //Always GL_TEXTURE_2D
               0,                            //0 for now
               GL_RGB,                       //Format OpenGL uses for image
               pCodecCtx->width, pCodecCtx->height,  //Width and height
               0,                            //The border of the image
               GL_RGB, //GL_RGB, because pixels are stored in RGB format
               GL_UNSIGNED_BYTE, //GL_UNSIGNED_BYTE, because pixels are stored
                               //as unsigned numbers
               pFrameRGB->data[0]);               //The actual pixel data
     // additional opengl end  

    // Save the frame to disk
    if(++i<=5)
     SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height,
           i);
     }
    }

    glColor3f(1,1,1);
    glBindTexture(GL_TEXTURE_2D, texture);
    glBegin(GL_QUADS);
       glTexCoord2f(0,1);
       glVertex3f(0,0,0);

       glTexCoord2f(1,1);
       glVertex3f(pCodecCtx->width,0,0);

       glTexCoord2f(1,0);
       glVertex3f(pCodecCtx->width, pCodecCtx->height,0);

       glTexCoord2f(0,0);
       glVertex3f(0,pCodecCtx->height,0);

    glEnd();
    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);
    }


     // Free the RGB image
    av_free(buffer);
    av_free(pFrameRGB);

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    return 0;
    }

    Unfortunately I could not find my solution here:

    ffmpeg video to opengl texture

    The program compiles but does not show any video on the texture; just an OpenGL window is created.
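
    For context, two common causes of exactly this symptom are visible in the code above: glutMainLoop() is never called, so everything drawn inside the decode loop happens before the window is ever presented, and no GL_TEXTURE_MIN_FILTER is set, so the default minification filter expects mipmaps and the un-mipmapped texture samples as black. A minimal sketch of the callback structure GLUT expects (an illustration, not a drop-in fix; the frame decoding and upload from the code above are elided):

    // Sketch: GLUT draws through callbacks that only run once glutMainLoop()
    // has started; rendering done before then is never presented.
    #include <GL/glut.h>

    GLuint texture = 0; // would be filled by the ffmpeg decode code above

    void render(void) {
        glClear(GL_COLOR_BUFFER_BIT);
        glBindTexture(GL_TEXTURE_2D, texture);
        glBegin(GL_QUADS);
            glTexCoord2f(0, 1); glVertex2f(-1, -1);
            glTexCoord2f(1, 1); glVertex2f( 1, -1);
            glTexCoord2f(1, 0); glVertex2f( 1,  1);
            glTexCoord2f(0, 0); glVertex2f(-1,  1);
        glEnd();
        glutSwapBuffers();   // present the frame (needs GLUT_DOUBLE)
        glutPostRedisplay(); // keep redrawing so new frames become visible
    }

    int main(int argc, char **argv) {
        glutInit(&argc, argv);
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
        glutInitWindowSize(800, 600);
        glutCreateWindow("Hello GL");

        glEnable(GL_TEXTURE_2D);
        glGenTextures(1, &texture);
        glBindTexture(GL_TEXTURE_2D, texture);
        // Without this, the default minification filter expects mipmaps
        // and an un-mipmapped texture samples as black:
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);

        // ... decode a frame and upload it with glTexImage2D here,
        // as in the code above ...

        glutDisplayFunc(render);
        glutMainLoop(); // never returns; all drawing happens in render()
        return 0;
    }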

  • Revision 35042: Something that was left lying around

    9 February 2010, by kent1@… — Log

    Something that was left lying around

  • Qt Open Source Product 2 - Vlc Demo [closed]

    3 June 2021, by cool code

    Ⅰ. Preface

    The previous demo was built directly on the FFmpeg API, and FFmpeg is too powerful for many beginners to digest. A lot of users only need to play a simple video stream and have no wish to get involved in the complexities of decoding and transcoding, which is where VLC comes in handy: it wraps FFmpeg in a deep encapsulation and exposes a friendly interface. MPV does the same job, and in one respect it beats VLC: it ships as a single library file, apparently packaged as a static library, whereas VLC drags along a pile of dynamic libraries and plug-in files.
    Of course, the appeal of VLC is that it takes only a few lines of code to get going, so beginners see a result right away, which is motivating and lets them move on quickly to the next step and experience the fun of coding.
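
    To make the "few lines of code" claim concrete, here is a minimal libVLC playback sketch (an illustration under stated assumptions: libVLC 3.x installed, and "sample.mp4" as a placeholder file name):

// Minimal libVLC playback sketch. "sample.mp4" is a placeholder;
// error handling is reduced to the bare minimum.
#include <vlc/vlc.h>
#include <chrono>
#include <thread>

int main() {
    libvlc_instance_t *vlc = libvlc_new(0, nullptr);
    if (vlc == nullptr)
        return 1;

    libvlc_media_t *media = libvlc_media_new_path(vlc, "sample.mp4");
    libvlc_media_player_t *player = libvlc_media_player_new_from_media(media);
    libvlc_media_release(media); // the player holds its own reference now

    libvlc_media_player_play(player); // playback runs on libVLC's own threads
    std::this_thread::sleep_for(std::chrono::seconds(10)); // let it play briefly

    libvlc_media_player_stop(player);
    libvlc_media_player_release(player);
    libvlc_release(vlc);
    return 0;
}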

    


    Ⅱ. Code framework

    #include "ffmpeg.h"

FFmpegThread::FFmpegThread(QObject *parent) : QThread(parent)
{
    setObjectName("FFmpegThread");
    stopped = false;
    isPlay = false;

    frameFinish = false;
    videoWidth = 0;
    videoHeight = 0;
    oldWidth = 0;
    oldHeight = 0;
    videoStreamIndex = -1;
    audioStreamIndex = -1;

    url = "rtsp://192.168.1.128:554/1";

    buffer = NULL;
    avPacket = NULL;
    avFrame = NULL;
    avFrame2 = NULL;
    avFrame3 = NULL;
    avFormatContext = NULL;
    videoCodec = NULL;
    audioCodec = NULL;
    swsContext = NULL;

    options = NULL;
    videoDecoder = NULL;
    audioDecoder = NULL;

    //Initial registration, only register once in a software
    FFmpegThread::initlib();
}

//Only need to initialize once in a software
void FFmpegThread::initlib()
{
    static QMutex mutex;
    QMutexLocker locker(&mutex);
    static bool isInit = false;
    if (!isInit) {
        //Register all available file formats and decoders in the library
        av_register_all();
        //Register all devices, mainly for local camera playback support
#ifdef ffmpegdevice
        avdevice_register_all();
#endif
        //Initialize the network stream format, which must be executed first when using the network stream
        avformat_network_init();

        isInit = true;
        qDebug() << TIMEMS << "init ffmpeg lib ok" << " version:" << FFMPEG_VERSION;
#if 0
        //Output all supported decoder names
        QStringList listCodeName;
        AVCodec *code = av_codec_next(NULL);
        while (code != NULL) {
            listCodeName << code->name;
            code = code->next;
        }

        qDebug() << TIMEMS << listCodeName;
#endif
    }
}

bool FFmpegThread::init()
{
    //Before opening the stream, specify parameters such as probe time, timeout and maximum delay
    //Set the cache size, 1080p can increase the value
    av_dict_set(&options, "buffer_size", "8192000", 0);
    //Open in tcp mode, if open in udp mode, replace tcp with udp
    av_dict_set(&options, "rtsp_transport", "tcp", 0);
    //Set the timeout disconnection time, the unit is microseconds, 3000000 means 3 seconds
    av_dict_set(&options, "stimeout", "3000000", 0);
    //Set the maximum delay, in microseconds, 1000000 means 1 second
    av_dict_set(&options, "max_delay", "1000000", 0);
    //Automatically start the number of threads
    av_dict_set(&options, "threads", "auto", 0);

    //Open video stream
    avFormatContext = avformat_alloc_context();

    int result = avformat_open_input(&avFormatContext, url.toStdString().data(), NULL, &options);
    if (result < 0) {
        qDebug() << TIMEMS << "open input error" << url;
        return false;
    }

    //Release setting parameters
    if (options != NULL) {
        av_dict_free(&options);
    }

    //Get stream information
    result = avformat_find_stream_info(avFormatContext, NULL);
    if (result < 0) {
        qDebug() << TIMEMS << "find stream info error";
        return false;
    }

    //----------At the beginning of the video stream part, make a mark to facilitate the folding of the code----------
    if (1) {
        videoStreamIndex = av_find_best_stream(avFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, &videoDecoder, 0);
        if (videoStreamIndex < 0) {
            qDebug() << TIMEMS << "find video stream index error";
            return false;
        }

        //Get video stream
        AVStream *videoStream = avFormatContext->streams[videoStreamIndex];

        //Get the video stream decoder, or specify the decoder
        videoCodec = videoStream->codec;
        videoDecoder = avcodec_find_decoder(videoCodec->codec_id);
        //videoDecoder = avcodec_find_decoder_by_name("h264_qsv");
        if (videoDecoder == NULL) {
            qDebug() << TIMEMS << "video decoder not found";
            return false;
        }

        //Set up accelerated decoding
        videoCodec->lowres = videoDecoder->max_lowres;
        videoCodec->flags2 |= AV_CODEC_FLAG2_FAST;

        //Open the video decoder
        result = avcodec_open2(videoCodec, videoDecoder, NULL);
        if (result < 0) {
            qDebug() << TIMEMS << "open video codec error";
            return false;
        }

        //Get the resolution size
        videoWidth = videoStream->codec->width;
        videoHeight = videoStream->codec->height;

        //If the width and height are not obtained, return
        if (videoWidth == 0 || videoHeight == 0) {
            qDebug() << TIMEMS << "find width height error";
            return false;
        }

        QString videoInfo = QString("Video stream info -> index: %1  decode: %2  format: %3  duration: %4 s  Resolution: %5*%6")
                            .arg(videoStreamIndex).arg(videoDecoder->name).arg(avFormatContext->iformat->name)
                            .arg((avFormatContext->duration) / 1000000).arg(videoWidth).arg(videoHeight);
        qDebug() << TIMEMS << videoInfo;
    }
    //----------End of the video stream part----------

    //----------Start the audio stream part, mark it to facilitate the code folding----------
    if (1) {
        //Loop to find audio stream index
        audioStreamIndex = -1;
        for (uint i = 0; i < avFormatContext->nb_streams; i++) {
            if (avFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
                audioStreamIndex = i;
                break;
            }
        }

        //Some have no audio stream, so there is no need to return here
        if (audioStreamIndex == -1) {
            qDebug() << TIMEMS << "find audio stream index error";
        } else {
            //Get audio stream
            AVStream *audioStream = avFormatContext->streams[audioStreamIndex];
            audioCodec = audioStream->codec;

            //Get the audio stream decoder, or specify the decoder
            audioDecoder = avcodec_find_decoder(audioCodec->codec_id);
            //audioDecoder = avcodec_find_decoder_by_name("aac");
            if (audioDecoder == NULL) {
                qDebug() << TIMEMS << "audio codec not found";
                return false;
            }

            //Open the audio decoder
            result = avcodec_open2(audioCodec, audioDecoder, NULL);
            if (result < 0) {
                qDebug() << TIMEMS << "open audio codec error";
                return false;
            }

            QString audioInfo = QString("Audio stream information -> index: %1  decode: %2  Bit rate: %3  channel num: %4  sampling: %5")
                                .arg(audioStreamIndex).arg(audioDecoder->name).arg(avFormatContext->bit_rate)
                                .arg(audioCodec->channels).arg(audioCodec->sample_rate);
            qDebug() << TIMEMS << audioInfo;
        }
    }
    //----------End of audio stream----------

    //Pre-allocated memory
    avPacket = av_packet_alloc();
    avFrame = av_frame_alloc();
    avFrame2 = av_frame_alloc();
    avFrame3 = av_frame_alloc();

    //Compare the width and height of the last file. When they change, the memory needs to be reallocated
    if (oldWidth != videoWidth || oldHeight != videoHeight) {
        int byte = avpicture_get_size(AV_PIX_FMT_RGB32, videoWidth, videoHeight);
        av_freep(&buffer); //free any buffer left over from the previous size
        buffer = (uint8_t *)av_malloc(byte * sizeof(uint8_t));
        oldWidth = videoWidth;
        oldHeight = videoHeight;
    }

    //Define pixel format
    AVPixelFormat srcFormat = AV_PIX_FMT_YUV420P;
    AVPixelFormat dstFormat = AV_PIX_FMT_RGB32;
    //Get the decoded format through the decoder
    srcFormat = videoCodec->pix_fmt;

    //The SWS_FAST_BILINEAR parameter used by the default fastest decoding may lose part of the picture data, and you can change it to other parameters by yourself
    int flags = SWS_FAST_BILINEAR;

    //Open up a cache to store one frame of data
    //The following two methods are ok, avpicture_fill has been gradually abandoned
    //avpicture_fill((AVPicture *)avFrame3, buffer, dstFormat, videoWidth, videoHeight);
    av_image_fill_arrays(avFrame3->data, avFrame3->linesize, buffer, dstFormat, videoWidth, videoHeight, 1);

    //Image conversion
    swsContext = sws_getContext(videoWidth, videoHeight, srcFormat, videoWidth, videoHeight, dstFormat, flags, NULL, NULL, NULL);

    //Output video information
    //av_dump_format(avFormatContext, 0, url.toStdString().data(), 0);

    //qDebug() << TIMEMS << "init ffmpeg finish";
    return true;
}

void FFmpegThread::run()
{
    while (!stopped) {
        //Perform initialization based on the flag bit
        if (isPlay) {
            this->init();
            isPlay = false;
            continue;
        }

        if (av_read_frame(avFormatContext, avPacket) >= 0) {
            //Determine whether the current package is video or audio
            int index = avPacket->stream_index;
            if (index == videoStreamIndex) {
                //Decode video stream avcodec_decode_video2 method has been deprecated
#if 0
                avcodec_decode_video2(videoCodec, avFrame2, &frameFinish, avPacket);
#else
                frameFinish = avcodec_send_packet(videoCodec, avPacket);
                if (frameFinish < 0) {
                    continue;
                }

                frameFinish = avcodec_receive_frame(videoCodec, avFrame2);
                if (frameFinish < 0) {
                    continue;
                }
#endif

                if (frameFinish >= 0) {
                    //Turn the data into a picture
                    sws_scale(swsContext, (const uint8_t *const *)avFrame2->data, avFrame2->linesize, 0, videoHeight, avFrame3->data, avFrame3->linesize);

                    //The following two methods can be used
                    //QImage image(avFrame3->data[0], videoWidth, videoHeight, QImage::Format_RGB32);
                    QImage image((uchar *)buffer, videoWidth, videoHeight, QImage::Format_RGB32);
                    if (!image.isNull()) {
                        emit receiveImage(image);
                    }

                    msleep(1);
                }
            } else if (index == audioStreamIndex) {
                //Decode the audio stream, it will not be processed here, and will be handed over to sdl to play
            }
        }

        av_packet_unref(avPacket);
        msleep(1);
    }

    //Release resources after the thread ends
    free();
    stopped = false;
    isPlay = false;
    qDebug() << TIMEMS << "stop ffmpeg thread";
}

void FFmpegThread::setUrl(const QString &url)
{
    this->url = url;
}

void FFmpegThread::free()
{
    if (swsContext != NULL) {
        sws_freeContext(swsContext);
        swsContext = NULL;
    }

    if (avPacket != NULL) {
        av_packet_free(&avPacket);
    }

    if (avFrame != NULL) {
        av_frame_free(&avFrame);
        avFrame = NULL;
    }

    if (avFrame2 != NULL) {
        av_frame_free(&avFrame2);
        avFrame2 = NULL;
    }

    if (avFrame3 != NULL) {
        av_frame_free(&avFrame3);
        avFrame3 = NULL;
    }

    if (videoCodec != NULL) {
        avcodec_close(videoCodec);
        videoCodec = NULL;
    }

    if (audioCodec != NULL) {
        avcodec_close(audioCodec);
        audioCodec = NULL;
    }

    if (avFormatContext != NULL) {
        avformat_close_input(&avFormatContext);
        avFormatContext = NULL;
    }

    av_dict_free(&options);
    //qDebug() << TIMEMS << "close ffmpeg ok";
}

void FFmpegThread::play()
{
    //Let the thread perform initialization through the flag bit
    isPlay = true;
}

void FFmpegThread::pause()
{

}

void FFmpegThread::next()
{

}

void FFmpegThread::stop()
{
    //Stop the thread through the flag
    stopped = true;
}

//Real-time video display form class
FFmpegWidget::FFmpegWidget(QWidget *parent) : QWidget(parent)
{
    thread = new FFmpegThread(this);
    connect(thread, SIGNAL(receiveImage(QImage)), this, SLOT(updateImage(QImage)));
    image = QImage();
}

FFmpegWidget::~FFmpegWidget()
{
    close();
}

void FFmpegWidget::paintEvent(QPaintEvent *)
{
    if (image.isNull()) {
        return;
    }

    //qDebug() << TIMEMS << "paintEvent" << objectName();
    QPainter painter(this);
    painter.drawImage(this->rect(), image);
}

void FFmpegWidget::updateImage(const QImage &image)
{
    //this->image = image.copy();
    this->image = image;
    this->update();
}

void FFmpegWidget::setUrl(const QString &url)
{
    thread->setUrl(url);
}

void FFmpegWidget::open()
{
    //qDebug() << TIMEMS << "open video" << objectName();
    clear();

    thread->play();
    thread->start();
}

void FFmpegWidget::pause()
{
    thread->pause();
}

void FFmpegWidget::next()
{
    thread->next();
}

void FFmpegWidget::close()
{
    //qDebug() << TIMEMS << "close video" << objectName();
    if (thread->isRunning()) {
        thread->stop();
        thread->quit();
        thread->wait(500);
    }

    QTimer::singleShot(1, this, SLOT(clear()));
}

void FFmpegWidget::clear()
{
    image = QImage();
    update();
}
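
    For orientation, a hypothetical main() showing how the widget above might be wired up (an assumption: the classes are declared in ffmpeg.h and the project links against Qt and FFmpeg; this is not part of the original demo):

// Hypothetical usage of FFmpegWidget; not part of the original demo.
#include <QApplication>
#include "ffmpeg.h"

int main(int argc, char *argv[]) {
    QApplication app(argc, argv);

    FFmpegWidget w(nullptr);
    w.setUrl("rtsp://192.168.1.128:554/1"); // stream URL from the example above
    w.resize(640, 480);
    w.show();
    w.open(); // starts the decode thread; frames arrive via receiveImage

    return app.exec();
}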



    


    Ⅲ. Renderings

    https://youtu.be/CLsGw2CJW-c

    Ⅳ. Open source code download URL

    1. Download URL for Dropbox:

    https://www.dropbox.com/sh/n58ucs57pscp25e/AABWBQlg4U3Oz2WF9YOJDrj1a?dl=0

    2. Download URL for Box:

    https://app.box.com/s/x48a7ttpk667afqqdk7t1fqok4fmvmyv