
Other articles (65)

  • Add notes and captions to images

    7 February 2011, by

    To be able to add notes and captions to images, the first step is to install the "Légendes" plugin.
    Once the plugin is activated, you can configure it in the configuration area in order to change the rights for creating, modifying and deleting notes. By default, only site administrators can add notes to images.
    Changes when adding a media item
    When adding a media item of type "image", a new button appears above the preview (...)

  • MediaSPIP 0.1 Beta version

    25 April 2011, by

    MediaSPIP 0.1 beta is the first version of MediaSPIP declared "usable".
    The zip file provided here only contains the sources of MediaSPIP in its standalone version.
    To get a working installation, you must manually install all the software dependencies on the server.
    If you want to use this archive for an installation in "farm mode", you will also need to carry out other manual (...)

  • Contribute to documentation

    13 April 2011

    Documentation is vital to the development of improved technical capabilities.
    MediaSPIP welcomes documentation by users as well as developers, including: critiques of existing features and functions; articles contributed by developers, administrators, content producers and editors; screenshots to illustrate the above; and translations of existing documentation into other languages.
    To contribute, register to the project users’ mailing (...)

On other sites (8763)

  • FFmpeg API on iOS: "Resource temporarily unavailable"

    8 October 2017, by Julius Naeumann

    I've spent hours trying to fix this:

    I'm trying to use the FFmpeg API on iOS. My Xcode project builds and I can call FFmpeg API functions. I am trying to write code that decodes a video (without outputting anything for now), and I keep getting error -35: "Resource temporarily unavailable".

    The input file is from the camera roll (.mov) and I'm using MPEG-4 for decoding. All I'm currently doing is reading data from the file, parsing it and sending the parsed packets to the decoder. When I try to receive frames, all I get is that error. Does anyone know what I'm doing wrong? (A sketch of the usual receive-loop handling follows after the code.)

    +(void)test: (NSString*)filename outfile:(NSString*)outfilename {

    /* register all the codecs */
    avcodec_register_all();

    AVCodec *codec;
    AVCodecParserContext *parser;
    AVCodecContext *c= NULL;
    int frame_count;
    FILE* f;
    AVFrame* frame;
    AVPacket* avpkt;
    avpkt = av_packet_alloc();
    //av_init_packet(avpkt);
    char buf[1024];

    uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
    uint8_t *data;
    size_t   data_size;

    /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
    memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    printf("Decode video file %s to %s\n", [filename cStringUsingEncoding:NSUTF8StringEncoding], [outfilename cStringUsingEncoding:NSUTF8StringEncoding]);
    /* find the h264 video decoder */
    codec = avcodec_find_decoder(AV_CODEC_ID_MPEG4);
    if (!codec) {
       fprintf(stderr, "Codec not found\n");
       exit(1);
    }
    c = avcodec_alloc_context3(codec);
    if (!c) {
       fprintf(stderr, "Could not allocate video codec context\n");
       exit(1);
    }
    if (codec->capabilities & AV_CODEC_CAP_TRUNCATED)
       c->flags |= AV_CODEC_FLAG_TRUNCATED; // we do not send complete frames
    /* For some codecs, such as msmpeg4 and mpeg4, width and height
    MUST be initialized there because this information is not
    available in the bitstream. */
    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
       fprintf(stderr, "Could not open codec\n");
       exit(1);
    }
    f = fopen([filename cStringUsingEncoding:NSUTF8StringEncoding], "rb");
    if (!f) {
       fprintf(stderr, "Could not open %s\n", [filename cStringUsingEncoding:NSUTF8StringEncoding]);
       exit(1);
    }
    frame = av_frame_alloc();
    if (!frame) {
       fprintf(stderr, "Could not allocate video frame\n");
       exit(1);
    }
    frame_count = 0;

    parser = av_parser_init(codec->id);
    if (!parser) {
       fprintf(stderr, "parser not found\n");
       exit(1);
    }

    while (!feof(f)) {
       /* read raw data from the input file */
       data_size = fread(inbuf, 1, INBUF_SIZE, f);
       if (!data_size)
           break;
       /* use the parser to split the data into frames */
       data = inbuf;
       while (data_size > 0) {
           int ret = av_parser_parse2(parser, c, &avpkt->data, &avpkt->size, data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
           if (ret < 0) {
               fprintf(stderr, "Error while parsing\n");
               exit(1);
           }
           data      += ret;
           data_size -= ret;
           if (avpkt->size){
               char buf[1024];

               ret = avcodec_send_packet(c, avpkt);
               if (ret < 0) {

                   fprintf(stderr, "Error sending a packet for decoding\n");
                   continue;
                   exit(1);
               }

               while (ret >= 0) {
                   ret = avcodec_receive_frame(c, frame);
                   if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){
                       char e [1024];
                       av_strerror(ret, e, 1024);
                       fprintf(stderr, "Fail: %s !\n", e);
    // ~~~~~~~~ This is where my program exits ~~~~~~~~~~~~~~~~~~~~~~~~~~~
                       return;
                   }
                   else if (ret < 0) {
                       fprintf(stderr, "Error during decoding\n");
                       exit(1);
                   }
               }


           }
       }
    }
    /* some codecs, such as MPEG, transmit the I and P frame with a
    latency of one frame. You must do the following to have a
    chance to get the last frame of the video */

    fclose(f);
    avcodec_close(c);
    av_free(c);
    av_frame_free(&frame);
    printf("\n");

    }
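    For reference, here is a minimal sketch of how the send/receive decode loop is usually written with this API (FFmpeg 3.x and later); decode_packet is just an illustrative helper name, not part of FFmpeg. AVERROR(EAGAIN) from avcodec_receive_frame() is not a failure: it only means the decoder needs more input, so the loop should go back to sending the next packet instead of returning. Note also that a .mov file is a container, so feeding raw file bytes through av_parser_parse2() to an MPEG-4 decoder will generally not produce valid packets; demuxing with libavformat (avformat_open_input() / av_read_frame()) is the more common approach.

    #include <libavcodec/avcodec.h>

    /* Sketch: send one packet and drain all frames it produces.
       Returns 0 on success or when the decoder simply needs more input,
       and a negative AVERROR code on a real error. */
    static int decode_packet(AVCodecContext *c, const AVPacket *pkt, AVFrame *frame)
    {
        int ret = avcodec_send_packet(c, pkt);
        if (ret < 0)
            return ret;                 /* real error while sending */

        while (ret >= 0) {
            ret = avcodec_receive_frame(c, frame);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                return 0;               /* needs more input / fully flushed: not an error */
            if (ret < 0)
                return ret;             /* real decoding error */

            /* a decoded frame is now available in `frame`; process it here */
            av_frame_unref(frame);
        }
        return 0;
    }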

  • Varying RTP stream result from a custom SIP implementation

    1 February, by Nik Hendricks

    I am in the process of creating my own SIP implementation in Node.js, as well as a B2BUA, as a learning project.

    Finding people wise in the ways of SIP has proved difficult elsewhere, but here I have had good results.

    This is the GitHub repository of my library so far: node.js-sip

    This is the GitHub repository of my PBX so far: FlowPBX

    Currently, everything is working as I expect, although I do have some questions about possible errors in my implementation.

    My main issue is with RTP streams. Currently I am using ffmpeg.

    My function is as follows:

    start_stream(call_id, sdp){
        console.log('Starting Stream')
        let port = sdp.match(/m=audio (\d+) RTP/)[1];
        let ip = sdp.match(/c=IN IP4 (\d+\.\d+\.\d+\.\d+)/)[1];
        let codec_ids = sdp.match(/m=audio \d+ RTP\/AVP (.+)/)[1].split(' ');
        let ffmpeg_codec_map = {
            'opus': 'libopus',
            'PCMU': 'pcm_mulaw',
            'PCMA': 'pcm_alaw',
            'telephone-event': 'pcm_mulaw',
            'speex': 'speex',
            'G722': 'g722',
            'G729': 'g729',
            'GSM': 'gsm',
            'AMR': 'amr',
            'AMR-WB': 'amr_wb',
            'iLBC': 'ilbc',
            'iSAC': 'isac',
        }

        let codecs = [];
        sdp.split('\n').forEach(line => {
            if(line.includes('a=rtpmap')){
                let codec = line.match(/a=rtpmap:(\d+) (.+)/)[2];
                let c_id = line.match(/a=rtpmap:(\d+) (.+)/)[1];
                codecs.push({                    
                    name: codec.split('/')[0],
                    rate: codec.split('/')[1],
                    channels: codec.split('/')[2] !== undefined ? codec.split('/')[2] : 1,
                    id: c_id
                })
            }
        })

        console.log('codecs')
        console.log(codecs)

        let selected_codec = codecs[0]
        if(selected_codec.name == 'telephone-event'){
            selected_codec = codecs[1]
            console.log(selected_codec)
        }

        //see if opus is available
        codecs.forEach(codec => {
            if(codec.name == 'opus'){
                selected_codec = codec;
            }
        })

        if(selected_codec.name != 'opus'){
            //check if g729 is available
            codecs.forEach(codec => {
                if(codec.name == 'G729'){
                    selected_codec = codec;
                }
            })
        }

        console.log('selected_codec')
        console.log(selected_codec)

        let spawn = require('child_process').spawn;
        let ffmpegArgs = [
            '-re',
            '-i', 'song.mp3',
            '-acodec', ffmpeg_codec_map[selected_codec.name],
            '-ar', selected_codec.rate,
            '-ac', selected_codec.channels,
            '-payload_type', selected_codec.id,
            '-f', 'rtp', `rtp://${ip}:${port}`
        ];

        let ffmpeg = spawn('ffmpeg', ffmpegArgs);

        ffmpeg.stdout.on('data', (data) => {
            console.log(`stdout: ${data}`);
        });
        ffmpeg.stderr.on('data', (data) => {
            console.error(`stderr: ${data}`);
        });




    }

    When using Zoiper to test, it works great. I have seen the mobile version negotiate speex and the desktop version mostly negotiate opus.

    Today I tried to register a Grandstream phone to my PBX, and the RTP stream is blank audio. Opus is available and I have tried to prefer it in my stream, but even when it is selected I cannot get audio to the Grandstream phone. The same applies to a Yealink phone; so far I can only get Zoiper to work.

    What could be causing this behavior? There is a clear path of communication between everything, just like with the Zoiper clients I have used.

    Additionally, in my SIP implementation, how important is the concept of a dialog? Currently I just match messages by Call-ID and then choose what to send based on the method or response. Is there any other underlying dialog functionality that I may need to implement? (A minimal dialog-matching sketch follows at the end of this question.)

    It would just be awesome to get the eyes of someone who really knows what they are talking about on some of my code, to steer this large codebase in the right direction, but I realize that is a big ask lol.
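    For context on the dialog question: in SIP, a dialog is identified by the Call-ID together with the local and remote tags (RFC 3261, section 12), so matching on Call-ID alone can mix up messages once forking or multiple dialogs within one call are involved. Below is a minimal, language-agnostic sketch of that matching rule, written in C purely for illustration; dialog_id and dialog_matches are made-up names, not part of any library.

    #include <string.h>

    /* Hypothetical dialog identifier: Call-ID plus the two tags. */
    typedef struct {
        const char *call_id;
        const char *local_tag;   /* tag we generated on our From/To header */
        const char *remote_tag;  /* tag generated by the peer */
    } dialog_id;

    /* Returns 1 when an incoming message belongs to this dialog. */
    static int dialog_matches(const dialog_id *d,
                              const char *call_id,
                              const char *local_tag,
                              const char *remote_tag)
    {
        return strcmp(d->call_id, call_id) == 0 &&
               strcmp(d->local_tag, local_tag) == 0 &&
               strcmp(d->remote_tag, remote_tag) == 0;
    }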

  • MMS stream is playing slow with FFmpeg codec

    21 July 2016, by Matrix Revolved

    I used AudioQueue and the FFmpeg codec for playing an MMS stream,
    but it is not playing properly; it is slow at times.
    I am using the open source repository
    https://github.com/shouian/MMSRadioWithFFmpeg

    - (BOOL)initFFmpegAudioStream:(NSString *)filePath withTransferWay:(kNetworkWay)network
    {
       NSString *pAudioInPath;
       AVCodec  *pAudioCodec;
       // Parse header
       uint8_t pInput[] = {0x0ff,0x0f9,0x058,0x80,0,0x1f,0xfc};
       tAACADTSHeaderInfo vxADTSHeader={0};
       [AudioUtilities parseAACADTSHeader:pInput toHeader:(tAACADTSHeaderInfo *) &vxADTSHeader];
       // Compare the file path
       if (strncmp([filePath UTF8String], "rtsp", 4) == 0) {
           pAudioInPath = filePath;
           isLocalFile = NO;
       } else if (strncmp([filePath UTF8String], "mms:", 4) == 0) {
           pAudioInPath = filePath;
           pAudioInPath = [pAudioInPath stringByReplacingOccurrencesOfString:@"mms:" withString:@"mmst:"];
           NSLog(@"Audio path %@", pAudioInPath);
           isLocalFile = NO;
       } else if (strncmp([filePath UTF8String], "mmsh:", 4) == 0) {
           pAudioInPath = filePath;
           isLocalFile = NO;
       }else if (strncmp([filePath UTF8String], "mmst:", 4) == 0) {
           pAudioInPath = filePath;
           isLocalFile = NO;
       }else if (strncmp([filePath UTF8String], "http:", 4) == 0) {
           pAudioInPath = filePath;
           isLocalFile = NO;
       } else {
           pAudioInPath = [[[NSBundle mainBundle] bundlePath] stringByAppendingString:filePath];
           isLocalFile = YES;
       }
       // Register FFmpeg
       avcodec_register_all();
       av_register_all();
       if (isLocalFile == NO) {
           avformat_network_init();
       }
       @synchronized(self) {
           pFormatCtx = avformat_alloc_context();
       }
       // Set network path
       switch (network) {
           case kTCP:
           {
               AVDictionary *option = 0;
               av_dict_set(&option, "rtsp_transport", "tcp", 0);
               // Open video file
               if (avformat_open_input(&pFormatCtx, [pAudioInPath cStringUsingEncoding:NSASCIIStringEncoding], NULL, &option) != 0) {
                   NSLog(@"Could not open connection");
                   return NO;
               }
               av_dict_free(&option);
           }
               break;
           case kUDP:
           {
               if (avformat_open_input(&pFormatCtx, [pAudioInPath cStringUsingEncoding:NSASCIIStringEncoding], NULL, NULL) != 0) {
                   NSLog(@"Could not open connection");
                   return NO;
               }
           }
               break;
       }

       // Retrieve stream information
       if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
           NSLog(@"Could not find streaming information");
           return NO;
       }

       // Dump Streaming information
       av_dump_format(pFormatCtx, 0, [pAudioInPath UTF8String], 0);
       // The path is only released after it has been dumped above.
       pAudioInPath = nil;

       // Find the first audio stream
       if ((audioStream = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_AUDIO, -1, -1, &pAudioCodec, 0)) < 0) {
           NSLog(@"Could not find a audio streaming information");
           return NO;
       } else {
           // Succeed to get streaming information
           NSLog(@"== Audio pCodec Information");
           NSLog(@"name = %s",pAudioCodec->name);
           NSLog(@"sample_fmts = %d",*(pAudioCodec->sample_fmts));

           if (pAudioCodec->profiles) {
               NSLog(@"Profile names = %@", pAudioCodec->profiles);
           } else {
               NSLog(@"Profile is Null");
           }

           // Get a pointer to the codec context for the video stream
           pAudioCodeCtx = pFormatCtx->streams[audioStream]->codec;

           // Find out the decoder
           pAudioCodec = avcodec_find_decoder(pAudioCodeCtx->codec_id);

           // Open codec
           if (avcodec_open2(pAudioCodeCtx, pAudioCodec, NULL) < 0) {
               return NO;
           }
       }

       isStop = NO;

       return YES;
    }

    The code above initializes the audio queue and parses the stream header.
    Please suggest any changes that would make MMS stream playback smooth.
    How can I update the audio buffer size while buffering data in the audio queue?
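    This does not directly address the playback timing, but if the project is built against a recent FFmpeg, the deprecated pFormatCtx->streams[audioStream]->codec access in the code above can be replaced by building the decoder context from the stream's codec parameters. A minimal sketch, assuming FFmpeg 3.1 or newer; open_audio_decoder is just an illustrative helper name:

    #include <libavformat/avformat.h>
    #include <libavcodec/avcodec.h>

    /* Sketch: open a decoder for the given audio stream using codecpar
       instead of the deprecated AVStream->codec field. */
    static AVCodecContext *open_audio_decoder(AVFormatContext *fmt, int stream_index)
    {
        AVStream *st = fmt->streams[stream_index];
        const AVCodec *dec = avcodec_find_decoder(st->codecpar->codec_id);
        if (!dec)
            return NULL;

        AVCodecContext *ctx = avcodec_alloc_context3(dec);
        if (!ctx)
            return NULL;

        /* Copy sample rate, channel layout, extradata, etc. from the demuxer,
           then open the decoder. */
        if (avcodec_parameters_to_context(ctx, st->codecpar) < 0 ||
            avcodec_open2(ctx, dec, NULL) < 0) {
            avcodec_free_context(&ctx);
            return NULL;
        }
        return ctx;
    }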