Recherche avancée

Médias (0)

Mot : - Tags -/diogene

Aucun média correspondant à vos critères n’est disponible sur le site.

Autres articles (99)

  • MediaSPIP v0.2

    21 juin 2013, par

    MediaSPIP 0.2 est la première version de MediaSPIP stable.
    Sa date de sortie officielle est le 21 juin 2013 et est annoncée ici.
    Le fichier zip ici présent contient uniquement les sources de MediaSPIP en version standalone.
    Comme pour la version précédente, il est nécessaire d’installer manuellement l’ensemble des dépendances logicielles sur le serveur.
    Si vous souhaitez utiliser cette archive pour une installation en mode ferme, il vous faudra également procéder à d’autres modifications (...)

  • Mise à disposition des fichiers

    14 avril 2011, par

    Par défaut, lors de son initialisation, MediaSPIP ne permet pas aux visiteurs de télécharger les fichiers qu’ils soient originaux ou le résultat de leur transformation ou encodage. Il permet uniquement de les visualiser.
    Cependant, il est possible et facile d’autoriser les visiteurs à avoir accès à ces documents et ce sous différentes formes.
    Tout cela se passe dans la page de configuration du squelette. Il vous faut aller dans l’espace d’administration du canal, et choisir dans la navigation (...)

  • ANNEXE : Les plugins utilisés spécifiquement pour la ferme

    5 mars 2010, par

    Le site central/maître de la ferme a besoin d’utiliser plusieurs plugins supplémentaires vis à vis des canaux pour son bon fonctionnement. le plugin Gestion de la mutualisation ; le plugin inscription3 pour gérer les inscriptions et les demandes de création d’instance de mutualisation dès l’inscription des utilisateurs ; le plugin verifier qui fournit une API de vérification des champs (utilisé par inscription3) ; le plugin champs extras v2 nécessité par inscription3 (...)

Sur d’autres sites (10476)

  • Why does my ffmpeg audio sound slower and deeper - sample rate mismatch

    4 septembre 2020, par yogesh zinzu

    ok so this is a discord bot to record voice chat
https://hatebin.com/hgjlazacri
Now the bot works perfectly fine, but the issue is that the audio sounds a bit deeper and slower than normal. Why does it happen? How can I make the audio sound 1:1?

    


    

    

    const Discord = require('discord.js');
const client = new Discord.Client();
const ffmpegInstaller = require('@ffmpeg-installer/ffmpeg');
const ffmpeg = require('fluent-ffmpeg');
ffmpeg.setFfmpegPath(ffmpegInstaller.path);
const fs = require('fs-extra')
const mergeStream = require('merge-stream');
const config = require('./config.json');
const { getAudioDurationInSeconds } = require('get-audio-duration');
const cp = require('child_process');
const path1 = require('path');
const Enmap = require('enmap');
const UserRecords = require("./models/userrecords.js")
const ServerRecords = require("./models/serverrecords.js")
let prefix = `$`
class Readable extends require('stream').Readable { _read() {} }
let recording = false;
let currently_recording = {};
let mp3Paths = [];
const silence_buffer = new Uint8Array(3840);
const express = require('express')
const app = express()
const port = 3000
const publicIP = require('public-ip')
const { program } = require('commander');
const { path } = require('@ffmpeg-installer/ffmpeg');
const version = '0.0.1'
program.version(version);
let debug = false
let runProd = false
let fqdn = "";
const mongoose = require("mongoose");
const MongoClient = require('mongodb').MongoClient;
// Open the MongoDB connection used by the UserRecords / ServerRecords models.
// SECURITY NOTE(review): the connection string (with credentials) is hardcoded
// here — move it to an environment variable. Also note that
// require("dotenv").config() only runs *after* this call (next statement),
// so values from .env would not be available to this call anyway.
mongoose.connect('SECRRET',{
  useNewUrlParser: true
}, function(err){
  if(err){
    // Connection failure is only logged; the bot continues running without a DB.
    console.log(err);
  }else{
    console.log("Database connection initiated");
  }
});
require("dotenv").config()
// Wrap a raw byte buffer in a Readable stream so it can be merged/piped.
// The stream is deliberately left open (no null push): callers merge it with
// a live voice stream and keep feeding it silence frames until recording stops.
function bufferToStream(buffer) {
    const readable = new Readable();
    readable.push(buffer);
    return readable;
}





client.commands = new Enmap();

// Once the bot is logged in, start the HTTP server that serves recorded
// files and compute the fully-qualified domain name used to build links.
client.on('ready', async () => {
    console.log(`Logged in as ${client.user.tag}`);

    // Pick the host the download links will advertise: the box's public IP
    // in production, localhost otherwise.
    // (Fix: the original also awaited publicIP.v4() here but never used the
    // result — a dead network round-trip on every startup — and reassigned
    // host to "localhost" redundantly.)
    let host = "localhost"
    if (runProd) {
        host = `35.226.244.186`;
    }

    let protocol = "http";
    fqdn = `${protocol}://${host}:${port}`
    app.listen(port, `0.0.0.0`, () => {
        console.log(`Listening on port ${port} for ${host} at fqdn ${fqdn}`)
    })
});
// Cross-command state shared by the $record / stop handlers below.
let randomArr = [] // holds the random session id generated when $record runs
let finalArrWithIds = [] // ids of the human members present when $record runs
let variable = 0 // NOTE(review): never read anywhere visible — candidate for removal
client.on('message', async message => {
    console.log(`fuck`);
    if(message.content === `$record`){
        mp3Paths = []
        finalArrWithIds = []
        let membersToScrape = Array.from(message.member.voice.channel.members.values());
        membersToScrape.forEach((member) => {
            if(member.id === `749250882830598235`) {
                console.log(`botid`);
            }
            else {
                finalArrWithIds.push(member.id)
            }
            
        })
        const randomNumber = Math.floor(Math.random() * 100)
        randomArr = []
        randomArr.push(randomNumber)
    }
   
    
    const generateSilentData = async (silentStream, memberID) => {
        console.log(`recordingnow`)
        while(recording) {
            if (!currently_recording[memberID]) {
                silentStream.push(silence_buffer);
            }
            await new Promise(r => setTimeout(r, 20));
        }
        return "done";
    }
    console.log(generateSilentData, `status`)
    function generateOutputFile(channelID, memberID) {
        const dir = `./recordings/${channelID}/${memberID}`;
        fs.ensureDirSync(dir);
        const fileName = `${dir}/${randomArr[0]}.aac`;
        console.log(`${fileName} ---------------------------`);
        return fs.createWriteStream(fileName);
    }
    
    if (!fs.existsSync("public")) {
        fs.mkdirSync("public");
    }
    app.use("/public", express.static("./public"));
  if (!message.guild) return;

  if (message.content === config.prefix + config.record_command) {
    if (recording) {
        message.reply("bot is already recording");
        return
    }
    if (message.member.voice.channel) {
        recording = true;
        const connection = await message.member.voice.channel.join();
        const dispatcher = connection.play('./audio.mp3');

        connection.on('speaking', (user, speaking) => {
            if (speaking.has('SPEAKING')) {
                currently_recording[user.id] = true;
            } else {
                currently_recording[user.id] = false;
            }
        })


        let members = Array.from(message.member.voice.channel.members.values());
        members.forEach((member) => {

            if (member.id != client.user.id) {
                let memberStream = connection.receiver.createStream(member, {mode : 'pcm', end : 'manual'})

                let outputFile = generateOutputFile(message.member.voice.channel.id, member.id);
                console.log(outputFile, `outputfile here`);
                mp3Paths.push(outputFile.path);
                    

                silence_stream = bufferToStream(new Uint8Array(0));
                generateSilentData(silence_stream, member.id).then(data => console.log(data));
                let combinedStream = mergeStream(silence_stream, memberStream);

                ffmpeg(combinedStream)
                    .inputFormat('s32le')
                    .audioFrequency(44100)
                    .audioChannels(2)
                    .on('error', (error) => {console.log(error)})
                    .audioCodec('aac')
                    .format('adts') 
                    .pipe(outputFile)
                    
            }
        })
    } else {
      message.reply('You need to join a voice channel first!');
    }
  }

  if (message.content === config.prefix + config.stop_command) {

    let date = new Date();
    let dd = String(date.getDate()).padStart(2, '0');
    let mm = String(date.getMonth() + 1).padStart(2, '0'); 
    let yyyy = date.getFullYear();
    date = mm + '/' + dd + '/' + yyyy;





    let currentVoiceChannel = message.member.voice.channel;
    if (currentVoiceChannel) {
        recording = false;
        await currentVoiceChannel.leave();

        let mergedOutputFolder = './recordings/' + message.member.voice.channel.id + `/${randomArr[0]}/`;
        fs.ensureDirSync(mergedOutputFolder);
        let file_name = `${randomArr[0]}` + '.aac';
        let mergedOutputFile = mergedOutputFolder + file_name;
    
        
    let download_path = message.member.voice.channel.id + `/${randomArr[0]}/` + file_name;

        let mixedOutput = new ffmpeg();
        console.log(mp3Paths, `mp3pathshere`);
        mp3Paths.forEach((mp3Path) => {
             mixedOutput.addInput(mp3Path);
            
        })
        console.log(mp3Paths);
        //mixedOutput.complexFilter('amix=inputs=2:duration=longest');
        mixedOutput.complexFilter('amix=inputs=' + mp3Paths.length + ':duration=longest');
        
        let processEmbed = new Discord.MessageEmbed().setTitle(`Audio Processing.`)
        processEmbed.addField(`Audio processing starting now..`, `Processing Audio`)
        processEmbed.setThumbnail(`https://media.discordapp.net/attachments/730811581046325348/748610998985818202/speaker.png`)
        processEmbed.setColor(` #00FFFF`)
        const processEmbedMsg = await message.channel.send(processEmbed)
        async function saveMp3(mixedData, outputMixed) {
            console.log(`${mixedData} MIXED `)
            
            
            
            return new Promise((resolve, reject) => {
                mixedData.on('error', reject).on('progress',
                async (progress) => {
                    
                    let processEmbedEdit = new Discord.MessageEmbed().setTitle(`Audio Processing.`)
                    processEmbedEdit.addField(`Processing: ${progress.targetSize} KB converted`, `Processing Audio`)
                    processEmbedEdit.setThumbnail(`https://media.discordapp.net/attachments/730811581046325348/748610998985818202/speaker.png`)
                    processEmbedEdit.setColor(` #00FFFF`)
                    processEmbedMsg.edit(processEmbedEdit)
                    console.log('Processing: ' + progress.targetSize + ' KB converted');
                }).on('end', () => {
                    console.log('Processing finished !');
                    resolve()
                }).saveToFile(outputMixed);
                console.log(`${outputMixed} IT IS HERE`);
            })
        }
        // mixedOutput.saveToFile(mergedOutputFile);
        await saveMp3(mixedOutput, mergedOutputFile);
        console.log(`${mixedOutput} IN HEREEEEEEE`);
        // We saved the recording, now copy the recording
        if (!fs.existsSync(`./public`)) {
            fs.mkdirSync(`./public`);
        }
        let sourceFile = `${__dirname}/recordings/${download_path}`
        console.log(`DOWNLOAD PATH HERE ${download_path}`)
        const guildName = message.guild.id;
        const serveExist = `/public/${guildName}`
        if (!fs.existsSync(`.${serveExist}`)) {
            fs.mkdirSync(`.${serveExist}`)
        }
        let destionationFile = `${__dirname}${serveExist}/${file_name}`

        let errorThrown = false
        try {
            fs.copySync(sourceFile, destionationFile);
        } catch (err) {
            errorThrown = true
            await message.channel.send(`Error: ${err.message}`)
        }
        const usersWithTag = finalArrWithIds.map(user => `\n <@${user}>`);
        let timeSpent = await getAudioDurationInSeconds(`public/${guildName}/${file_name}`)
        let timesSpentRound = Math.floor(timeSpent)
        let finalTimeSpent = timesSpentRound / 60
        let finalTimeForReal = Math.floor(finalTimeSpent)
        if(!errorThrown){
            //--------------------- server recording save START
            class GeneralRecords {
                constructor(generalLink, date, voice, time) {
                  this.generalLink = generalLink;
                  this.date = date;
                  this.note = `no note`;
                  this.voice = voice;
                  this.time = time
                }
              }
              let newGeneralRecordClassObject = new GeneralRecords(`${fqdn}/public/${guildName}/${file_name}`, date, usersWithTag, finalTimeForReal)
              let checkingServerRecord = await ServerRecords.exists({userid: `server`})
              if(checkingServerRecord === true){
                  existingServerRecord = await ServerRecords.findOne({userid: `server`})
                  existingServerRecord.content.push(newGeneralRecordClassObject)
                  await existingServerRecord.save()
              }
              if(checkingServerRecord === false){
                let serverRecord = new ServerRecords()
                serverRecord.userid = `server`
                serverRecord.content.push(newGeneralRecordClassObject)
                await serverRecord.save()
              }
              //--------------------- server recording save STOP
        }
        
        //--------------------- personal recording section START
        for( member of finalArrWithIds) {

        let personal_download_path = message.member.voice.channel.id + `/${member}/` + file_name;
        let sourceFilePersonal = `${__dirname}/recordings/${personal_download_path}`
        let destionationFilePersonal = `${__dirname}${serveExist}/${member}/${file_name}`
        await fs.copySync(sourceFilePersonal, destionationFilePersonal);
        const user = client.users.cache.get(member);
        console.log(user, `user here`);
        try {
            ffmpeg.setFfmpegPath(ffmpegInstaller.path);
          
            ffmpeg(`public/${guildName}/${member}/${file_name}`)
             .audioFilters('silenceremove=stop_periods=-1:stop_duration=1:stop_threshold=-90dB')
             .output(`public/${guildName}/${member}/personal-${file_name}`)
             .on(`end`, function () {
               console.log(`DONE`);
             })
             .on(`error`, function (error) {
               console.log(`An error occured` + error.message)
             })
             .run();
             
          }
          catch (error) {
          console.log(error)
          }
        

        // ----------------- SAVING PERSONAL RECORDING TO DATABASE START
        class PersonalRecords {
            constructor(generalLink, personalLink, date, time) {
              this.generalLink = generalLink;
              this.personalLink = personalLink;
              this.date = date;
              this.note = `no note`;
              this.time = time;
            }
          }
          let timeSpentPersonal = await getAudioDurationInSeconds(`public/${guildName}/${file_name}`)
          let timesSpentRoundPersonal = Math.floor(timeSpentPersonal)
          let finalTimeSpentPersonal = timesSpentRoundPersonal / 60
          let finalTimeForRealPersonal = Math.floor(finalTimeSpentPersonal)
          let newPersonalRecordClassObject = new PersonalRecords(`${fqdn}/public/${guildName}/${file_name}`, `${fqdn}/public/${guildName}/${member}/personal-${file_name}`, date, finalTimeForRealPersonal)

           let checkingUserRecord = await UserRecords.exists({userid: member})
              if(checkingUserRecord === true){
                  existingUserRecord = await UserRecords.findOne({userid: member})
                  existingUserRecord.content.push(newPersonalRecordClassObject)
                  await existingUserRecord.save()
              }
              if(checkingUserRecord === false){
                let newRecord = new UserRecords()
                newRecord.userid = member
                newRecord.content.push(newPersonalRecordClassObject)
                await newRecord.save()
              }


       
        // ----------------- SAVING PERSONAL RECORDING TO DATABASE END
       

        const endPersonalEmbed = new Discord.MessageEmbed().setTitle(`Your performance was amazing ! Review it here :D`)
        endPersonalEmbed.setColor('#9400D3')
        endPersonalEmbed.setThumbnail(`https://media.discordapp.net/attachments/730811581046325348/745381641324724294/vinyl.png`)
        endPersonalEmbed.addField(`1
  • I received connection refused error while trying to stream live video through RTMP with FFMPEG

    25 septembre 2020, par Femzy

    I am working on a Node.js app that can send a camera stream to third-party platforms, i.e. Facebook and YouTube, using the RTMP protocol. It works well on my localhost, but once I deploy it to the server, it only gives me errors. The error I get is shown below.
Here is my codes

    


    server.js

    


    

    

    const child_process = require('child_process'); // To be used later for running FFmpeg
const express = require('express');
const http = require('http');
const WebSocketServer = require('ws').Server;

const app = express();
const server = http.createServer(app).listen(4000, () => {
  console.log('Listening...');
});

// Serve static files out of the www directory, where we will put our HTML page
app.use(express.static(__dirname + '/www'));


const wss = new WebSocketServer({
  server: server
});
wss.on('connection', (ws, req) => {
  
  
  
  const rtmpUrl = 'rtmp://a.rtmp.youtube.com/live2/MyStreamId';
  console.log('Target RTMP URL:', rtmpUrl);
  
  // Launch FFmpeg to handle all appropriate transcoding, muxing, and RTMP.
  // If 'ffmpeg' isn't in your path, specify the full path to the ffmpeg binary.
  const ffmpeg = child_process.spawn('ffmpeg', [
    // Facebook requires an audio track, so we create a silent one here.
    // Remove this line, as well as `-shortest`, if you send audio from the browser.
    //'-f', 'lavfi', '-i', 'anullsrc',
    
    // FFmpeg will read input video from STDIN
    '-i', '-',
    
    // Because we're using a generated audio source which never ends,
    // specify that we'll stop at end of other input.  Remove this line if you
    // send audio from the browser.
    //'-shortest',
    
    // If we're encoding H.264 in-browser, we can set the video codec to 'copy'
    // so that we don't waste any CPU and quality with unnecessary transcoding.
    // If the browser doesn't support H.264, set the video codec to 'libx264'
    // or similar to transcode it to H.264 here on the server.
    '-vcodec', 'copy',
    
    // AAC audio is required for Facebook Live.  No browser currently supports
    // encoding AAC, so we must transcode the audio to AAC here on the server.
    '-acodec', 'aac',
    
    // FLV is the container format used in conjunction with RTMP
    '-f', 'flv',
    
    // The output RTMP URL.
    // For debugging, you could set this to a filename like 'test.flv', and play
    // the resulting file with VLC.  Please also read the security considerations
    // later on in this tutorial.
    rtmpUrl 
  ]);
  
  // If FFmpeg stops for any reason, close the WebSocket connection.
  ffmpeg.on('close', (code, signal) => {
    console.log('FFmpeg child process closed, code ' + code + ', signal ' + signal);
    ws.terminate();
  });
  
  // Handle STDIN pipe errors by logging to the console.
  // These errors most commonly occur when FFmpeg closes and there is still
  // data to write.  If left unhandled, the server will crash.
  ffmpeg.stdin.on('error', (e) => {
    console.log('FFmpeg STDIN Error', e);
  });
  
  // FFmpeg outputs all of its messages to STDERR.  Let's log them to the console.
  ffmpeg.stderr.on('data', (data) => {
    console.log('FFmpeg STDERR:', data.toString());
  });

  // When data comes in from the WebSocket, write it to FFmpeg's STDIN.
  ws.on('message', (msg) => {
    console.log('DATA', msg);
    ffmpeg.stdin.write(msg);
  });
  
  // If the client disconnects, stop FFmpeg.
  ws.on('close', (e) => {
    ffmpeg.kill('SIGINT');
  });
  
});

    


    


    



    On the server.js file i create a websocket to receive stream data from the client side and then use FFMPEG to send the stream data over to youtube via the RTMP url

    


    Here is my client.js code

    


    

    

    // Open the transport WebSocket and, once connected, start capturing the
    // mixed canvas stream with MediaRecorder, shipping each recorded chunk
    // to the server over the socket.
    // (drawVideosToCanvas, getMixedVideoStream, mediaStream, mediaRecorder,
    // isRecording and the button handlers are defined elsewhere in the file.)
    const ws = new WebSocket(
             'wss://my-websocket-server.com'

        );
         ws.addEventListener('open', (e) => {
             console.log('WebSocket Open', e);
             drawVideosToCanvas();
             mediaStream = getMixedVideoStream(); // 30 FPS
             // Encode the mixed stream as WebM/H.264 at ~6 Mbps.
             mediaRecorder = new MediaRecorder(mediaStream, {
               mimeType: 'video/webm;codecs=h264',
               //videoBitsPerSecond : 3000000000
               bitsPerSecond: 6000000
             });

             // Every chunk MediaRecorder emits is relayed to the server.
             mediaRecorder.addEventListener('dataavailable', (e) => {
               ws.send(e.data);
             });
             mediaRecorder.onstop = function() {
              // NOTE(review): ws.close.bind(ws) only *creates* a bound
              // function and never calls it, so the socket is not actually
              // closed here — probably meant ws.close(). Confirm intent (the
              // commented-out 'stop' listener below suggests closing was
              // intended).
              ws.close.bind(ws);
              isRecording = false;
              actionBtn.textContent = 'Start Streaming';
              actionBtn.onclick = startRecording;
             }
             mediaRecorder.onstart = function() {
              // Flip the UI into "streaming" mode.
              isRecording = true;
              actionBtn.textContent = 'Stop Streaming';
              actionBtn.onclick = stopRecording;
              screenShareBtn.onclick = startSharing;
              screenShareBtn.disabled = false;
             }
             //mediaRecorder.addEventListener('stop', ws.close.bind(ws));

             mediaRecorder.start(1000); // Start recording, and dump data every second

           });

    


    


    



    In my client.js file, I capture the user's camera and then open the WebSocket connection to send the data to the server. Everything works fine on localhost, except when I deploy it to the live server.
I am wondering if there is a bad configuration on the server. The server is CentOS 7.8 and the app is running on Apache.
Here is how I configured the virtual host for the WebSocket domain:

    


    

    

ServerName my-websocket.com

  # Requests carrying WebSocket upgrade headers are rewritten to a ws://
  # proxy pass-through (requires mod_proxy_wstunnel).
  RewriteEngine on
  RewriteCond %{HTTP:Upgrade} websocket [NC]
  RewriteCond %{HTTP:Connection} upgrade [NC]
  RewriteRule .* "ws://127.0.0.1:3000/$1" [P,L]

  # Plain HTTP traffic is reverse-proxied to the Node app.
  # NOTE(review): this config targets port 3000, but the server.js shown
  # earlier listens on port 4000 — confirm which port is correct.
  ProxyPass "/" "http://127.0.0.1:3000/$1"
  ProxyPassReverse "/" "http://127.0.0.1:3000/$1"
  ProxyRequests off

    


    


    



    I don't know much about server configuration, but I thought maybe the configuration has to do with why FFmpeg cannot open a connection to the RTMP endpoint on the server.

    


    here is the error am getting

    


    

    

    FFmpeg STDERR: Input #0, lavfi, from 'anullsrc':
      Duration:
    FFmpeg STDERR: N/A, start: 0.000000, bitrate: 705 kb/s
        Stream #0:0: Audio: pcm_u8, 44100 Hz, stereo, u8, 705 kb/s

    DATA <Buffer 1a ...>
    DATA <Buffer 45 df a3 42 86 81 01 f7 f2 04 f3 08 82 88 6d 61 74 72 6f 73 6b 61 87 04 42 85 02 18 53 80 67 ff ... 53991 more bytes>
    DATA <Buffer 40 c1 81 00 f0 80 7b 83 3e 3b 07 d6 4e 1c 11 b4 7f cb 5e 68 9b d5 2a e3 06 c6 f3 94 ff 29 16 b2 60 04 ac 37 fb 1a 15 ea 39 a0 cd 02 b8 ... 56206 more bytes>
    FFmpeg STDERR: Input #1, matroska,webm, from 'pipe:':
      Metadata:
        encoder         :
    FFmpeg STDERR: Chrome
      Duration: N/A, start: 0.000000, bitrate: N/A
        Stream #1:0(eng): Audio: opus, 48000 Hz, mono, fltp (default)
        Stream #1:1(eng): Video: h264 (Constrained Baseline), yuv420p(progressive), 1366x768, SAR 1:1 DAR 683:384, 30.30 fps, 30 tbr, 1k tbn, 60 tbc (default)

    FFmpeg STDERR: [tcp @ 0xe5fac0] Connection to tcp://a.rtmp.youtube.com:1935 failed (Connection refused), trying next address
    [rtmp @ 0xe0fb80] Cannot open connection tcp://a.rtmp.youtube.com:1935

    FFmpeg STDERR: rtmp://a.rtmp.youtube.com/live2/mystreamid: Network is unreachable

    FFmpeg child process closed, code 1, signal null

    &#xD;&#xA;

    &#xD;&#xA;

    &#xD;&#xA;&#xA;

    I would really appreciate some insight into what may be causing this issue, or what I can do to solve it. Thanks in advance.

    &#xA;

  • Use FFMPEG mux flv and send rtmp on IOS

    5 novembre 2020, par downloss

    I would like to use the iPhone camera & microphone to capture data that is then pushed out through FFmpeg RTMP streaming.

    &#xA;&#xA;

    The following Function capture information on IOS

    &#xA;&#xA;

    - (void)captureOutput:(AVCaptureOutput *)captureOutput  didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection&#xA;{    &#xA;    if (connection == videoCaptureConnection)&#xA;    {&#xA;        [manager264 encoderToH264:sampleBuffer];&#xA;    }&#xA;    else if (connection == audioCaptureConnection)&#xA;    {&#xA;        [manager264 encoderToMP3:sampleBuffer];&#xA;    }&#xA;}&#xA;

    &#xA;&#xA;

    Initialization FFMPEG

    &#xA;&#xA;

    - (int)setX264Resource&#xA;{&#xA;    Global_Variables_VVV = (AppDelegate *)[[UIApplication sharedApplication] delegate];&#xA;    avformat_network_init();&#xA;    av_register_all();&#xA;&#xA;    pFormatCtx = avformat_alloc_context();&#xA;    avformat_alloc_output_context2(&amp;pFormatCtx, NULL, "flv", out_file);&#xA;    fmt = pFormatCtx->oformat;&#xA;&#xA;    //Open output URL&#xA;    if (avio_open(&amp;pFormatCtx->pb, out_file, AVIO_FLAG_READ_WRITE) &lt; 0)&#xA;    {&#xA;        printf("Failed to open output file! \n");&#xA;        return -1;&#xA;    }&#xA;&#xA;    /* Add the audio and video streams using the default format codecs&#xA;     * and initialize the codecs. */&#xA;    video_st = NULL;&#xA;    audio_st = NULL;&#xA;   if (fmt->video_codec != AV_CODEC_ID_NONE) {&#xA;        video_st = add_stream(pFormatCtx, &amp;pCodec, AV_CODEC_ID_H264);&#xA;    }&#xA;   if (fmt->audio_codec != AV_CODEC_ID_NONE) {&#xA;        audio_st = add_stream(pFormatCtx, &amp;aCodec, AV_CODEC_ID_MP3);&#xA;    }&#xA;&#xA;    /* Now that all the parameters are set, we can open the audio and&#xA;     * video codecs and allocate the necessary encode buffers. 
*/&#xA;    if (video_st)&#xA;        [self open_video:pFormatCtx avcodec:pCodec avstream:video_st];&#xA;&#xA;    if (audio_st)&#xA;        [self open_audio:pFormatCtx avcodec:aCodec avstream:audio_st];&#xA;&#xA;    // Show some Information&#xA;    av_dump_format(pFormatCtx, 0, out_file, 1);&#xA;&#xA;    //Write File Header&#xA;    avformat_write_header(pFormatCtx, NULL);&#xA;&#xA;    av_new_packet(&amp;pkt, picture_size);&#xA;    av_new_packet(&amp;pkt2, picture_size);&#xA;&#xA;    AVCodecContext *c = video_st->codec;&#xA;&#xA;    y_size = c->width * c->height;&#xA;&#xA;    if (pFrame)&#xA;        pFrame->pts = 0;&#xA;&#xA;    if(aFrame)&#xA;    {&#xA;        aFrame->pts = 0;&#xA;    }&#xA;&#xA;    return 0;&#xA;}&#xA;&#xA;static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id)&#xA;{&#xA;    AVCodecContext *c;&#xA;    AVStream *st;&#xA;    /* find the encoder */&#xA;    *codec = avcodec_find_encoder(codec_id);&#xA;    if (!(*codec))&#xA;    {&#xA;        NSLog(@"Could not find encoder for &#x27;%s&#x27;\n",&#xA;          avcodec_get_name(codec_id));&#xA;    }&#xA;    st = avformat_new_stream(oc, *codec);&#xA;    if (!st)&#xA;    {&#xA;        NSLog(@"Could not allocate stream\n");&#xA;    }&#xA;    st->id = oc->nb_streams-1;&#xA;    c = st->codec;&#xA;    switch ((*codec)->type)&#xA;    {&#xA;        case AVMEDIA_TYPE_AUDIO:&#xA;            c->codec_id = AV_CODEC_ID_MP3;&#xA;            c->codec_type = AVMEDIA_TYPE_AUDIO;&#xA;            c->channels = 1;&#xA;&#xA;            c->sample_fmt = AV_SAMPLE_FMT_S16P;&#xA;            c->bit_rate = 128000;&#xA;            c->sample_rate = 44100;&#xA;            c->channel_layout = AV_CH_LAYOUT_MONO;&#xA;            break;&#xA;        case AVMEDIA_TYPE_VIDEO:&#xA;            c->codec_id = AV_CODEC_ID_H264;&#xA;            c->codec_type=AVMEDIA_TYPE_VIDEO;&#xA;            /* Resolution must be a multiple of two. 
*/&#xA;            c->width    = 720;&#xA;            c->height   = 1280;&#xA;            /* timebase: This is the fundamental unit of time (in seconds) in terms&#xA;             * of which frame timestamps are represented. For fixed-fps content,&#xA;             * timebase should be 1/framerate and timestamp increments should be&#xA;             * identical to 1. */&#xA;            c->time_base.den = 30;&#xA;            c->time_base.num = 1;&#xA;            c->gop_size      = 15; /* emit one intra frame every twelve frames at most */&#xA;            c->pix_fmt       = PIX_FMT_YUV420P;&#xA;            c->max_b_frames = 0;&#xA;            c->bit_rate = 3000000;&#xA;            c->qmin = 10;&#xA;            c->qmax = 51;&#xA;&#xA;            break;&#xA;        default:&#xA;            break;&#xA;    }&#xA;    /* Some formats want stream headers to be separate. */&#xA;    if (oc->oformat->flags &amp; AVFMT_GLOBALHEADER)&#xA;        c->flags |= CODEC_FLAG_GLOBAL_HEADER;&#xA;    return st;&#xA;}&#xA;

    &#xA;&#xA;

    SampleBuffer will turn into H264 and pushed out RTMP Streaming

    &#xA;&#xA;

    - (void)encoderToH264:(CMSampleBufferRef)sampleBuffer&#xA;{&#xA;    CVPixelBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);&#xA;    if (CVPixelBufferLockBaseAddress(imageBuffer, 0) == kCVReturnSuccess)&#xA;    {&#xA;        UInt8 *bufferbasePtr = (UInt8 *)CVPixelBufferGetBaseAddress(imageBuffer);&#xA;        UInt8 *bufferPtr = (UInt8 *)CVPixelBufferGetBaseAddressOfPlane(imageBuffer,0);&#xA;        UInt8 *bufferPtr1 = (UInt8 *)CVPixelBufferGetBaseAddressOfPlane(imageBuffer,1);&#xA;        size_t buffeSize = CVPixelBufferGetDataSize(imageBuffer);&#xA;        size_t width = CVPixelBufferGetWidth(imageBuffer);&#xA;        size_t height = CVPixelBufferGetHeight(imageBuffer);&#xA;        size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);&#xA;        size_t bytesrow0 = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer,0);&#xA;        size_t bytesrow1  = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer,1);&#xA;        size_t bytesrow2 = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer,2);&#xA;        UInt8 *yuv420_data = (UInt8 *)malloc(width * height *3/ 2); // buffer to store YUV with layout YYYYYYYYUUVV&#xA;&#xA;&#xA;        /* convert NV12 data to YUV420*/&#xA;        UInt8 *pY = bufferPtr ;&#xA;        UInt8 *pUV = bufferPtr1;&#xA;        UInt8 *pU = yuv420_data &#x2B; width*height;&#xA;        UInt8 *pV = pU &#x2B; width*height/4;&#xA;        for(int i =0;i/Read raw YUV data&#xA;        picture_buf = yuv420_data;&#xA;        pFrame->data[0] = picture_buf;              // Y&#xA;        pFrame->data[1] = picture_buf&#x2B; y_size;      // U&#xA;        pFrame->data[2] = picture_buf&#x2B; y_size*5/4;  // V&#xA;&#xA;        int got_picture = 0;&#xA;&#xA;        // Encode&#xA;        pFrame->width = 720;&#xA;        pFrame->height = 1280;&#xA;        pFrame->format = PIX_FMT_YUV420P;&#xA;&#xA;        AVCodecContext *c = video_st->codec;&#xA;        int ret = avcodec_encode_video2(c, &amp;pkt, pFrame, &amp;got_picture);&#xA;        if(ret &lt; 
0)&#xA;        {&#xA;            printf("Failed to encode! \n");&#xA;        }&#xA;&#xA;        if (got_picture==1)&#xA;        {&#xA;            /* Compute current audio and video time. */&#xA;            video_time = video_st ? video_st->pts.val * av_q2d(video_st->time_base) : 0.0;&#xA;            pFrame->pts &#x2B;= av_rescale_q(1, video_st->codec->time_base, video_st->time_base);&#xA;&#xA;            if(pkt.size != 0)&#xA;            {&#xA;                printf("Succeed to encode frame: %5lld\tsize:%5d\n", pFrame->pts, pkt.size);&#xA;                pkt.stream_index = video_st->index;&#xA;                ret = av_write_frame(pFormatCtx, &amp;pkt);&#xA;                av_free_packet(&amp;pkt);&#xA;            }&#xA;        }&#xA;        free(yuv420_data);&#xA;    }&#xA;    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);&#xA;}&#xA;

    &#xA;&#xA;

    The sample buffer is encoded to MP3 and pushed out over an RTMP stream.

    &#xA;&#xA;

    -(void)encoderToMP3:(CMSampleBufferRef)sampleBuffer&#xA;{&#xA;    CMSampleTimingInfo timing_info;&#xA;    CMSampleBufferGetSampleTimingInfo(sampleBuffer, 0, &amp;timing_info);&#xA;    double  pts=0;&#xA;    double  dts=0;&#xA;    AVCodecContext *c;&#xA;    int got_packet, ret;&#xA;    c = audio_st->codec;&#xA;    CMItemCount numSamples = CMSampleBufferGetNumSamples(sampleBuffer);&#xA;&#xA;    NSUInteger channelIndex = 0;&#xA;&#xA;    CMBlockBufferRef audioBlockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);&#xA;&#xA;    size_t audioBlockBufferOffset = (channelIndex * numSamples * sizeof(SInt16));&#xA;    size_t lengthAtOffset = 0;&#xA;    size_t totalLength = 0;&#xA;    SInt16 *samples = NULL;&#xA;    CMBlockBufferGetDataPointer(audioBlockBuffer, audioBlockBufferOffset, &amp;lengthAtOffset, &amp;totalLength, (char **)(&amp;samples));&#xA;&#xA;    const AudioStreamBasicDescription *audioDescription = CMAudioFormatDescriptionGetStreamBasicDescription(CMSampleBufferGetFormatDescription(sampleBuffer));&#xA;&#xA;    SwrContext *swr = swr_alloc();&#xA;&#xA;    int in_smprt = (int)audioDescription->mSampleRate;&#xA;    av_opt_set_int(swr, "in_channel_layout",  AV_CH_LAYOUT_MONO, 0);&#xA;    av_opt_set_int(swr, "out_channel_layout", audio_st->codec->channel_layout,  0);&#xA;&#xA;    av_opt_set_int(swr, "in_channel_count", audioDescription->mChannelsPerFrame,  0);&#xA;    av_opt_set_int(swr, "out_channel_count", 1,  0);&#xA;&#xA;    av_opt_set_int(swr, "out_channel_layout", audio_st->codec->channel_layout,  0);&#xA;    av_opt_set_int(swr, "in_sample_rate",     audioDescription->mSampleRate,0);&#xA;&#xA;    av_opt_set_int(swr, "out_sample_rate",    audio_st->codec->sample_rate,0);&#xA;&#xA;    av_opt_set_sample_fmt(swr, "in_sample_fmt",  AV_SAMPLE_FMT_S16, 0);&#xA;&#xA;    av_opt_set_sample_fmt(swr, "out_sample_fmt", audio_st->codec->sample_fmt,  0);&#xA;&#xA;    swr_init(swr);&#xA;    uint8_t **input = NULL;&#xA;    int src_linesize;&#xA;    int in_samples = 
(int)numSamples;&#xA;    ret = av_samples_alloc_array_and_samples(&amp;input, &amp;src_linesize, audioDescription->mChannelsPerFrame, in_samples, AV_SAMPLE_FMT_S16P, 0);&#xA;&#xA;    *input=(uint8_t*)samples;&#xA;    uint8_t *output=NULL;&#xA;&#xA;    int out_samples = av_rescale_rnd(swr_get_delay(swr, in_smprt) &#x2B;in_samples, (int)audio_st->codec->sample_rate, in_smprt, AV_ROUND_UP);&#xA;&#xA;    av_samples_alloc(&amp;output, NULL, audio_st->codec->channels, out_samples, audio_st->codec->sample_fmt, 0);&#xA;    in_samples = (int)numSamples;&#xA;    out_samples = swr_convert(swr, &amp;output, out_samples, (const uint8_t **)input, in_samples);&#xA;&#xA;    aFrame->nb_samples =(int) out_samples;&#xA;&#xA;    ret = avcodec_fill_audio_frame(aFrame, audio_st->codec->channels, audio_st->codec->sample_fmt,&#xA;                                   (uint8_t *)output,&#xA;                                   (int) out_samples *&#xA;                                   av_get_bytes_per_sample(audio_st->codec->sample_fmt) *&#xA;                                   audio_st->codec->channels, 1);&#xA;    if (ret &lt; 0)&#xA;    {&#xA;        fprintf(stderr, "Error fill audio frame: %s\n", av_err2str(ret));&#xA;    }&#xA;    aFrame->channel_layout = audio_st->codec->channel_layout;&#xA;    aFrame->channels=audio_st->codec->channels;&#xA;    aFrame->sample_rate= audio_st->codec->sample_rate;&#xA;&#xA;    if (timing_info.presentationTimeStamp.timescale!=0)&#xA;        pts=(double) timing_info.presentationTimeStamp.value/timing_info.presentationTimeStamp.timescale;&#xA;&#xA;&#xA;    aFrame->pts = pts*audio_st->time_base.den;&#xA;    aFrame->pts = av_rescale_q(aFrame->pts, audio_st->time_base, audio_st->codec->time_base);&#xA;&#xA;    ret = avcodec_encode_audio2(c, &amp;pkt2, aFrame, &amp;got_packet);&#xA;&#xA;    if (ret &lt; 0)&#xA;    {&#xA;        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));&#xA;    }&#xA;    swr_free(&amp;swr);&#xA;&#xA;    if 
(got_packet)&#xA;    {&#xA;        pkt2.stream_index = audio_st->index;        &#xA;&#xA;        // Write the compressed frame to the media file.&#xA;&#xA;        ret = av_interleaved_write_frame(pFormatCtx, &amp;pkt2);&#xA;        if (ret != 0)&#xA;        {&#xA;            fprintf(stderr, "Error while writing audio frame: %s\n", av_err2str(ret));&#xA;            av_free_packet(&amp;pkt2);&#xA;        }&#xA;    }&#xA;}&#xA;

    &#xA;&#xA;

    A "Broken pipe" error occurs shortly after execution starts.&#xA;I suspect the PTS values are not being adjusted correctly, but I do not know how to adjust them.

    &#xA;&#xA;

    2016-03-09 16:57:41.058 PoliceCamPlayer[1004:193465] recordVideo....&#xA;[libx264 @ 0x12f8b6e00] using cpu capabilities: ARMv8 NEON&#xA;[libx264 @ 0x12f8b6e00] profile Constrained Baseline, level 3.1&#xA;[libx264 @ 0x12f8b6e00] 264 - core 148 - H.264/MPEG-4 AVC codec - Copyleft 2003-2016 - http://www.videolan.org/x264.html - options: cabac=0 ref=1 deblock=1:0:0 analyse=0x1:0x111 me=hex subme=2 psy=1 psy_rd=1.00:0.00 mixed_ref=0 me_range=16 chroma_me=1 trellis=0 8x8dct=0 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=0 threads=2 lookahead_threads=2 sliced_threads=1 slices=2 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=0 weightp=0 keyint=15 keyint_min=1 scenecut=40 intra_refresh=0 rc=abr mbtree=0 bitrate=3000 ratetol=1.0 qcomp=0.60 qpmin=25 qpmax=51 qpstep=4 ip_ratio=1.40 aq=1:1.00&#xA;Output #0, flv, to &#x27;rtmp://XXX.XX.XXX.XX/myapp/jackal&#x27;:&#xA;    Stream #0:0: Video: h264 (libx264), yuv420p, 720x1280, q=25-51, 3000 kb/s, 23 tbc&#xA;    Stream #0:1: Audio: mp3 (libmp3lame), 44100 Hz, mono, s16p, 64 kb/s&#xA;[flv @ 0x12f8b5400] Using AVStream.codec.time_base as a timebase hint to the muxer is deprecated. Set AVStream.time_base instead.&#xA;[flv @ 0x12f8b5400] Using AVStream.codec.time_base as a timebase hint to the muxer is deprecated. Set AVStream.time_base instead.&#xA;[libx264 @ 0x12f8b6e00] Provided packet is too small, needs to be 33468&#xA;Failed to encode! 
&#xA;Audio_pts:4154432515 pts_time:4.15443e&#x2B;06 dts:4154432515 dts_time:4.15443e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Video_pts:43 pts_time:0.043 dts:43 dts_time:0.043 duration:0 duration_time:0 stream_index:0&#xA;Audio_pts:4154433667 pts_time:4.15443e&#x2B;06 dts:4154433667 dts_time:4.15443e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Audio_pts:4154434854 pts_time:4.15443e&#x2B;06 dts:4154434854 dts_time:4.15443e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Video_pts:86 pts_time:0.086 dts:86 dts_time:0.086 duration:0 duration_time:0 stream_index:0&#xA;Audio_pts:4154435996 pts_time:4.15444e&#x2B;06 dts:4154435996 dts_time:4.15444e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Audio_pts:4154437138 pts_time:4.15444e&#x2B;06 dts:4154437138 dts_time:4.15444e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Video_pts:129 pts_time:0.129 dts:129 dts_time:0.129 duration:0 duration_time:0 stream_index:0&#xA;Audio_pts:4154438281 pts_time:4.15444e&#x2B;06 dts:4154438281 dts_time:4.15444e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Video_pts:172 pts_time:0.172 dts:172 dts_time:0.172 duration:0 duration_time:0 stream_index:0&#xA;Audio_pts:4154439467 pts_time:4.15444e&#x2B;06 dts:4154439467 dts_time:4.15444e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Video_pts:215 pts_time:0.215 dts:215 dts_time:0.215 duration:0 duration_time:0 stream_index:0&#xA;Audio_pts:4154440609 pts_time:4.15444e&#x2B;06 dts:4154440609 dts_time:4.15444e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Audio_pts:4154441752 pts_time:4.15444e&#x2B;06 dts:4154441752 dts_time:4.15444e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Video_pts:258 pts_time:0.258 dts:258 dts_time:0.258 duration:0 duration_time:0 stream_index:0&#xA;Audio_pts:4154442884 pts_time:4.15444e&#x2B;06 dts:4154442884 dts_time:4.15444e&#x2B;06 duration:1152 duration_time:1.152 
stream_index:1&#xA;Audio_pts:4154444071 pts_time:4.15444e&#x2B;06 dts:4154444071 dts_time:4.15444e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Video_pts:301 pts_time:0.301 dts:301 dts_time:0.301 duration:0 duration_time:0 stream_index:0&#xA;Audio_pts:4154445213 pts_time:4.15445e&#x2B;06 dts:4154445213 dts_time:4.15445e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Audio_pts:4154446355 pts_time:4.15445e&#x2B;06 dts:4154446355 dts_time:4.15445e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Video_pts:344 pts_time:0.344 dts:344 dts_time:0.344 duration:0 duration_time:0 stream_index:0&#xA;Audio_pts:4154447498 pts_time:4.15445e&#x2B;06 dts:4154447498 dts_time:4.15445e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Video_pts:387 pts_time:0.387 dts:387 dts_time:0.387 duration:0 duration_time:0 stream_index:0&#xA;Audio_pts:4154448640 pts_time:4.15445e&#x2B;06 dts:4154448640 dts_time:4.15445e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Audio_pts:4154449826 pts_time:4.15445e&#x2B;06 dts:4154449826 dts_time:4.15445e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Video_pts:430 pts_time:0.43 dts:430 dts_time:0.43 duration:0 duration_time:0 stream_index:0&#xA;Audio_pts:4154450969 pts_time:4.15445e&#x2B;06 dts:4154450969 dts_time:4.15445e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Audio_pts:4154452101 pts_time:4.15445e&#x2B;06 dts:4154452101 dts_time:4.15445e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;...................&#xA;...................&#xA;...................&#xA;Video_pts:4343 pts_time:4.343 dts:4343 dts_time:4.343 duration:0 duration_time:0 stream_index:0&#xA;Audio_pts:4154622619 pts_time:4.15462e&#x2B;06 dts:4154622619 dts_time:4.15462e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Video_pts:4386 pts_time:4.386 dts:4386 dts_time:4.386 duration:0 duration_time:0 stream_index:0&#xA;Audio_pts:4154623761 pts_time:4.15462e&#x2B;06 
dts:4154623761 dts_time:4.15462e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Audio_pts:4154624903 pts_time:4.15462e&#x2B;06 dts:4154624903 dts_time:4.15462e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Audio_pts:4154626090 pts_time:4.15463e&#x2B;06 dts:4154626090 dts_time:4.15463e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Video_pts:4429 pts_time:4.429 dts:4429 dts_time:4.429 duration:0 duration_time:0 stream_index:0&#xA;Audio_pts:4154627222 pts_time:4.15463e&#x2B;06 dts:4154627222 dts_time:4.15463e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Video_pts:4472 pts_time:4.472 dts:4472 dts_time:4.472 duration:0 duration_time:0 stream_index:0&#xA;Error while writing audio frame: Broken pipe&#xA;Audio_pts:4154628365 pts_time:4.15463e&#x2B;06 dts:4154628365 dts_time:4.15463e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Error while writing audio frame: Broken pipe&#xA;Audio_pts:4154629507 pts_time:4.15463e&#x2B;06 dts:4154629507 dts_time:4.15463e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Error while writing audio frame: Broken pipe&#xA;Audio_pts:4154630693 pts_time:4.15463e&#x2B;06 dts:4154630693 dts_time:4.15463e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Error while writing audio frame: Broken pipe&#xA;Audio_pts:4154631836 pts_time:4.15463e&#x2B;06 dts:4154631836 dts_time:4.15463e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;Error while writing audio frame: Broken pipe&#xA;Audio_pts:4154632978 pts_time:4.15463e&#x2B;06 dts:4154632978 dts_time:4.15463e&#x2B;06 duration:1152 duration_time:1.152 stream_index:1&#xA;.......................&#xA;.......................&#xA;.......................&#xA;2016-03-09 16:57:49.345 PoliceCamPlayer[1004:193465] stopRecord!!!&#xA;Video_pts:7783 pts_time:7.783 dts:7783 dts_time:7.783 duration:0 duration_time:0 stream_index:0&#xA;[flv @ 0x12f8b5400] Failed to update header with correct duration.&#xA;[flv @ 
0x12f8b5400] Failed to update header with correct filesize.&#xA;[libx264 @ 0x12f8b6e00] frame I:28    Avg QP:25.36  size: 24181&#xA;[libx264 @ 0x12f8b6e00] frame P:154   Avg QP:25.34  size:  6603&#xA;[libx264 @ 0x12f8b6e00] mb I  I16..4: 80.9%  0.0% 19.1%&#xA;[libx264 @ 0x12f8b6e00] mb P  I16..4:  5.9%  0.0%  0.2%  P16..4: 28.2%  4.4%  1.0%  0.0%  0.0%    skip:60.2%&#xA;[libx264 @ 0x12f8b6e00] final ratefactor: 16.70&#xA;[libx264 @ 0x12f8b6e00] coded y,uvDC,uvAC intra: 35.8% 9.3% 0.4% inter: 8.8% 1.6% 0.0%&#xA;[libx264 @ 0x12f8b6e00] i16 v,h,dc,p: 28% 26% 26% 21%&#xA;[libx264 @ 0x12f8b6e00] i4 v,h,dc,ddl,ddr,vr,hd,vl,hu: 13% 26% 25%  3%  7%  4%  5%  3% 13%&#xA;[libx264 @ 0x12f8b6e00] i8c dc,h,v,p: 85%  9%  5%  0%&#xA;[libx264 @ 0x12f8b6e00] kb/s:1712.63&#xA;

    &#xA;