Advanced search

Media (0)

Word: - Tags - / logo

No media matching your criteria is available on this site.

Other articles (1)

  • Selection of projects using MediaSPIP

    2 May 2011

    The examples below are representative of specific uses of MediaSPIP in specific projects.
    MediaSPIP farm @ Infini
    The non-profit organization Infini develops hospitality activities, an internet access point, training, innovative projects in the field of information and communication technologies, and website hosting. It plays a unique and prominent role in the Brest (France) area and at the national level, among the half-dozen such associations. Its members (...)

On other sites (987)

  • ADD Image overlay to ffmpeg video stream

    1 July 2017, by Chris

    I am new to ffmpeg and want to add a HUD to the video stream, so I have a few questions.

    1. Which file do I need to edit?
    2. What do I need to do to achieve this?

    Thanks in advance. Also, I am very new to all of this, so I will need step-by-step instructions.

    I saw other questions saying to add this: ffmpeg -n -i video.mp4 -i logo.png -filter_complex "[0:v]setsar=sar=1[v];[v][1]blend=all_mode='overlay':all_opacity=0.7" -movflags +faststart tmb/video.mp4

    But I don't know where to put it. I entered it in the terminal and got this:

    pi@raspberrypi:~ $ ffmpeg -n -i video.mp4 -i logo.png -filter_complex "[0:v]setsar=sar=1[v];[v][1]blend=all_mode='overlay':all_opacity=0.7" -movflags +faststart tmb/video.mp4
    ffmpeg version N-86215-gb5228e4 Copyright (c) 2000-2017 the FFmpeg developers
     built with gcc 4.9.2 (Raspbian 4.9.2-10)
     configuration: --arch=armel --target-os=linux --enable-gpl --enable-libx264 --enable-nonfree --extra-libs=-ldl
     libavutil      55. 63.100 / 55. 63.100
     libavcodec     57. 96.101 / 57. 96.101
     libavformat    57. 72.101 / 57. 72.101
     libavdevice    57.  7.100 / 57.  7.100
     libavfilter     6. 90.100 /  6. 90.100
     libswscale      4.  7.101 /  4.  7.101
     libswresample   2.  8.100 /  2.  8.100
     libpostproc    54.  6.100 / 54.  6.100
    video.mp4: No such file or directory

    I don't understand what I am supposed to do with the video.mp4.

    Here is the script that sends the video:

    import subprocess
    import shlex
    import re
    import os
    import time
    import urllib2
    import platform
    import json
    import sys
    import base64
    import random


    import argparse

    parser = argparse.ArgumentParser(description='robot control')
    parser.add_argument('camera_id')
    parser.add_argument('video_device_number', default=0, type=int)
    parser.add_argument('--kbps', default=450, type=int)
    parser.add_argument('--brightness', default=75, type=int, help='camera brightness')
    parser.add_argument('--contrast', default=75, type=int, help='camera contrast')
    parser.add_argument('--saturation', default=15, type=int, help='camera saturation')
    parser.add_argument('--rotate180', default=False, type=bool, help='rotate image 180 degrees')
    parser.add_argument('--env', default="prod")



    args = parser.parse_args()



    server = "runmyrobot.com"
    #server = "52.52.213.92"


    from socketIO_client import SocketIO, LoggingNamespace

    # enable raspicam driver in case a raspicam is being used
    os.system("sudo modprobe bcm2835-v4l2")


    if args.env == "dev":
       print "using dev port 8122"
       port = 8122
    elif args.env == "prod":
       print "using prod port 8022"
       port = 8022
    else:
       print "invalid environment"
       sys.exit(0)


    print "initializing socket io"
    print "server:", server
    print "port:", port
    socketIO = SocketIO(server, port, LoggingNamespace)
    print "finished initializing socket io"

    #ffmpeg -f qtkit -i 0 -f mpeg1video -b 400k -r 30 -s 320x240 http://52.8.81.124:8082/hello/320/240/


    def onHandleCameraCommand(*args):
       #thread.start_new_thread(handle_command, args)
       print args


    socketIO.on('command_to_camera', onHandleCameraCommand)


    def onHandleTakeSnapshotCommand(*args):
       print "taking snapshot"
       inputDeviceID = streamProcessDict['device_answer']
       snapShot(platform.system(), inputDeviceID)
       with open ("snapshot.jpg", 'rb') as f:
           data = f.read()
       print "emit"

       socketIO.emit('snapshot', {'image':base64.b64encode(data)})

    socketIO.on('take_snapshot_command', onHandleTakeSnapshotCommand)


    def randomSleep():
       """A short wait is good for quick recovery, but sometimes a longer delay is needed or it will just keep trying and failing short intervals, like because the system thinks the port is still in use and every retry makes the system think it's still in use. So, this has a high likelihood of picking a short interval, but will pick a long one sometimes."""

       timeToWait = random.choice((0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 5))
       print "sleeping", timeToWait
       time.sleep(timeToWait)



    def getVideoPort():


       url = 'http://%s/get_video_port/%s' % (server, cameraIDAnswer)


       for retryNumber in range(2000):
           try:
               print "GET", url
               response = urllib2.urlopen(url).read()
               break
           except:
               print "could not open url ", url
               time.sleep(2)

       return json.loads(response)['mpeg_stream_port']

    def getAudioPort():


       url = 'http://%s/get_audio_port/%s' % (server, cameraIDAnswer)


       for retryNumber in range(2000):
           try:
               print "GET", url
               response = urllib2.urlopen(url).read()
               break
           except:
               print "could not open url ", url
               time.sleep(2)

       return json.loads(response)['audio_stream_port']



    def runFfmpeg(commandLine):

       print commandLine
       ffmpegProcess = subprocess.Popen(shlex.split(commandLine))
       print "command started"

       return ffmpegProcess



    def handleDarwin(deviceNumber, videoPort, audioPort):


       p = subprocess.Popen(["ffmpeg", "-list_devices", "true", "-f", "qtkit", "-i", "dummy"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)

       out, err = p.communicate()

       print err

       deviceAnswer = raw_input("Enter the number of the camera device for your robot from the list above: ")
       commandLine = 'ffmpeg -f qtkit -i %s -f mpeg1video -b 400k -r 30 -s 320x240 http://%s:%s/hello/320/240/' % (deviceAnswer, server, videoPort)

       process = runFfmpeg(commandLine)

       return {'process': process, 'device_answer': deviceAnswer}


    def handleLinux(deviceNumber, videoPort, audioPort):

       print "sleeping to give the camera time to start working"
       randomSleep()
       print "finished sleeping"


       #p = subprocess.Popen(["ffmpeg", "-list_devices", "true", "-f", "qtkit", "-i", "dummy"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
       #out, err = p.communicate()
       #print err


       os.system("v4l2-ctl -c brightness={brightness} -c contrast={contrast} -c saturation={saturation}".format(brightness=args.brightness,
                                                                                                                contrast=args.contrast,
                                                                                                                saturation=args.saturation))


       if deviceNumber is None:
           deviceAnswer = raw_input("Enter the number of the camera device for your robot: ")
       else:
           deviceAnswer = str(deviceNumber)


       #commandLine = '/usr/local/bin/ffmpeg -s 320x240 -f video4linux2 -i /dev/video%s -f mpeg1video -b 1k -r 20 http://runmyrobot.com:%s/hello/320/240/' % (deviceAnswer, videoPort)
       #commandLine = '/usr/local/bin/ffmpeg -s 640x480 -f video4linux2 -i /dev/video%s -f mpeg1video -b 150k -r 20 http://%s:%s/hello/640/480/' % (deviceAnswer, server, videoPort)
       # For new JSMpeg
       #commandLine = '/usr/local/bin/ffmpeg -f v4l2 -framerate 25 -video_size 640x480 -i /dev/video%s -f mpegts -codec:v mpeg1video -s 640x480 -b:v 250k -bf 0 http://%s:%s/hello/640/480/' % (deviceAnswer, server, videoPort) # ClawDaddy
       #commandLine = '/usr/local/bin/ffmpeg -s 1280x720 -f video4linux2 -i /dev/video%s -f mpeg1video -b 1k -r 20 http://runmyrobot.com:%s/hello/1280/720/' % (deviceAnswer, videoPort)


       if args.rotate180:
           rotationOption = "-vf transpose=2,transpose=2"
       else:
           rotationOption = ""

       # video with audio
       videoCommandLine = '/usr/local/bin/ffmpeg -f v4l2 -framerate 25 -video_size 640x480 -i /dev/video%s %s -f mpegts -codec:v mpeg1video -s 640x480 -b:v %dk -bf 0 -muxdelay 0.001 http://%s:%s/hello/640/480/' % (deviceAnswer, rotationOption, args.kbps, server, videoPort)
       audioCommandLine = '/usr/local/bin/ffmpeg -f alsa -ar 44100 -ac 1 -i hw:1 -f mpegts -codec:a mp2 -b:a 32k -muxdelay 0.001 http://%s:%s/hello/640/480/' % (server, audioPort)


       print videoCommandLine
       print audioCommandLine

       videoProcess = runFfmpeg(videoCommandLine)
       audioProcess = runFfmpeg(audioCommandLine)

       return {'video_process': videoProcess, 'audio_process': audioProcess, 'device_answer': deviceAnswer}



    def handleWindows(deviceNumber, videoPort):

       p = subprocess.Popen(["ffmpeg", "-list_devices", "true", "-f", "dshow", "-i", "dummy"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)


       out, err = p.communicate()
       lines = err.split('\n')

       count = 0

       devices = []

       for line in lines:

           #if "]  \"" in line:
           #    print "line:", line

           m = re.search('.*\\"(.*)\\"', line)
           if m != None:
               #print line
               if m.group(1)[0:1] != '@':
                   print count, m.group(1)
                   devices.append(m.group(1))
                   count += 1


       if deviceNumber is None:
           deviceAnswer = raw_input("Enter the number of the camera device for your robot from the list above: ")
       else:
           deviceAnswer = str(deviceNumber)

       device = devices[int(deviceAnswer)]
       commandLine = 'ffmpeg -s 640x480 -f dshow -i video="%s" -f mpegts -codec:v mpeg1video -b 200k -r 20 http://%s:%s/hello/640/480/' % (device, server, videoPort)


       process = runFfmpeg(commandLine)

       return {'process': process, 'device_answer': device}



    def handleWindowsScreenCapture(deviceNumber, videoPort):

       p = subprocess.Popen(["ffmpeg", "-list_devices", "true", "-f", "dshow", "-i", "dummy"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)


       out, err = p.communicate()

       lines = err.split('\n')

       count = 0

       devices = []

       for line in lines:

           #if "]  \"" in line:
           #    print "line:", line

           m = re.search('.*\\"(.*)\\"', line)
           if m != None:
               #print line
               if m.group(1)[0:1] != '@':
                   print count, m.group(1)
                   devices.append(m.group(1))
                   count += 1


       if deviceNumber is None:
           deviceAnswer = raw_input("Enter the number of the camera device for your robot from the list above: ")
       else:
           deviceAnswer = str(deviceNumber)



       device = devices[int(deviceAnswer)]
       commandLine = 'ffmpeg -f dshow -i video="screen-capture-recorder" -vf "scale=640:480" -f mpeg1video -b 50k -r 20 http://%s:%s/hello/640/480/' % (server, videoPort)

       print "command line:", commandLine

       process = runFfmpeg(commandLine)

       return {'process': process, 'device_answer': device}




    def snapShot(operatingSystem, inputDeviceID, filename="snapshot.jpg"):    

       try:
           os.remove('snapshot.jpg')
       except:
           print "did not remove file"

       commandLineDict = {
           'Darwin': 'ffmpeg -y -f qtkit -i %s -vframes 1 %s' % (inputDeviceID, filename),
           'Linux': '/usr/local/bin/ffmpeg -y -f video4linux2 -i /dev/video%s -vframes 1 -q:v 1000 -vf scale=320:240 %s' % (inputDeviceID, filename),
           'Windows': 'ffmpeg -y -s 320x240 -f dshow -i video="%s" -vframes 1 %s' % (inputDeviceID, filename)}

       print commandLineDict[operatingSystem]
       os.system(commandLineDict[operatingSystem])



    def startVideoCapture():

       videoPort = getVideoPort()
       audioPort = getAudioPort()
       print "video port:", videoPort
       print "audio port:", audioPort

       #if len(sys.argv) >= 3:
       #    deviceNumber = sys.argv[2]
       #else:
       #    deviceNumber = None
       deviceNumber = args.video_device_number

       result = None
       if platform.system() == 'Darwin':
           result = handleDarwin(deviceNumber, videoPort, audioPort)
       elif platform.system() == 'Linux':
           result = handleLinux(deviceNumber, videoPort, audioPort)
       elif platform.system() == 'Windows':
           #result = handleWindowsScreenCapture(deviceNumber, videoPort)
           result = handleWindows(deviceNumber, videoPort)
       else:
           print "unknown platform", platform.system()

       return result


    def timeInMilliseconds():
       return int(round(time.time() * 1000))



    def main():

       print "main"

       streamProcessDict = None


       twitterSnapCount = 0

       while True:



           socketIO.emit('send_video_status', {'send_video_process_exists': True,
                                               'camera_id':cameraIDAnswer})


           if streamProcessDict is not None:
               print "stopping previously running ffmpeg (needs to happen if this is not the first iteration)"
               streamProcessDict['process'].kill()

           print "starting process just to get device result" # this should be a separate function so you don't have to do this
           streamProcessDict = startVideoCapture()
           inputDeviceID = streamProcessDict['device_answer']
           print "stopping video capture"
           streamProcessDict['process'].kill()

           #print "sleeping"
           #time.sleep(3)
           #frameCount = int(round(time.time() * 1000))

           videoWithSnapshots = False
           while videoWithSnapshots:

               frameCount = timeInMilliseconds()

               print "taking single frame image"
               snapShot(platform.system(), inputDeviceID, filename="single_frame_image.jpg")

               with open ("single_frame_image.jpg", 'rb') as f:

                   # every so many frames, post a snapshot to twitter
                   #if frameCount % 450 == 0:
                   if frameCount % 6000 == 0:
                           data = f.read()
                           print "emit"
                           socketIO.emit('snapshot', {'frame_count':frameCount, 'image':base64.b64encode(data)})
                   data = f.read()

               print "emit"
               socketIO.emit('single_frame_image', {'frame_count':frameCount, 'image':base64.b64encode(data)})
               time.sleep(0)

               #frameCount += 1


           if False:
            if platform.system() != 'Windows':
               print "taking snapshot"
               snapShot(platform.system(), inputDeviceID)
               with open ("snapshot.jpg", 'rb') as f:
                   data = f.read()
               print "emit"

               # skip sending the first image because it's mostly black, maybe completely black
               #todo: should find out why this black image happens
               if twitterSnapCount > 0:
                   socketIO.emit('snapshot', {'image':base64.b64encode(data)})




           print "starting video capture"
           streamProcessDict = startVideoCapture()


           # This loop counts out a delay that occurs between twitter snapshots.
           # Every 50 seconds, it kills and restarts ffmpeg.
           # Every 40 seconds, it sends a signal to the server indicating status of processes.
           period = 2*60*60 # period in seconds between snaps
           for count in range(period):
               time.sleep(1)

               if count % 20 == 0:
                   socketIO.emit('send_video_status', {'send_video_process_exists': True,
                                                       'camera_id':cameraIDAnswer})

               if count % 40 == 30:
                   print "stopping video capture just in case it has reached a state where it's looping forever, not sending video, and not dying as a process, which can happen"
                   streamProcessDict['video_process'].kill()
                   streamProcessDict['audio_process'].kill()
                   time.sleep(1)

               if count % 80 == 75:
                   print "send status about this process and its child process ffmpeg"
                   ffmpegProcessExists = streamProcessDict['process'].poll() is None
                   socketIO.emit('send_video_status', {'send_video_process_exists': True,
                                                       'ffmpeg_process_exists': ffmpegProcessExists,
                                                       'camera_id':cameraIDAnswer})

               #if count % 190 == 180:
               #    print "reboot system in case the webcam is not working"
               #    os.system("sudo reboot")

               # if the video stream process dies, restart it
               if streamProcessDict['video_process'].poll() is not None or streamProcessDict['audio_process'].poll():
                   # wait before trying to start ffmpeg
                   print "ffmpeg process is dead, waiting before trying to restart"
                   randomSleep()
                   streamProcessDict = startVideoCapture()

           twitterSnapCount += 1

    if __name__ == "__main__":


       #if len(sys.argv) > 1:
       #    cameraIDAnswer = sys.argv[1]
       #else:
       #    cameraIDAnswer = raw_input("Enter the Camera ID for your robot, you can get it by pointing a browser to the runmyrobot server %s: " % server)

       cameraIDAnswer = args.camera_id


       main()

    ERROR:

    ffmpeg -n -f mpegts -i http://54.183.232.63:12221 -i logo.png -filter_complex "[0:v]setsar=sar=1[v];[v][1]blend=all_mode='overlay':all_opacity=0.7" -movflags +faststart tmb/video.mp4
    ffmpeg version N-86215-gb5228e4 Copyright (c) 2000-2017 the FFmpeg developers
     built with gcc 4.9.2 (Raspbian 4.9.2-10)
     configuration: --arch=armel --target-os=linux --enable-gpl --enable-libx264 --enable-nonfree --extra-libs=-ldl
     libavutil      55. 63.100 / 55. 63.100
     libavcodec     57. 96.101 / 57. 96.101
     libavformat    57. 72.101 / 57. 72.101
     libavdevice    57.  7.100 / 57.  7.100
     libavfilter     6. 90.100 /  6. 90.100
     libswscale      4.  7.101 /  4.  7.101
     libswresample   2.  8.100 /  2.  8.100
     libpostproc    54.  6.100 / 54.  6.100
    [mpegts @ 0x1a57390] Could not detect TS packet size, defaulting to non-FEC/DVHS
    http://54.183.232.63:12221: could not find codec parameters
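
    One possible direction (a sketch only, not from the original post): rather than re-encoding the already published mpegts stream, the overlay could be applied inside the script above, in handleLinux(), by giving ffmpeg the logo as a second input plus a -filter_complex before the output options. The logo path, placement and 70% opacity below are assumptions; the rest of the command mirrors the existing videoCommandLine.

     # Sketch of an alternative videoCommandLine inside handleLinux(), where
     # deviceAnswer, args, server and videoPort are already defined.
     # logo.png, its position and its opacity are assumptions; test locally first.
     overlayFilter = (
         "[1:v]format=rgba,colorchannelmixer=aa=0.7[logo];"  # make the logo ~70% opaque
         "[0:v][logo]overlay=W-w-10:10"                      # top-right corner, 10 px margin
     )
     videoCommandLine = (
         '/usr/local/bin/ffmpeg -f v4l2 -framerate 25 -video_size 640x480 '
         '-i /dev/video%s -i logo.png -filter_complex "%s" '
         '-f mpegts -codec:v mpeg1video -s 640x480 -b:v %dk -bf 0 -muxdelay 0.001 '
         'http://%s:%s/hello/640/480/'
         % (deviceAnswer, overlayFilter, args.kbps, server, videoPort)
     )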
  • Unable to transfer continuous FFmpeg buffer to client browser using node.js

    10 December 2016, by chintitomasud

    I have tried to transcode a video file on demand, using FFmpeg to transfer the chunks (buffers) to the client browser as MP4, but I failed to show the MP4 content in the HTML5 video player. Without FFmpeg, all the code runs properly; I replaced createReadStream with FFmpeg and have faced some problems with it. FFmpeg is new to me and I'm kind of confused by the spawn method. When I request a URL path, it shows the following text on the command line:

    Spawning new process /samiul113039/1080.mp4:GET

    piping ffmpeg output to client, pid 10016

    HTTP connection disrupted, killing ffmpeg : 10016

    Spawning new process /samiul113039/1080.mp4:GET

    piping ffmpeg output to client, pid 4796

    HTTP connection disrupted, killing ffmpeg : 4796

    ffmpeg didn't quit on q, sending signals
    ffmpeg has exited: 10016, code null
    ffmpeg didn't quit on q, sending signals
    ffmpeg has exited: 4796, code null


    var fs=require('fs');

    var url=require("url");
    var urlvalue="http://csestudents.uiu.ac.bd/samiul113039/1080.mp4";


    var parseurl=url.parse(urlvalue);

    var HDHomeRunIP = parseurl.hostname;
    var HDHomeRunPort = parseurl.port;
    var childKillTimeoutMs = 1000;

    var parseArgs = require('minimist')(process.argv.slice(2));

    // define startsWith for string
    if (typeof String.prototype.startsWith != 'function') {
     // see below for better implementation!
     String.prototype.startsWith = function (str){
       return this.indexOf(str) == 0;
     };
    }
    // Called when the response object fires the 'close' handler, kills ffmpeg
    function responseCloseHandler(command) {
     if (command.exited != true) {
       console.log('HTTP connection disrupted, killing ffmpeg: ' + command.pid);
       // Send a 'q' which signals ffmpeg to quit.
       // Then wait half a second, send a nice signal, wait another half second
       // and send SIGKILL
       command.stdin.write('q\n');
       command.stdin.destroy();
       // install timeout and wait
       setTimeout(function() {
         if (command.exited != true) {
           console.log('ffmpeg didn\'t quit on q, sending signals');
           // still connected, do safe sig kills
           command.kill();
           try {
             command.kill('SIGQUIT');
           } catch (err) {}
           try {
             command.kill('SIGINT');
           } catch (err) {}
           // wait some more!
           setTimeout(function() {
             if (command.exited != true) {
               console.log('ffmpeg didn\'t quit on signals, sending SIGKILL');
               // at this point, just give up and whack it
               try {
                 command.kill('SIGKILL');
               } catch (err) {}
             }
           }, childKillTimeoutMs);
         }    
       }, childKillTimeoutMs);
     }
    }

    // Performs a proxy. Copies data from proxy_request into response
    function doProxy(request,response,http,options) {
     var proxy_request = http.request(options, function (proxy_response) {
       proxy_response.on('data', function(chunk) {
         response.write(chunk, 'binary');
       });
       proxy_response.on('end', function() {
         response.end();
       });
       response.writeHead(proxy_response.statusCode, proxy_response.headers);
     });
     request.on('data', function(chunk) {
       proxy_request.write(chunk, 'binary');
     });
     // error handler
     proxy_request.on('error', function(e) {
       console.log('problem with request: ' + e.message);
       response.writeHeader(500);
       response.end();
     });

     proxy_request.end();
    }

    var child_process = require('child_process');
    var auth = require('./auth');
    // Performs the transcoding after the URL is validated
    function doTranscode(request,response) {
     //res.setHeader("Accept-Ranges", "bytes");
     response.setHeader("Accept-Ranges", "bytes");
     response.setHeader("Content-Type", "video/mp4");        
     response.setHeader("Connection","close");
     response.setHeader("Cache-Control","no-cache");
     response.setHeader("Pragma","no-cache");

     // always write the header
     response.writeHeader(200);

     // if get, spawn command stream it
     if (request.method == 'GET') {
       console.log('Spawning new process ' + request.url + ":" + request.method);

    var command = child_process.spawn('ffmpeg',
                                         ['-i','http://csestudents.uiu.ac.bd/samiul113039/1080.mp4','-f','mpegts','-'],
                                         { stdio: ['pipe','pipe','ignore'] });

        command.exited = false;
       // handler for when ffmpeg dies unexpectedly
       command.on('exit',function(code,signal) {
         console.log('ffmpeg has exited: ' + command.pid + ", code " + code);
         // set flag saying we've quit
         command.exited = true;
         response.end();
       });
       command.on('error',function(error) {
         console.log('ffmpeg error handler - unable to kill: ' + command.pid);
         // on well, might as well give up
         command.exited = true;
         try {
           command.stdin.close();
         } catch (err) {}
         try {
           command.stdout.close();
         } catch (err) {}
         try {
           command.stderr.close();
         } catch (err) {}
         response.end();
       });
       // handler for when client closes the URL connection - stop ffmpeg
       response.on('end',function() {
        responseCloseHandler(command);
       });
       // handler for when client closes the URL connection - stop ffmpeg
       response.on('close',function() {
         responseCloseHandler(command);
       });

       // now stream
       console.log('piping ffmpeg output to client, pid ' + command.pid);
       command.stdout.pipe(response);
       command.stdin.on('error',function(err) {
         console.log("Weird error in stdin pipe ", err);
         response.end();
       });
       command.stdout.on('error',function(err) {
         console.log("Weird error in stdout pipe ",err);
         response.end();
       });
     }
     else {
       // not GET, so close response
       response.end();
     }
    }

    // Load the http module to create an http server.
    var http = require('http');

    // Configure our HTTP server to respond with Hello World to all requests.
    var server = http.createServer(function (request, response) {
     //console.log("New connection from " + request.socket.remoteAddress + ":" + request.url);

     if (auth.validate(request,response)) {
       // first send a HEAD request to our HD Home Run with the same url to see if the address is valid.
       // This prevents an ffmpeg instance to spawn when clients request invalid things - like robots.txt/etc
       var options = {method: 'HEAD', hostname: HDHomeRunIP, port: HDHomeRunPort, path: request.url};
       var req = http.request(options, function(res) {
         // if they do a get, and it returns good status
         if (request.method == "GET" &&
             res.statusCode == 200 &&
             res.headers["content-type"] != null &&
             res.headers["content-type"].startsWith("video")) {
           // transcode is possible, start it now!
           doTranscode(request,response);
         }
         else {
           // no video or error, cannot transcode, just forward the response from the HD Home run to the client
           if (request.method == "HEAD") {
             response.writeHead(res.statusCode,res.headers);
             response.end();
           }
           else {
             // do a 301 redirect and have the device response directly  

             // just proxy it, that way browser doesn't redirect to HDHomeRun IP but keeps the node.js server IP
             options = {method: request.method, hostname: HDHomeRunIP, /* port: HDHomeRunPort, */path: request.url};
             doProxy(request,response,http,options);
           }
         }
       });
       req.on('error', function(e) {
         console.log('problem with request: ' + e.message);
         response.writeHeader(500);
         response.end();
       });
       // finish the client request, rest of processing done in the async callbacks
       req.end();
     }
    });

    // turn on no delay for tcp
    server.on('connection', function (socket) {
     socket.setNoDelay(true);
    });
    server.listen(7000);

    stdio: ['pipe','pipe','ignore']
    Actually, the code was written by someone else; I have just modified it.
    What does ['pipe','pipe','ignore'] mean here?
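
    For reference (not part of the original post): in child_process.spawn the stdio array configures the child's stdin, stdout and stderr, in that order. 'pipe' creates a pipe the parent can use (command.stdin, command.stdout), while 'ignore' discards the stream, so here ffmpeg's stderr logging is thrown away. A rough Python analogue of the same wiring, as a sketch only:

     # Sketch: the same stdio wiring expressed with Python's subprocess module.
     import subprocess

     command = subprocess.Popen(
         ['ffmpeg', '-i', 'http://csestudents.uiu.ac.bd/samiul113039/1080.mp4',
          '-f', 'mpegts', '-'],
         stdin=subprocess.PIPE,      # 'pipe'   -> the parent can write 'q\n' to ask ffmpeg to quit
         stdout=subprocess.PIPE,     # 'pipe'   -> the parent can stream ffmpeg's output to the client
         stderr=subprocess.DEVNULL,  # 'ignore' -> ffmpeg's log output is discarded
     )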

  • openGL ES 2.0 on android , YUV to RGB and Rendering with ffMpeg

    14 October 2013, by 101110101100111111101101

    My renderer dies one or two frames after the video starts showing.

    FATAL ERROR 11: blabla... (it occurs exactly in glDrawElements, in the Y part)

    I think the problem is 'glPixelStorei', or 'GL_RGB' vs 'GL_LUMINANCE', but I don't get it.

    My rendering flow:

    1. Decode the data received from the network (SDK receiving -> NDK decoding) and enqueue it.

    2. Another thread (synchronized, of course) dequeues it and gets ready to set up OpenGL ES 2.0 (SDK).

    3. When the onDrawFrame, onSurfaceCreated, onSurfaceChanged methods are called, they drop down into the NDK. (My renderer source in the NDK is attached below.)

    4. Rendering.

    As you know, the fragment shader is used for the conversion.
    My data is YUV 420p (pix_fmt_yuv420p), 12 bits per pixel.

    Here is my entire source.

    I don't have any prior knowledge of OpenGL ES; this is my first time.

    Please let me know what I can do to improve performance,

    and which parameters I should use in 'glTexImage2D', 'glTexSubImage2D' and 'glRenderbufferStorage':
    GL_LUMINANCE? GL_RGBA? GL_RGB? (GL_LUMINANCE is being used now.)
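
    For reference (not from the original post): the fragment shader further down samples each plane as a single-channel texture and applies the usual analog (BT.601-style) YUV-to-RGB relation, which is why single-channel, luminance-style uploads are what it expects:

     R = Y + 1.13983 * (V - 0.5)
     G = Y - 0.39465 * (U - 0.5) - 0.58060 * (V - 0.5)
     B = Y + 2.03211 * (U - 0.5)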

    void Renderer::set_draw_frame(JNIEnv* jenv, jbyteArray yData, jbyteArray uData, jbyteArray vData)
    {
       for (int i = 0; i < 3; i++) {
           if (yuv_data_[i] != NULL) {
               free(yuv_data_[i]);
           }
       }

     int YSIZE = -1;
     int USIZE = -1;
     int VSIZE = -1;

     if (yData != NULL) {
           YSIZE = (int)jenv->GetArrayLength(yData);
       LOG_DEBUG("YSIZE : %d", YSIZE);
           yuv_data_[0] = (unsigned char*)malloc(sizeof(unsigned char) * YSIZE);
       memset(yuv_data_[0], 0, YSIZE);
           jenv->GetByteArrayRegion(yData, 0, YSIZE, (jbyte*)yuv_data_[0]);
       yuv_data_[0] = reinterpret_cast<unsigned char*>(yuv_data_[0]);
       } else {
           YSIZE = (int)jenv->GetArrayLength(yData);
           yuv_data_[0] = (unsigned char*)malloc(sizeof(unsigned char) * YSIZE);
       memset(yuv_data_[0], 1, YSIZE);
     }

       if (uData != NULL) {
           USIZE = (int)jenv->GetArrayLength(uData);
       LOG_DEBUG("USIZE : %d", USIZE);
           yuv_data_[1] = (unsigned char*)malloc(sizeof(unsigned char) * USIZE);
       memset(yuv_data_[1], 0, USIZE);
           jenv->GetByteArrayRegion(uData, 0, USIZE, (jbyte*)yuv_data_[1]);
       yuv_data_[1] = reinterpret_cast<unsigned char*>(yuv_data_[1]);
       } else {
           USIZE = YSIZE/4;
           yuv_data_[1] = (unsigned char*)malloc(sizeof(unsigned char) * USIZE);
       memset(yuv_data_[1], 1, USIZE);
     }

       if (vData != NULL) {
           VSIZE = (int)jenv->GetArrayLength(vData);
       LOG_DEBUG("VSIZE : %d", VSIZE);
           yuv_data_[2] = (unsigned char*)malloc(sizeof(unsigned char) * VSIZE);
       memset(yuv_data_[2], 0, VSIZE);
           jenv->GetByteArrayRegion(vData, 0, VSIZE, (jbyte*)yuv_data_[2]);
       yuv_data_[2] = reinterpret_cast<unsigned char*>(yuv_data_[2]);
       } else {
           VSIZE = YSIZE/4;
           yuv_data_[2] = (unsigned char*)malloc(sizeof(unsigned char) * VSIZE);
       memset(yuv_data_[2], 1, VSIZE);
     }

       glClearColor(1.0F, 1.0F, 1.0F, 1.0F);
       check_gl_error("glClearColor");
       glClear(GL_COLOR_BUFFER_BIT);
       check_gl_error("glClear");
    }

    void Renderer::draw_frame()
    {
     // Binding created FBO
     glBindFramebuffer(GL_FRAMEBUFFER, frame_buffer_object_);
     check_gl_error("glBindFramebuffer");
       // Add program to OpenGL environment
       glUseProgram(program_object_);
       check_gl_error("glUseProgram");

     for (int i = 0; i < 3; i++) {
       LOG_DEBUG("Success");
         //Bind texture
         glActiveTexture(GL_TEXTURE0 + i);
         check_gl_error("glActiveTexture");
         glBindTexture(GL_TEXTURE_2D, yuv_texture_id_[i]);
         check_gl_error("glBindTexture");
         glUniform1i(yuv_texture_object_[i], i);
         check_gl_error("glBindTexture");
       glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, stream_yuv_width_[i], stream_yuv_height_[i], GL_RGBA, GL_UNSIGNED_BYTE, yuv_data_[i]);
         check_gl_error("glTexSubImage2D");
     }

     LOG_DEBUG("Success");
       // Load vertex information
       glVertexAttribPointer(position_object_, 2, GL_FLOAT, GL_FALSE, kStride, kVertexInformation);
       check_gl_error("glVertexAttribPointer");
       // Load texture information
       glVertexAttribPointer(texture_position_object_, 2, GL_SHORT, GL_FALSE, kStride, kTextureCoordinateInformation);
       check_gl_error("glVertexAttribPointer");

    LOG_DEBUG("9");
       glEnableVertexAttribArray(position_object_);
       check_gl_error("glEnableVertexAttribArray");
       glEnableVertexAttribArray(texture_position_object_);
       check_gl_error("glEnableVertexAttribArray");

     // Back to window buffer
     glBindFramebuffer(GL_FRAMEBUFFER, 0);
     check_gl_error("glBindFramebuffer");
     LOG_DEBUG("Success");
       // Draw the Square
       glDrawElements(GL_TRIANGLE_STRIP, 6, GL_UNSIGNED_SHORT, kIndicesInformation);
       check_gl_error("glDrawElements");
    }

    void Renderer::setup_render_to_texture()
    {
       glGenFramebuffers(1, &frame_buffer_object_);
       check_gl_error("glGenFramebuffers");
       glBindFramebuffer(GL_FRAMEBUFFER, frame_buffer_object_);
       check_gl_error("glBindFramebuffer");
       glGenRenderbuffers(1, &render_buffer_object_);
       check_gl_error("glGenRenderbuffers");
       glBindRenderbuffer(GL_RENDERBUFFER, render_buffer_object_);
       check_gl_error("glBindRenderbuffer");
       glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, stream_yuv_width_[0], stream_yuv_height_[0]);
       check_gl_error("glRenderbufferStorage");
       glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, render_buffer_object_);
       check_gl_error("glFramebufferRenderbuffer");
     glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, yuv_texture_id_[0], 0);
       check_gl_error("glFramebufferTexture2D");  
     glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, yuv_texture_id_[1], 0);
       check_gl_error("glFramebufferTexture2D");  
     glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, yuv_texture_id_[2], 0);
       check_gl_error("glFramebufferTexture2D");  

     glBindFramebuffer(GL_FRAMEBUFFER, 0);
       check_gl_error("glBindFramebuffer");

       GLint status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
       if (status != GL_FRAMEBUFFER_COMPLETE) {
           print_log("renderer.cpp", "setup_graphics", "FBO setting fault.", LOGERROR);
           LOG_ERROR("%d\n", status);
           return;
       }
    }

    void Renderer::setup_yuv_texture()
    {
       // Use tightly packed data
       glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
       check_gl_error("glPixelStorei");

     for (int i = 0; i < 3; i++) {
       if (yuv_texture_id_[i]) {
         glDeleteTextures(1, &yuv_texture_id_[i]);
         check_gl_error("glDeleteTextures");
       }
         glActiveTexture(GL_TEXTURE0+i);
         check_gl_error("glActiveTexture");
         // Generate texture object
         glGenTextures(1, &yuv_texture_id_[i]);
         check_gl_error("glGenTextures");
         glBindTexture(GL_TEXTURE_2D, yuv_texture_id_[i]);
         check_gl_error("glBindTexture");
       glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
         check_gl_error("glTexParameteri");
         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
         check_gl_error("glTexParameteri");
         glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
         check_gl_error("glTexParameterf");
         glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
         check_gl_error("glTexParameterf");
       glEnable(GL_TEXTURE_2D);
       check_gl_error("glEnable");
       glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, maximum_yuv_width_[i], maximum_yuv_height_[i], 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
         check_gl_error("glTexImage2D");
     }
    }

    void Renderer::setup_graphics()
    {
       print_gl_string("Version", GL_VERSION);
       print_gl_string("Vendor", GL_VENDOR);
       print_gl_string("Renderer", GL_RENDERER);
       print_gl_string("Extensions", GL_EXTENSIONS);

       program_object_ = create_program(kVertexShader, kFragmentShader);
       if (!program_object_) {
           print_log("renderer.cpp", "setup_graphics", "Could not create program.", LOGERROR);
           return;
       }

       position_object_ = glGetAttribLocation(program_object_, "vPosition");
       check_gl_error("glGetAttribLocation");
       texture_position_object_ = glGetAttribLocation(program_object_, "vTexCoord");
       check_gl_error("glGetAttribLocation");

       yuv_texture_object_[0] = glGetUniformLocation(program_object_, "yTexture");
       check_gl_error("glGetUniformLocation");
     yuv_texture_object_[1] = glGetUniformLocation(program_object_, "uTexture");
       check_gl_error("glGetUniformLocation");
       yuv_texture_object_[2] = glGetUniformLocation(program_object_, "vTexture");
       check_gl_error("glGetUniformLocation");

     setup_yuv_texture();
       setup_render_to_texture();

     glViewport(0, 0, stream_yuv_width_[0], stream_yuv_height_[0]);//736, 480);//1920, 1080);//maximum_yuv_width_[0], maximum_yuv_height_[0]);
     check_gl_error("glViewport");
    }

    GLuint Renderer::create_program(const char* vertex_source, const char* fragment_source)
    {
       GLuint vertexShader = load_shader(GL_VERTEX_SHADER, vertex_source);
       if (!vertexShader) {
           return 0;
       }

       GLuint pixelShader = load_shader(GL_FRAGMENT_SHADER, fragment_source);
       if (!pixelShader) {
           return 0;
       }

       GLuint program = glCreateProgram();
       if (program) {
           glAttachShader(program, vertexShader);
           check_gl_error("glAttachShader");
           glAttachShader(program, pixelShader);
           check_gl_error("glAttachShader");
           glLinkProgram(program);
           /* Get a Status */
           GLint linkStatus = GL_FALSE;
            glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);
           if (linkStatus != GL_TRUE) {
               GLint bufLength = 0;
                glGetProgramiv(program, GL_INFO_LOG_LENGTH, &bufLength);
               if (bufLength) {
                   char* buf = (char*) malloc(bufLength);
                   if (buf) {
                       glGetProgramInfoLog(program, bufLength, NULL, buf);
                       print_log("renderer.cpp", "create_program", "Could not link program.", LOGERROR);
                       LOG_ERROR("%s\n", buf);
                       free(buf);
                   }
               }
               glDeleteProgram(program);
               program = 0;
           }
       }
       return program;
    }

    GLuint Renderer::load_shader(GLenum shaderType, const char* pSource)
    {
       GLuint shader = glCreateShader(shaderType);
           if (shader) {
                glShaderSource(shader, 1, &pSource, NULL);
               glCompileShader(shader);
               /* Get a Status */
               GLint compiled = 0;
                glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
               if (!compiled) {
                   GLint infoLen = 0;
                    glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen);
                   if (infoLen) {
                       char* buf = (char*) malloc(infoLen);
                       if (buf) {
                           glGetShaderInfoLog(shader, infoLen, NULL, buf);
                           print_log("renderer.cpp", "load_shader", "Could not link program.", LOGERROR);
                                     LOG_ERROR("%d :: %s\n", shaderType, buf);
                           free(buf);
                       }
                       glDeleteShader(shader);
                       shader = 0;
                   }
               }
           }
       return shader;
    }


    void Renderer::onDrawFrame(JNIEnv* jenv, jbyteArray yData, jbyteArray uData, jbyteArray vData)
    {
       set_draw_frame(jenv, yData, uData, vData);
       draw_frame();
       return;
    }

    void Renderer::setSize(int stream_width, int stream_height) {
     stream_yuv_width_[0] = stream_width;
     stream_yuv_width_[1] = stream_width/2;
     stream_yuv_width_[2] = stream_width/2;
     stream_yuv_height_[0] = stream_height;
     stream_yuv_height_[1] = stream_height/2;
     stream_yuv_height_[2] = stream_height/2;
    }

    void Renderer::onSurfaceChanged(int width, int height)
    {
     mobile_yuv_width_[0] = width;
     mobile_yuv_width_[1] = width/2;
     mobile_yuv_width_[2] = width/2;
     mobile_yuv_height_[0] = height;
     mobile_yuv_height_[1] = height/2;
     mobile_yuv_height_[2] = height/2;

     maximum_yuv_width_[0] = 1920;
     maximum_yuv_width_[1] = 1920/2;
     maximum_yuv_width_[2] = 1920/2;
     maximum_yuv_height_[0] = 1080;
     maximum_yuv_height_[1] = 1080/2;
     maximum_yuv_height_[2] = 1080/2;

     // If stream size not setting, default size D1
     //if (stream_yuv_width_[0] == 0) {
       stream_yuv_width_[0] = 736;
       stream_yuv_width_[1] = 736/2;
       stream_yuv_width_[2] = 736/2;
       stream_yuv_height_[0] = 480;
       stream_yuv_height_[1] = 480/2;
       stream_yuv_height_[2] = 480/2;
     //}

       setup_graphics();
       return;
    }

    Here is my Fragment, Vertex source and coordinates :

    static const char kVertexShader[] =
       "attribute vec4 vPosition;      \n"
         "attribute vec2 vTexCoord;        \n"
         "varying vec2 v_vTexCoord;        \n"
       "void main() {                        \n"
           "gl_Position = vPosition;       \n"
           "v_vTexCoord = vTexCoord;       \n"
       "}                                          \n";

    static const char kFragmentShader[] =
           "precision mediump float;               \n"
           "varying vec2 v_vTexCoord;          \n"
           "uniform sampler2D yTexture;        \n"
           "uniform sampler2D uTexture;        \n"
           "uniform sampler2D vTexture;        \n"
           "void main() {                      \n"
               "float y=texture2D(yTexture, v_vTexCoord).r;\n"
               "float u=texture2D(uTexture, v_vTexCoord).r - 0.5;\n"
               "float v=texture2D(vTexture, v_vTexCoord).r - 0.5;\n"
               "float r=y + 1.13983 * v;\n"
               "float g=y - 0.39465 * u - 0.58060 * v;\n"
               "float b=y + 2.03211 * u;\n"
               "gl_FragColor = vec4(r, g, b, 1.0);\n"
           "}\n";

    static const GLfloat kVertexInformation[] =
    {
            -1.0f, 1.0f,           // TexCoord 0 top left
            -1.0f,-1.0f,           // TexCoord 1 bottom left
             1.0f,-1.0f,           // TexCoord 2 bottom right
             1.0f, 1.0f            // TexCoord 3 top right
    };
    static const GLshort kTextureCoordinateInformation[] =
    {
             0, 0,         // TexCoord 0 top left
             0, 1,         // TexCoord 1 bottom left
             1, 1,         // TexCoord 2 bottom right
             1, 0          // TexCoord 3 top right
    };
    static const GLuint kStride = 0;//COORDS_PER_VERTEX * 4;
    static const GLshort kIndicesInformation[] =
    {
       0, 1, 2,
       0, 2, 3
    };