
Media (9)
-
Stereo master soundtrack
17 October 2011
Updated: October 2011
Language: English
Type: Audio
-
Elephants Dream - Cover of the soundtrack
17 October 2011
Updated: October 2011
Language: English
Type: Image
-
#7 Ambience
16 October 2011
Updated: June 2015
Language: English
Type: Audio
-
#6 Teaser Music
16 October 2011
Updated: February 2013
Language: English
Type: Audio
-
#5 End Title
16 October 2011
Updated: February 2013
Language: English
Type: Audio
-
#3 The Safest Place
16 October 2011
Updated: February 2013
Language: English
Type: Audio
Other articles (75)
-
Websites made with MediaSPIP
2 May 2011
This page lists some websites based on MediaSPIP.
-
Creating farms of unique websites
13 April 2011
MediaSPIP platforms can be installed as a farm, with a single "core" hosted on a dedicated server and used by multiple websites.
This allows (among other things): implementation costs to be shared between several different projects / individuals, rapid deployment of multiple unique sites, and creation of groups of like-minded sites, making it possible to browse media in a more controlled and selective environment than the major "open" (...)
-
Publishing on MédiaSpip
13 June 2013
Can I post content from an iPad tablet?
Yes, if your installed MédiaSpip is at version 0.2 or higher. If needed, contact the administrator of your MédiaSpip to find out.
On other sites (10338)
-
How to change the video recorder's preview in real time on the camera, like Vigo Video (Hypstar)
7 May 2023, by Arjun saini
I want to change the surface preview's bottom overlay with a GIF or image, like Vigo.

Like this: [screenshot]

Please tell me which SDK or approach I should use for this filter.

I am able to change the overlay on the top view using this:

PictureCallback cameraPictureCallbackJpeg = new PictureCallback()
{
    @Override
    public void onPictureTaken(byte[] data, Camera camera)
    {
        // TODO Auto-generated method stub
        Bitmap cameraBitmap = BitmapFactory.decodeByteArray(data, 0, data.length);

        int wid = cameraBitmap.getWidth();
        int hgt = cameraBitmap.getHeight();

        // Toast.makeText(getApplicationContext(), wid+""+hgt, Toast.LENGTH_SHORT).show();
        Bitmap newImage = Bitmap.createBitmap(wid, hgt, Bitmap.Config.ARGB_8888);

        Canvas canvas = new Canvas(newImage);
        canvas.drawBitmap(cameraBitmap, 0f, 0f, null);

        Drawable drawable = getResources().getDrawable(R.drawable.mark3);
        drawable.setBounds(20, 30, drawable.getIntrinsicWidth() + 20, drawable.getIntrinsicHeight() + 30);
        drawable.draw(canvas);

        File storagePath = new File(Environment.getExternalStorageDirectory() + "/PhotoAR/");
        storagePath.mkdirs();

        File myImage = new File(storagePath, Long.toString(System.currentTimeMillis()) + ".jpg");

        try
        {
            FileOutputStream out = new FileOutputStream(myImage);
            newImage.compress(Bitmap.CompressFormat.JPEG, 80, out);
            out.flush();
            out.close();
        }
        catch (FileNotFoundException e)
        {
            Log.d("In Saving File", e + "");
        }
        catch (IOException e)
        {
            Log.d("In Saving File", e + "");
        }

        camera.startPreview();

        newImage.recycle();
        newImage = null;

        Intent intent = new Intent();
        intent.setAction(Intent.ACTION_VIEW);
        intent.setDataAndType(Uri.parse("file://" + myImage.getAbsolutePath()), "image/*");
        startActivity(intent);
    }
};




Output of this: [screenshot]
-
When using ffmpeg to compress and create an mpeg4 video file in real time from a batch of images, I get low-quality video; how can I improve the quality?
30 June 2015, by Brubaker Haim
The goal here is to create a compressed mp4 video file in real time. I'm saving screenshots as bitmaps on my hard disk, and I want to create the mp4 file and compress it in real time.
using System;
using System.Windows.Forms;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Drawing;
using System.IO.Pipes;
using System.Runtime.InteropServices;
using System.Diagnostics;
using System.IO;
using DannyGeneral;

namespace Youtube_Manager
{
    class Ffmpeg
    {
        NamedPipeServerStream p;
        String pipename = "mytestpipe";
        System.Diagnostics.Process process;
        string ffmpegFileName = "ffmpeg.exe";
        string workingDirectory;

        public Ffmpeg()
        {
            workingDirectory = Path.GetDirectoryName(Application.ExecutablePath);
            Logger.Write("workingDirectory: " + workingDirectory);
            if (!Directory.Exists(workingDirectory))
            {
                Directory.CreateDirectory(workingDirectory);
            }
            ffmpegFileName = Path.Combine(workingDirectory, ffmpegFileName);
            Logger.Write("FfmpegFilename: " + ffmpegFileName);
        }

        public void Start(string pathFileName, int BitmapRate)
        {
            try
            {
                string outPath = pathFileName;
                p = new NamedPipeServerStream(pipename, PipeDirection.Out, 1, PipeTransmissionMode.Byte);
                ProcessStartInfo psi = new ProcessStartInfo();
                psi.WindowStyle = ProcessWindowStyle.Hidden;
                psi.UseShellExecute = false;
                psi.CreateNoWindow = false;
                psi.FileName = ffmpegFileName;
                psi.WorkingDirectory = workingDirectory;
                psi.Arguments = @"-f rawvideo -vcodec rawvideo -pix_fmt rgb24 -video_size 1920x1080 -i \\.\pipe\mytestpipe -map 0 -c:v mpeg4 -r " + BitmapRate + " " + outPath;
                process = Process.Start(psi);
                process.EnableRaisingEvents = false;
                psi.RedirectStandardError = true;
                p.WaitForConnection();
            }
            catch (Exception err)
            {
                Logger.Write("Exception Error: " + err.ToString());
            }
        }

        public void PushFrame(Bitmap bmp)
        {
            try
            {
                int length;
                // Lock the bitmap's bits.
                //bmp = new Bitmap(1920, 1080);
                Rectangle rect = new Rectangle(0, 0, bmp.Width, bmp.Height);
                //Rectangle rect = new Rectangle(0, 0, 1280, 720);
                System.Drawing.Imaging.BitmapData bmpData =
                    bmp.LockBits(rect, System.Drawing.Imaging.ImageLockMode.ReadOnly, bmp.PixelFormat);
                int absStride = Math.Abs(bmpData.Stride);
                // Get the address of the first line.
                IntPtr ptr = bmpData.Scan0;
                // Declare an array to hold the bytes of the bitmap.
                //length = 3 * bmp.Width * bmp.Height;
                length = absStride * bmpData.Height;
                byte[] rgbValues = new byte[length];
                //Marshal.Copy(ptr, rgbValues, 0, length);
                int j = bmp.Height - 1;
                for (int i = 0; i < bmp.Height; i++)
                {
                    IntPtr pointer = new IntPtr(bmpData.Scan0.ToInt32() + (bmpData.Stride * j));
                    System.Runtime.InteropServices.Marshal.Copy(pointer, rgbValues, absStride * (bmp.Height - i - 1), absStride);
                    j--;
                }
                p.Write(rgbValues, 0, length);
                bmp.UnlockBits(bmpData);
            }
            catch (Exception err)
            {
                Logger.Write("Error: " + err.ToString());
            }
        }

        public void Close()
        {
            p.Close();
        }
    }
}

The Bitmap image files on my hard disk are each 1920x1080 with a bit depth of 32.
The video file on the hard disk is 1.24 MB.
This is a screenshot I took from the video file while playing it.
You can see how bad the quality is. This is a link to 10 of the screenshot images I'm using to create the video file. They are bitmaps.
It's something with the arguments:
This line doesn't work; when I run the program, the command prompt window closes at once. I can't even see whether there are any errors because the window closes too fast:

psi.Arguments = @"-f libx264 -vcodec libx264 -pix_fmt bgra -video_size 1920x1080 -i \\.\pipe\mytestpipe -map 0 -c:v mpeg4 -r " + BitmapRate + " " + outPath;

When I use these arguments it works, but it produces a bad-quality video:

psi.Arguments = @"-f rawvideo -pix_fmt bgra -video_size 1920x1080 -i \\.\pipe\mytestpipe -map 0 -c:v mpeg4 -r " + BitmapRate + " " + outPath;
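A likely reason the first command exits immediately is that "-f libx264" is not a valid input format (libx264 is an encoder, not a demuxer), so ffmpeg aborts before reading the pipe. For the quality problem itself, the mpeg4 encoder's default rate control produces a low bitrate, so it usually needs an explicit quality or bitrate setting. The following is a minimal Python sketch of the same raw-frames-over-a-pipe idea, with placeholder width/height/fps/output values, shown only to isolate the argument changes; the same flags can be copied into psi.Arguments in the C# code above.

import subprocess

# Placeholder values; match them to the C# code above.
width, height, fps, out_path = 1920, 1080, 25, "out.mp4"

# Same pipeline idea: raw frames in on stdin, encoded video out.
# Quality is set explicitly instead of relying on mpeg4's default rate control:
#   -q:v 2                                  fixed high quality for mpeg4 (1 = best, 31 = worst), or
#   -c:v libx264 -crf 18 -preset veryfast   for much better size/quality trade-offs.
args = [
    "ffmpeg",
    "-f", "rawvideo",
    "-pix_fmt", "bgra",
    "-video_size", f"{width}x{height}",
    "-framerate", str(fps),
    "-i", "pipe:0",            # read frames from stdin instead of a named pipe
    "-c:v", "mpeg4",
    "-q:v", "2",
    out_path,
]

proc = subprocess.Popen(args, stdin=subprocess.PIPE)
# proc.stdin.write(frame_bytes)   # one BGRA frame = width * height * 4 bytes
# proc.stdin.close(); proc.wait()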
-
FFmpeg issue: video breaks up a lot, so the real-time stream gets distorted and slightly delayed while streaming from a drone to a webpage
26 March 2022, by ashiyaa nunhuck
I am trying to detect a face from my drone camera in real time. The video streams successfully, but it is delayed and breaks up a lot. Is there any solution to this problem? How can I get smooth video streaming with little delay and no breakup, so that face detection can succeed? Your help will be much appreciated.

Also, this is printed while my code is running:

INFO:werkzeug:127.0.0.1 - - [27/May/2021 15:16:14] "GET /video/streaming HTTP/1.1" 200 -
INFO:drone_face_recognition_and_tracking.controllers.server: 'action': 'command', 'cmd': 'takeOff'
INFO:drone_face_recognition_and_tracking.models.manage_drone: 'action': 'send_command', 'command': 'takeoff'
[h264 @ 0x55aa924a2e40] error while decoding MB 45 38, bytestream -6
[h264 @ 0x55aa924a2e40] concealing 424 DC, 424 AC, 424 MV errors in I frame
[h264 @ 0x55aa922a9a00] concealing 687 DC, 687 AC, 687 MV errors in P frame
[h264 @ 0x55aa923f79c0] left block unavailable for requested intra mode
[h264 @ 0x55aa923f79c0] error while decoding MB 0 34, bytestream 1347
[h264 @ 0x55aa923f79c0] concealing 709 DC, 709 AC, 709 MV errors in P frame
INFO:werkzeug:127.0.0.1 - - [27/May/2021 15:16:17] "POST /api/command/ HTTP/1.1" 200 -
pipe:0: corrupt decoded frame in stream 0
    Last message repeated 2 times
[h264 @ 0x55aa922a9a00] error while decoding MB 49 30, bytestream -6
[h264 @ 0x55aa922a9a00] concealing 900 DC, 900 AC, 900 MV errors in P frame
pipe:0: corrupt decoded frame in stream 0
INFO:drone_face_recognition_and_tracking.models.manage_drone: 'action': 'receive_response', 'response': b'ok'
INFO:drone_face_recognition_and_tracking.controllers.server: 'action': 'command', 'cmd': 'faceDetectAndTrack'
INFO:werkzeug:127.0.0.1 - - [27/May/2021 15:16:21] "POST /api/command/ HTTP/1.1" 200 -
[h264 @ 0x55aa924a2e40] left block unavailable for requested intra4x4 mode -1
[h264 @ 0x55aa924a2e40] error while decoding MB 0 30, bytestream 1624
[h264 @ 0x55aa924a2e40] concealing 949 DC, 949 AC, 949 MV errors in I frame
pipe:0: corrupt decoded frame in stream 0
[h264 @ 0x55aa9244d400] left block unavailable for requested intra mode
[h264 @ 0x55aa9244d400] error while decoding MB 0 12, bytestream 2936
[h264 @ 0x55aa9244d400] concealing 2029 DC, 2029 AC, 2029 MV errors in I frame
pipe:0: corrupt decoded frame in stream 0
[h264 @ 0x55aa924bf700] concealing 1632 DC, 1632 AC, 1632 MV errors in P frame
pipe:0: corrupt decoded frame in stream 0
[h264 @ 0x55aa92414280] concealing 1571 DC, 1571 AC, 1571 MV errors in P frame




Here is my code:

import logging
import contextlib
import os
import socket
import subprocess
import threading
import time
import cv2 as cv
import numpy as np

from drone_face_recognition_and_tracking.models.base import Singleton

logger = logging.getLogger(__name__)

DEFAULT_DISTANCE = 0.30
DEFAULT_SPEED = 10
DEFAULT_DEGREE = 10

FRAME_X = int(320)  # try 640
FRAME_Y = int(240)  # try 480
FRAME_AREA = FRAME_X * FRAME_Y

FRAME_SIZE = FRAME_AREA * 3
FRAME_CENTER_X = FRAME_X / 2
FRAME_CENTER_Y = FRAME_Y / 2

CMD_FFMPEG = (f'ffmpeg -probesize 32 -hwaccel auto -hwaccel_device opencl -i pipe:0 '
              f'-pix_fmt bgr24 -s {FRAME_X}x{FRAME_Y} -f rawvideo pipe:1')

FACE_DETECT_XML_FILE = './drone_face_recognition_and_tracking/models/haarcascade_frontalface_default.xml'


def receive_video(stop_event, pipe_in, host_ip, video_port):
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock_video:
        sock_video.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock_video.settimeout(.5)
        sock_video.bind((host_ip, video_port))
        data = bytearray(2048)
        while not stop_event.is_set():
            try:
                size, addr = sock_video.recvfrom_into(data)
                # logger.info({'action': 'receive_video', 'data': data})
            except socket.timeout as ex:
                logger.warning({'action': 'receive_video', 'ex': ex})
                time.sleep(0.5)
                continue
            except socket.error as ex:
                logger.error({'action': 'receive_video', 'ex': ex})
                break

            try:
                pipe_in.write(data[:size])
                pipe_in.flush()
            except Exception as ex:
                logger.error({'action': 'receive_video', 'ex': ex})
                break


class Tello_Drone(metaclass=Singleton):
    def __init__(self, host_ip='192.168.10.2', host_port=8889,
                 drone_ip='192.168.10.1', drone_port=8889,
                 is_imperial=False, speed=DEFAULT_SPEED):
        self.host_ip = host_ip
        self.host_port = host_port
        self.drone_ip = drone_ip
        self.drone_port = drone_port
        self.drone_address = (drone_ip, drone_port)
        self.is_imperial = is_imperial
        self.speed = speed
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.bind((self.host_ip, self.host_port))

        self.response = None
        self.stop_event = threading.Event()
        self._response_thread = threading.Thread(target=self.receive_response, args=(self.stop_event, ))
        self._response_thread.start()

        self.proc = subprocess.Popen(CMD_FFMPEG.split(' '),
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE)
        self.proc_stdin = self.proc.stdin
        self.proc_stdout = self.proc.stdout

        self.video_port = 11111

        self._receive_video_thread = threading.Thread(
            target=receive_video,
            args=(self.stop_event, self.proc_stdin,
                  self.host_ip, self.video_port,))
        self._receive_video_thread.start()

        self.face_cascade = cv.CascadeClassifier(FACE_DETECT_XML_FILE)
        self._is_enable_face_detect = False

        self.send_command('command')
        self.send_command('streamon')
        self.set_speed(self.speed)

    def receive_response(self, stop_event):
        while not stop_event.is_set():
            try:
                self.response, ip = self.socket.recvfrom(3000)
                logger.info({'action': 'receive_response',
                             'response': self.response})
            except socket.error as ex:
                logger.error({'action': 'receive_response',
                              'ex': ex})
                break

    def __dell__(self):
        self.stop()

    def stop(self):
        self.stop_event.set()
        retry = 0
        while self._response_thread.is_alive():
            time.sleep(0.3)
            if retry > 30:
                break
            retry += 1
        self.socket.close()
        os.kill(self.proc.pid, 9)

    def send_command(self, command):
        logger.info({'action': 'send_command', 'command': command})
        self.socket.sendto(command.encode('utf-8'), self.drone_address)

        retry = 0
        while self.response is None:
            time.sleep(0.3)
            if retry > 3:
                break
            retry += 1

        if self.response is None:
            response = None
        else:
            response = self.response.decode('utf-8')
        self.response = None
        return response

    def takeoff(self):
        return self.send_command('takeoff')

    def land(self):
        return self.send_command('land')

    def move(self, direction, distance):
        distance = float(distance)
        if self.is_imperial:
            distance = int(round(distance * 30.48))
        else:
            distance = int(round(distance * 100))
        return self.send_command(f'{direction} {distance}')

    def up(self, distance=DEFAULT_DISTANCE):
        return self.move('up', distance)

    def down(self, distance=DEFAULT_DISTANCE):
        return self.move('down', distance)

    def left(self, distance=DEFAULT_DISTANCE):
        return self.move('left', distance)

    def right(self, distance=DEFAULT_DISTANCE):
        return self.move('right', distance)

    def forward(self, distance=DEFAULT_DISTANCE):
        return self.move('forward', distance)

    def back(self, distance=DEFAULT_DISTANCE):
        return self.move('back', distance)

    def set_speed(self, speed):
        return self.send_command(f'speed {speed}')

    def clockwise(self, degree=DEFAULT_DEGREE):
        return self.send_command(f'cw {degree}')

    def counter_clockwise(self, degree=DEFAULT_DEGREE):
        return self.send_command(f'ccw {degree}')

    def video_binary_generator(self):
        while True:
            try:
                frame = self.proc_stdout.read(FRAME_SIZE)
            except Exception as ex:
                logger.error({'action': 'video_binary_generator', 'ex': ex})
                continue

            if not frame:
                continue

            frame = np.fromstring(frame, np.uint8).reshape(FRAME_Y, FRAME_X, 3)
            yield frame

    def enable_face_detect(self):
        self._is_enable_face_detect = True

    def disable_face_detect(self):
        self._is_enable_face_detect = False

    def video_jpeg_generator(self):
        for frame in self.video_binary_generator():
            if self._is_enable_face_detect:
                gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
                faces = self.face_cascade.detectMultiScale(gray, 1.2, 4)
                for (x, y, w, h) in faces:
                    cv.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
                    break

            _, jpeg = cv.imencode('.jpg', frame)
            jpeg_binary = jpeg.tobytes()
            yield jpeg_binary
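
The delay and the corrupt H.264 frames in the log are typical of decoding a lossy UDP stream with default buffering. One thing worth trying is a lower-latency, more error-tolerant variant of the CMD_FFMPEG decode command above; a minimal sketch, assuming the same pipe-based pipeline and the FRAME_X/FRAME_Y constants defined earlier (the exact flags are suggestions to experiment with, not part of the original project):

# Drop-in alternative for CMD_FFMPEG above (hardware acceleration omitted for simplicity):
#   -probesize 32 -analyzeduration 0   start decoding without waiting to probe the stream
#   -fflags nobuffer -flags low_delay  minimise decoder-side buffering (lower latency)
#   -err_detect ignore_err             keep decoding through corrupt frames instead of stalling
CMD_FFMPEG_LOW_LATENCY = (
    f'ffmpeg -probesize 32 -analyzeduration 0 '
    f'-fflags nobuffer -flags low_delay -err_detect ignore_err '
    f'-i pipe:0 -pix_fmt bgr24 -s {FRAME_X}x{FRAME_Y} -f rawvideo pipe:1'
)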