
Media (1)
-
Ogg detection bug
22 March 2013, by
Updated: April 2013
Language: French
Type: Video
Other articles (91)
-
MediaSPIP 0.1 Beta version
25 April 2011, by
MediaSPIP 0.1 beta is the first version of MediaSPIP proclaimed as "usable".
The zip file provided here only contains the sources of MediaSPIP in its standalone version.
To get a working installation, you must manually install all software dependencies on the server.
If you want to use this archive for an installation in "farm mode", you will also need to proceed to other manual (...) -
HTML5 audio and video support
13 April 2011, by
MediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
The MediaSPIP player used has been created specifically for MediaSPIP and can be easily adapted to fit in with a specific theme.
For older browsers the Flowplayer flash fallback is used.
MediaSPIP allows for media playback on major mobile platforms with the above (...) -
APPENDIX: Plugins used specifically for the farm
5 March 2010, by
The central/master site of the farm needs several additional plugins, beyond those used by the channels, in order to work properly: the Gestion de la mutualisation plugin; the inscription3 plugin, to manage registrations and requests to create a farm instance as soon as users sign up; the verifier plugin, which provides a field-validation API (used by inscription3); the champs extras v2 plugin, required by inscription3 (...)
On other sites (9063)
-
How to efficiently keep a stable frame rate video stream in a PyQt application?
2 October 2024, by Jeroen De Geeter
I am developing a PyQt (PySide6) application that needs to display and store multiple camera streams at the same time. Displaying the camera streams works well; however, storing these streams slows the application down significantly, to the point where the GUI no longer runs fluently.


I have a minimal working example that uses a stub to demonstrate how my code currently works. Note that, being a minimal example, it does not visibly reproduce the slowdown.


import sys
from time import sleep

import av
import numpy as np
import pyqtgraph as pg
from PySide6.QtCore import QThread, Signal, Slot, Qt
from PySide6.QtWidgets import QApplication, QHBoxLayout, QWidget, QVBoxLayout, QPushButton, QGroupBox


class RGBCameraStub(QThread):

    newFrame = Signal(np.ndarray)

    def __init__(self):
        super().__init__()
        self.killSwitch = True

    def stop(self):
        self.killSwitch = False
        self.quit()
        self.wait()

    def run(self):
        self.killSwitch = True
        while self.killSwitch:
            self.newFrame.emit((np.random.rand(1456, 1080, 3) * 255).astype(np.uint8))
            sleep((20 + int(np.random.rand() * 30)) / 1000)


class VideoWriter(QThread):

    def __init__(self):
        super().__init__()
        self.output_container = av.open('output_video.mkv', mode='w')
        self.stream = self.output_container.add_stream('ffv1', rate=None)
        self.stream.width = 1456
        self.stream.height = 1080
        self.stream.pix_fmt = 'yuv420p'

    @Slot(np.ndarray)
    def addFrame(self, frame: np.ndarray):
        av_frame = av.VideoFrame.from_ndarray(frame, format='rgb24')
        av_frame.pts = None  # Leave empty for auto-handling - variable framerate?
        for packet in self.stream.encode(av_frame):
            self.output_container.mux(packet)

    def stop(self):
        self.output_container.close()
        self.quit()
        self.wait()

    def run(self):
        self.exec()


class VideoBox(QGroupBox):

    def __init__(self, title):
        super().__init__(title=title)
        self.createLayout()
        self.videoWidget.setImage((np.random.rand(1456, 1080, 3) * 255).astype(np.uint8))

    def createLayout(self):
        layout = QVBoxLayout()
        self.videoWidget = pg.RawImageWidget()
        layout.addWidget(self.videoWidget)
        self.setLayout(layout)
        self.setStyleSheet("""QGroupBox {
            border: 1px solid #494B4F;
            margin-top: 8px;
            min-width: 180px;
            min-height: 180px;
            padding: 2px 0px 0px 0px;
        }
        QGroupBox::title {
            color: #aeb0b8;
            subcontrol-origin: margin;
            subcontrol-position: top left;
            left: 20px;
            padding: 0 8px;
        }""")

    def setImage(self, data: np.ndarray):
        self.videoWidget.setImage(data)


class MainWindow(QWidget):

    closeSignal = Signal()

    def __init__(self):
        super().__init__()
        self.setGeometry(0, 0, 900, 720)
        self.createLayout()

    def createLayout(self):
        self.vimbaImage = VideoBox("RGB")
        self.info = self.infoLayout()

        layout = QVBoxLayout()
        layout.addWidget(self.vimbaImage)
        layout.addWidget(self.info)
        self.setLayout(layout)

        self.setAttribute(Qt.WA_StyledBackground, True)
        self.setStyleSheet("MainWindow { background-color: #1e1f22; }")

    def infoLayout(self):
        widget = QWidget()
        layout = QVBoxLayout()

        rgbButtonWidget = QWidget()
        buttonLayout = QHBoxLayout()
        self.connectButton = QPushButton('Connect', parent=self)
        self.disconnectButton = QPushButton('Disconnect', parent=self)
        buttonLayout.addWidget(self.connectButton)
        buttonLayout.addWidget(self.disconnectButton)
        buttonLayout.addStretch()
        rgbButtonWidget.setLayout(buttonLayout)
        layout.addWidget(rgbButtonWidget)

        widget.setLayout(layout)
        return widget

    def closeEvent(self, event):
        self.closeSignal.emit()
        event.accept()


if __name__ == "__main__":
    app = QApplication(sys.argv)

    rgbCamera = RGBCameraStub()
    videoWriter = VideoWriter()
    videoWriter.start()

    main_window = MainWindow()

    # Button connections
    main_window.connectButton.clicked.connect(rgbCamera.start)
    main_window.disconnectButton.clicked.connect(rgbCamera.stop)
    # main_window.disconnectButton.clicked.connect(videoWriter.stop)

    # Display frames
    rgbCamera.newFrame.connect(main_window.vimbaImage.setImage)

    # Write frame to file
    rgbCamera.newFrame.connect(videoWriter.addFrame)

    # Close application
    main_window.closeSignal.connect(rgbCamera.stop)
    main_window.closeSignal.connect(videoWriter.stop)

    main_window.show()
    sys.exit(app.exec())




My questions therefore are:

- How can I increase the performance of the VideoWriter? I am currently adding frames one by one, as soon as the camera thread provides a new frame. Maybe this is not the best approach? (One possible direction is sketched below.)
- The frame rate of the camera is not completely stable, so I set av_frame.pts = None, but maybe this is also not the approach to take?
- With the code as is, the resulting media file quickly blows up in size. Is there a way of dealing with this without quality loss?

As a side note, I currently use the PyAV wrapper for the FFmpeg library, but I am open to other suggestions.
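Regarding the first question, one detail worth noting in the example above: videoWriter is a QThread object created in the main thread, so the queued newFrame connection delivers addFrame to the main thread's event loop, and the FFV1 encoding ends up running in the GUI thread rather than in the writer's own thread. Below is a minimal sketch of one possible alternative, a plain QObject worker moved to its own QThread so that encoding runs off the GUI thread. It assumes the same PySide6/PyAV setup as in the post; the names VideoWriterWorker and writerThread are illustrative, not taken from the original code.

import av
import numpy as np
from PySide6.QtCore import QObject, QThread, Slot


class VideoWriterWorker(QObject):
    """Encodes frames in whichever thread this object has been moved to."""

    def __init__(self, path='output_video.mkv', width=1456, height=1080):
        super().__init__()
        self.output_container = av.open(path, mode='w')
        self.stream = self.output_container.add_stream('ffv1')
        self.stream.width = width
        self.stream.height = height
        self.stream.pix_fmt = 'yuv420p'

    @Slot(np.ndarray)
    def addFrame(self, frame: np.ndarray):
        # Runs in the worker thread via a queued connection.
        av_frame = av.VideoFrame.from_ndarray(frame, format='rgb24')
        for packet in self.stream.encode(av_frame):
            self.output_container.mux(packet)

    @Slot()
    def stop(self):
        # Flush any frames still buffered in the encoder, then close the file.
        for packet in self.stream.encode():
            self.output_container.mux(packet)
        self.output_container.close()


# Wiring in __main__, replacing the VideoWriter QThread (sketch):
# writerThread = QThread()
# writer = VideoWriterWorker()
# writer.moveToThread(writerThread)
# writerThread.start()
# rgbCamera.newFrame.connect(writer.addFrame)        # queued: runs in writerThread
# main_window.closeSignal.connect(writer.stop)
# main_window.closeSignal.connect(writerThread.quit)

Whether this alone keeps the GUI fluent depends on how fast FFV1 can encode 1456x1080 RGB frames; if encoding is slower than capture, queued frames will still accumulate, so dropping or buffering frames may be needed on top of this.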
-
gcc "relocation R_X86_64_PC32 against symbol `ff_M24A'" error when linking statically against ffmpeg on linux
15 July 2021, by YB_Evil
I am trying to build a JNI shared library which statically links against ffmpeg.



But at the linking stage, gcc fails with the following error:



/usr/bin/ld: ./lib_lin64/libswscale.a(swscale.o): relocation R_X86_64_PC32 against symbol `ff_M24A' can not be used when making a shared object; recompile with -fPIC




I am using the following commands to compile my JNI library:



gcc -I $JAVA_HOME/include -I $JAVA_HOME/include/linux -I ./include -fPIC -c *.c

gcc -shared -Wl,--no-undefined -o libnv_avc_dec.so *.o -Wl,-Bstatic -L./lib_lin64 -lavcodec -lavutil -lswresample -lswscale -Wl,-Bdynamic -lm




I only use the h264 decoding feature, so I am also building ffmpeg from source with the minimal required feature set. The ./configure command I use is:



./configure \
--enable-pic --prefix=ffmpeg-dist \
--disable-debug --enable-version3 --enable-gpl \
--disable-everything --enable-hwaccel=h264_vdpau --enable-hwaccel=h264_vaapi --enable-hwaccel=h264_qsv --enable-hwaccel=h264_mmal \
--enable-decoder=h264 --enable-decoder=h264_vdpau --enable-decoder=h264_crystalhd --enable-decoder=h264_mmal --enable-decoder=h264_qsv \
--disable-iconv --disable-securetransport --disable-xlib --disable-zlib --disable-lzma --disable-bzlib --disable-doc --disable-programs --disable-avformat --disable-avfilter --disable-postproc




So, as I understand it, the linker is telling me that ffmpeg should be compiled with the -fPIC flag in order to build a shared library. But I believe I have already done that by specifying the --enable-pic configure flag. I am pretty much stuck here because I am not very familiar with autotools, nor with the ffmpeg build process in particular.



If the issue is that the ffmpeg .a libs are not compiled with the -fPIC flag, how can I force it? And if that is not the case, what am I doing wrong and how can I fix this error?



Environment details: Ubuntu 14.04.3 64-bit in VirtualBox, gcc 4.8.5 and 5.3 (both give the same results), ffmpeg v2.8.5

