
Recherche avancée
Autres articles (78)
-
Encoding and processing into web-friendly formats
13 avril 2011, par — MediaSPIP automatically converts uploaded files to internet-compatible formats.
Video files are encoded in MP4, Ogv and WebM (supported by HTML5) and MP4 (supported by Flash).
Audio files are encoded in MP3 and Ogg (supported by HTML5) and MP3 (supported by Flash).
Where possible, text is analyzed in order to retrieve the data needed for search engine detection, and then exported as a series of image files.
All uploaded files are stored online in their original format, so you can (...) -
MediaSPIP Player : problèmes potentiels
22 février 2011, par — Le lecteur ne fonctionne pas sur Internet Explorer
Sur Internet Explorer (8 et 7 au moins), le plugin utilise le lecteur Flash flowplayer pour lire vidéos et son. Si le lecteur ne semble pas fonctionner, cela peut venir de la configuration du mod_deflate d’Apache.
Si dans la configuration de ce module Apache vous avez une ligne qui ressemble à la suivante, essayez de la supprimer ou de la commenter pour voir si le lecteur fonctionne correctement : /** * GeSHi (C) 2004 - 2007 Nigel McNie, (...) -
Supporting all media types
13 avril 2011, par — Unlike most software and media-sharing platforms, MediaSPIP aims to manage as many different media types as possible. The following are just a few examples from an ever-expanding list of supported formats : images : png, gif, jpg, bmp and more audio : MP3, Ogg, Wav and more video : AVI, MP4, OGV, mpg, mov, wmv and more text, code and other data : OpenOffice, Microsoft Office (Word, PowerPoint, Excel), web (html, CSS), LaTeX, Google Earth and (...)
Sur d’autres sites (4912)
-
Blackmagic SDK - OpenGL Letterbox and FFMPEG
18 mars 2015, par Marco Reisacher — So I left my Idea using the DirectShow filters due to lack of Support of my needed Video formats.
The native API uses OpenGL which i am a total beginner to.
I stumbled upon the following problems :-
How to automatically apply a letterbox or pillarbox depending on the width and height of the Frame that gets passed to OpenGL (I’m using bpctrlanchormap.h to autosize everything and i get squeezed/stretched images)
-
How to record a Video of the OpenGL stream (I looked around and saw that ffmpeg should be able to do so, but I can’t get it running.) Also what would be nice is Audio recording into the same file of a microphone
I’m using the Blackmagic "Capture Preview" sample.
This is the sourcecode that initialises the OpenGL renderer and passes the Frames
#include "stdafx.h"
#include <gl/gl.h>
#include "PreviewWindow.h"
// Construct an idle preview window: every handle starts out NULL and the
// COM reference count starts at 1 (held by the creator).
PreviewWindow::PreviewWindow()
	: m_deckLinkScreenPreviewHelper(NULL)
	, m_refCount(1)
	, m_previewBox(NULL)
	, m_previewBoxDC(NULL)
	, m_openGLctx(NULL)
{
}
// Release everything init()/initOpenGL() acquired: the DeckLink preview
// helper (COM), the OpenGL rendering context, and the DC borrowed from the
// preview box. Each handle is NULLed after release so teardown is idempotent.
PreviewWindow::~PreviewWindow()
{
	if (m_deckLinkScreenPreviewHelper)
	{
		m_deckLinkScreenPreviewHelper->Release();
		m_deckLinkScreenPreviewHelper = NULL;
	}

	if (m_openGLctx)
	{
		wglDeleteContext(m_openGLctx);
		m_openGLctx = NULL;
	}

	if (m_previewBoxDC)
	{
		m_previewBox->ReleaseDC(m_previewBoxDC);
		m_previewBoxDC = NULL;
	}
}
// Bind this preview window to an MFC static control and create the DeckLink
// GL screen-preview helper, then initialise OpenGL on that control.
// Returns false if the COM object or the GL context cannot be created.
bool PreviewWindow::init(CStatic *previewBox)
{
	m_previewBox = previewBox;

	// Create the DeckLink screen preview helper.
	// BUG FIX: the original was missing the comma between the IID and the
	// output-pointer argument, which does not compile.
	if (CoCreateInstance(CLSID_CDeckLinkGLScreenPreviewHelper, NULL, CLSCTX_ALL,
			IID_IDeckLinkGLScreenPreviewHelper, (void**)&m_deckLinkScreenPreviewHelper) != S_OK)
		return false;

	// Initialise OpenGL
	return initOpenGL();
}
// Create an OpenGL rendering context attached to the preview box so later
// DrawFrame() calls can paint into it, and let the DeckLink helper perform
// its one-time GL initialisation. Returns false on any failure; acquired
// resources (DC, GL context) are released by the destructor.
bool PreviewWindow::initOpenGL()
{
	PIXELFORMATDESCRIPTOR pixelFormatDesc;
	int pixelFormat;
	bool result = false;

	// Get the preview box drawing context.
	m_previewBoxDC = m_previewBox->GetDC();
	if (m_previewBoxDC == NULL)
		return false;

	// Request a 32-bit RGBA, window-drawable, OpenGL-capable pixel format
	// with an 8-bit alpha channel and a 16-bit depth buffer.
	ZeroMemory(&pixelFormatDesc, sizeof(pixelFormatDesc));
	pixelFormatDesc.nSize = sizeof(pixelFormatDesc);
	pixelFormatDesc.nVersion = 1;
	pixelFormatDesc.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL;
	pixelFormatDesc.iPixelType = PFD_TYPE_RGBA;
	pixelFormatDesc.cColorBits = 32;
	pixelFormatDesc.cDepthBits = 16;
	pixelFormatDesc.cAlphaBits = 8;
	pixelFormatDesc.iLayerType = PFD_MAIN_PLANE;

	pixelFormat = ChoosePixelFormat(m_previewBoxDC->m_hDC, &pixelFormatDesc);
	// BUG FIX: ChoosePixelFormat returns 0 when no suitable format exists;
	// the original passed that 0 straight into SetPixelFormat.
	if (pixelFormat == 0)
		return false;
	// Win32 SetPixelFormat returns a BOOL, so compare against FALSE.
	if (SetPixelFormat(m_previewBoxDC->m_hDC, pixelFormat, &pixelFormatDesc) == FALSE)
		return false;

	// Create the OpenGL rendering context.
	m_openGLctx = wglCreateContext(m_previewBoxDC->m_hDC);
	if (m_openGLctx == NULL)
		return false;

	// Make the new context current just long enough to initialise the
	// DeckLink preview helper, then unbind it again.
	if (wglMakeCurrent(m_previewBoxDC->m_hDC, m_openGLctx) == FALSE)
		return false;

	if (m_deckLinkScreenPreviewHelper->InitializeGL() == S_OK)
		result = true;

	// Reset the OpenGL rendering context.
	wglMakeCurrent(NULL, NULL);

	return result;
}
// Paint one captured frame into the preview box. Fails with E_FAIL until
// init()/initOpenGL() have fully succeeded.
HRESULT PreviewWindow::DrawFrame(IDeckLinkVideoFrame* theFrame)
{
	// Guard: all three resources must exist before we can draw.
	const bool initialised = (m_deckLinkScreenPreviewHelper != NULL)
		&& (m_previewBoxDC != NULL)
		&& (m_openGLctx != NULL);
	if (!initialised)
		return E_FAIL;

	// Hand the frame to the DeckLink helper, bind our GL context, let the
	// helper do the actual drawing, then unbind so other code may use a
	// different rendering context.
	m_deckLinkScreenPreviewHelper->SetFrame(theFrame);
	wglMakeCurrent(m_previewBoxDC->m_hDC, m_openGLctx);
	m_deckLinkScreenPreviewHelper->PaintGL();
	wglMakeCurrent(NULL, NULL);

	return S_OK;
}
-
"Non-monotonous DTS in output stream 0:0 This may result in incorrect timestamps in the output file." error
1er août 2019, par petaire — I’m trying to go from .mkv to .mp4 through ffmpeg. Normally I would use
ffmpeg -i $1 -codec copy -strict -2 $2
But on this particular file, I get this error, like, A LOT :
Non-monotonous DTS in output stream 0:0; previous: 49189232, current: 49189232; changing to 49189233. This may result in incorrect timestamps in the output file.
I guess it has something to do with the DTS :
[mp4 @ 0x7f8b14001200] Invalid DTS: 14832 PTS: 13552 in output stream 0:0, replacing by guess
[mp4 @ 0x7f8b14001200] Invalid DTS: 15472 PTS: 12272 in output stream 0:0, replacing by guess
I would not care if the result was ok, but the sound is out of sync, and the video is "stuttering". Feels like everything is out of sync.
I’ve tried a lot of things, including -async 1 -vsync 1 , but nothing seems to work.
Here’s some mediainfo :
Complete name : /Users/petaire/Desktop/CNEWS-2019-07-23_16-00-00h.mkv
Format : Matroska
Format version : Version 2
File size : 1.19 GiB
Movie name : Time-2019-07-23_16:00
Writing application : Tvheadend 4.3-1801~g7f952c2ed
Writing library : Tvheadend Matroska muxer
Original source form : TV
Comment : Time recording
IsTruncated : Yes
DATE_BROADCASTED : 2019-07-23 16:00:00
Video
ID : 1
Format : AVC
Format/Info : Advanced Video Codec
Format profile : High@L4
Format settings : CABAC / 4 Ref Frames
Format settings, CABAC : Yes
Format settings, ReFrames : 4 frames
Codec ID : V_MPEG4/ISO/AVC
Width : 1 920 pixels
Height : 1 080 pixels
Display aspect ratio : 16:9
Frame rate mode : Constant
Frame rate : 25.000 fps
Standard : Component
Color space : YUV
Chroma subsampling : 4:2:0
Bit depth : 8 bits
Scan type : MBAFF
Scan type, store method : Interleaved fields
Scan order : Top Field First
Language : English
Default : Yes
Forced : No
Color range : Limited
Color primaries : BT.709
Transfer characteristics : BT.709
Matrix coefficients : BT.709
Audio
ID : 2
Format : E-AC-3
Format/Info : Enhanced AC-3
Commercial name : Dolby Digital Plus
Codec ID : A_EAC3
Bit rate mode : Constant
Bit rate : 128 Kbps
Channel(s) : 2 channels
Channel layout : L R
Sampling rate : 48.0 KHz
Frame rate : 31.250 fps (1536 SPF)
Compression mode : Lossy
Delay relative to video : -757ms
Language : French
Service kind : Complete Main
Default : Yes
Forced : No
Text
ID : 3
Format : DVB Subtitle
Codec ID : S_DVBSUB
Codec ID/Info : Picture based subtitle format used on DVBs
Language : French
Default : Yes
Forced : No
Any idea ?
-
How AVCodecContext bitrate, framerate and timebase is used when encoding single frame
28 mars 2023, par Cyrus — I am trying to learn FFmpeg from examples as there is a tight schedule. The task is to encode a raw YUV image into JPEG format of the given width and height. I have found examples from ffmpeg official website, which turns out to be quite straight-forward. However there are some fields in AVCodecContext that I thought only makes sense when encoding videos(e.g. bitrate, framerate, timebase, gopsize, max_b_frames etc).


I understand on a high level what those values are when it comes to videos, but do I need to care about those when I just want a single image ? Currently for testing, I am just setting them as dummy values and it seems to work. But I want to make sure that I am not making terrible assumptions that will break in the long run.


EDIT :


Here is the code I got. Most of them are copy and paste from examples, with some changes to replace old APIs with newer ones.


#include "thumbnail.h"
#include "libavcodec/avcodec.h"
#include "libavutil/imgutils.h"
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

/* Print a human-readable description of an FFmpeg error code to stdout. */
void print_averror(int error_code) {
    char message[100] = {0};
    av_strerror(error_code, message, sizeof(message));
    printf("Reason: %s\n", message);
}

ffmpeg_status_t save_yuv_as_jpeg(uint8_t* source_buffer, char* output_thumbnail_filename, int thumbnail_width, int thumbnail_height) {
 const AVCodec* mjpeg_codec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
 if (!mjpeg_codec) {
 printf("Codec for mjpeg cannot be found.\n");
 return FFMPEG_THUMBNAIL_CODEC_NOT_FOUND;
 }

 AVCodecContext* codec_ctx = avcodec_alloc_context3(mjpeg_codec);
 if (!codec_ctx) {
 printf("Codec context cannot be allocated for the given mjpeg codec.\n");
 return FFMPEG_THUMBNAIL_ALLOC_CONTEXT_FAILED;
 }

 AVPacket* pkt = av_packet_alloc();
 if (!pkt) {
 printf("Thumbnail packet cannot be allocated.\n");
 return FFMPEG_THUMBNAIL_ALLOC_PACKET_FAILED;
 }

 AVFrame* frame = av_frame_alloc();
 if (!frame) {
 printf("Thumbnail frame cannot be allocated.\n");
 return FFMPEG_THUMBNAIL_ALLOC_FRAME_FAILED;
 }

 // The part that I don't understand
 codec_ctx->bit_rate = 400000;
 codec_ctx->width = thumbnail_width;
 codec_ctx->height = thumbnail_height;
 codec_ctx->time_base = (AVRational){1, 25};
 codec_ctx->framerate = (AVRational){1, 25};

 codec_ctx->gop_size = 10;
 codec_ctx->max_b_frames = 1;
 codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
 int ret = av_image_fill_arrays(frame->data, frame->linesize, source_buffer, AV_PIX_FMT_YUV420P, thumbnail_width, thumbnail_height, 32);
 if (ret < 0) {
 print_averror(ret);
 printf("Pixel format: yuv420p, width: %d, height: %d\n", thumbnail_width, thumbnail_height);
 return FFMPEG_THUMBNAIL_FILL_FRAME_DATA_FAILED;
 }

 ret = avcodec_send_frame(codec_ctx, frame);
 if (ret < 0) {
 print_averror(ret);
 printf("Failed to send frame to encoder.\n");
 return FFMPEG_THUMBNAIL_FILL_SEND_FRAME_FAILED;
 }

 ret = avcodec_receive_packet(codec_ctx, pkt);
 if (ret < 0) {
 print_averror(ret);
 printf("Failed to receive packet from encoder.\n");
 return FFMPEG_THUMBNAIL_FILL_SEND_FRAME_FAILED;
 }

 // store the thumbnail in output
 int fd = open(output_thumbnail_filename, O_CREAT | O_RDWR);
 write(fd, pkt->data, pkt->size);
 close(fd);

 // freeing allocated structs
 avcodec_free_context(&codec_ctx);
 av_frame_free(&frame);
 av_packet_free(&pkt);
 return FFMPEG_SUCCESS;
}