
Recherche avancée
Médias (91)
-
Géodiversité
9 septembre 2011, par ,
Mis à jour : Août 2018
Langue : français
Type : Texte
-
USGS Real-time Earthquakes
8 septembre 2011, par
Mis à jour : Septembre 2011
Langue : français
Type : Texte
-
SWFUpload Process
6 septembre 2011, par
Mis à jour : Septembre 2011
Langue : français
Type : Texte
-
La conservation du net art au musée. Les stratégies à l’œuvre
26 mai 2011
Mis à jour : Juillet 2013
Langue : français
Type : Texte
-
Podcasting Legal guide
16 mai 2011, par
Mis à jour : Mai 2011
Langue : English
Type : Texte
-
Creativecommons informational flyer
16 mai 2011, par
Mis à jour : Juillet 2013
Langue : English
Type : Texte
Autres articles (69)
-
Encoding and processing into web-friendly formats
13 avril 2011, par
MediaSPIP automatically converts uploaded files to internet-compatible formats.
Video files are encoded in MP4, Ogv and WebM (supported by HTML5) and MP4 (supported by Flash).
Audio files are encoded in MP3 and Ogg (supported by HTML5) and MP3 (supported by Flash).
Where possible, text is analyzed in order to retrieve the data needed for search engine detection, and then exported as a series of image files.
All uploaded files are stored online in their original format, so you can (...) -
MediaSPIP Player : problèmes potentiels
22 février 2011, par
Le lecteur ne fonctionne pas sur Internet Explorer
Sur Internet Explorer (8 et 7 au moins), le plugin utilise le lecteur Flash flowplayer pour lire vidéos et son. Si le lecteur ne semble pas fonctionner, cela peut venir de la configuration du mod_deflate d’Apache.
Si dans la configuration de ce module Apache vous avez une ligne qui ressemble à la suivante, essayez de la supprimer ou de la commenter pour voir si le lecteur fonctionne correctement : /** * GeSHi (C) 2004 - 2007 Nigel McNie, (...) -
Supporting all media types
13 avril 2011, par
Unlike most software and media-sharing platforms, MediaSPIP aims to manage as many different media types as possible. The following are just a few examples from an ever-expanding list of supported formats : images : png, gif, jpg, bmp and more audio : MP3, Ogg, Wav and more video : AVI, MP4, OGV, mpg, mov, wmv and more text, code and other data : OpenOffice, Microsoft Office (Word, PowerPoint, Excel), web (html, CSS), LaTeX, Google Earth and (...)
Sur d’autres sites (4715)
-
Error submitting the frame for encoding when submitting NV12 texture
6 mai 2022, par Caio Augusto
I'm trying to encode a D3D11 NV12 texture on the QSV encoder but getting [h264_qsv @ 00000244ce6f50c0] Error submitting the frame for encoding.


Main :


// --- Question code: QSV/D3D11 encoder setup ---
// NOTE(review): the context is configured as a pure software NV12 encoder
// (pix_fmt = AV_PIX_FMT_NV12, no hw_frames_ctx), yet fill_frame() below
// pushes a D3D11 texture into the frame. For GPU surfaces the QSV encoder
// expects a hardware pixel format (AV_PIX_FMT_QSV / AV_PIX_FMT_D3D11) plus
// an AVHWFramesContext attached to ctx->hw_frames_ctx — TODO confirm
// against the FFmpeg hwcontext documentation; this mismatch is the most
// likely cause of the reported "Error submitting the frame" failure.
int width = 1920;
int height = 1080;
FILE* outfile;

// Hard-coded Windows output path; the fopen_s result is never checked, so
// a failed open leaves outfile invalid for the later fwrite calls.
fopen_s(&outfile, "D:\\Sources\\D3D11QSV\\x64\\Debug\\outfile.264", "wb");

// avcodec_find_encoder_by_name returns NULL when FFmpeg was built without
// the QSV encoder; the pointer is used unchecked below.
const AVCodec* codec = avcodec_find_encoder_by_name("h264_qsv");
AVCodecContext* ctx = avcodec_alloc_context3(codec);

ctx->width = width;
ctx->height = height;
ctx->time_base = AVRational{ 1, 60 };
ctx->framerate = AVRational{ 60, 1 };
ctx->slices = 1;

ctx->sw_pix_fmt = AV_PIX_FMT_NV12;
ctx->pix_fmt = AV_PIX_FMT_NV12;
ctx->bit_rate = 400000;
ctx->gop_size = 10;
ctx->max_b_frames = 1;

auto status = avcodec_open2(ctx, codec, NULL);
if (status < 0) {
 std::cout << "Open codec error!\n";
}

// sw_frame is allocated as a plain system-memory NV12 frame; its buffers
// come from av_frame_get_buffer, not from a D3D11 frames pool.
AVFrame* sw_frame = av_frame_alloc();
sw_frame->format = ctx->sw_pix_fmt;
sw_frame->width = ctx->width;
sw_frame->height = ctx->height;
status = av_frame_get_buffer(sw_frame, 0);

fill_frame(sw_frame, ctx);



Filling the frame :


// --- Question code: fill_frame body ---
// NOTE(review): ret is initialized to 0 right here, so the error check
// below can never fire — it is dead code copied from an FFmpeg example.
auto ret = 0;

if (ret < 0) {
 fprintf(stderr, "Could not allocate the video frame data\n");
 exit(1);
}

// y, x are declared but never used.
int i, y, x, c = 0;
for (i = 0; i < 60; i++) {
 fflush(stdout);

 ret = av_frame_make_writable(frame);
 
 // NOTE(review): frame->buf[0] holds raw NV12 pixel bytes (software
 // allocation above); casting it to AVD3D11FrameDescriptor is only valid
 // for frames that came from an AV_PIX_FMT_D3D11 hw-frames pool. Writing
 // through desc here scribbles over pixel data — verify against the
 // hwcontext_d3d11va docs.
 auto texture = create_texture();
 auto desc = (AVD3D11FrameDescriptor*)frame->buf[0]->data;
 desc->texture = (ID3D11Texture2D*)texture;
 desc->index = 0;

 // NOTE(review): data[0] now points at a COM interface, not pixels, and
 // width * 4 is an RGBA stride; NV12 plane 0 has a stride of `width`
 // bytes. The texture created each iteration is also never released.
 frame->data[0] = (std::uint8_t*)texture;
 frame->data[1] = 0;
 frame->linesize[0] = width * 4;

 frame->pts = i;

 encode(frame, ctx);
}



Creating Texture :


// --- Question code: create_texture body ---
// Describes a 1-mip, 1-slice NV12 texture that the CPU can write into.
// NOTE(review): no initial data is supplied and the texture is never
// Map()ed, so its contents are undefined when it reaches the encoder.
D3D11_TEXTURE2D_DESC const desc = CD3D11_TEXTURE2D_DESC(
 DXGI_FORMAT_NV12, // HoloLens PV camera format, common for video sources
 width, // Width of the video frames
 height, // Height of the video frames
 1, // Number of textures in the array
 1, // Number of miplevels in each texture
 D3D11_BIND_SHADER_RESOURCE, // We read from this texture in the shader
 D3D11_USAGE_DYNAMIC, // Because we'll be copying from CPU memory
 D3D11_CPU_ACCESS_WRITE // We only need to write into the texture
);

// NOTE(review): a brand-new device is created per texture (and leaked).
// For QSV the texture must live on the same ID3D11Device that the
// encoder's AVHWDeviceContext wraps — a texture from a different device
// cannot be submitted; confirm against the FFmpeg D3D11VA device docs.
ID3D11Device* pd3dDevice = create_d3d11_device();

ID3D11Texture2D* pTexture = NULL;
HRESULT err = pd3dDevice->CreateTexture2D(&desc, nullptr, &pTexture);


if (SUCCEEDED(err)) {
 // Luminance (Y) plane view of the NV12 texture.
 D3D11_SHADER_RESOURCE_VIEW_DESC SRVDesc = CD3D11_SHADER_RESOURCE_VIEW_DESC(
 pTexture,
 D3D11_SRV_DIMENSION_TEXTURE2D,
 DXGI_FORMAT_R8_UNORM
 );

 // NOTE(review): texSRV and m_chrominanceView are local COM pointers that
 // are never Release()d and never returned — both views leak.
 ID3D11ShaderResourceView* texSRV = NULL;

 err = pd3dDevice->CreateShaderResourceView(pTexture,
 &SRVDesc, &texSRV);

 // Chrominance (interleaved UV) plane view.
 D3D11_SHADER_RESOURCE_VIEW_DESC const chrominancePlaneDesc = CD3D11_SHADER_RESOURCE_VIEW_DESC(
 pTexture,
 D3D11_SRV_DIMENSION_TEXTURE2D,
 DXGI_FORMAT_R8G8_UNORM
 );

 ID3D11ShaderResourceView* m_chrominanceView = NULL;

 err = pd3dDevice->CreateShaderResourceView(pTexture,
 &chrominancePlaneDesc, &m_chrominanceView);

}

// Only the last HRESULT is checked; an earlier failure that a later call
// overwrote with S_OK would slip through.
if (FAILED(err))
{
 fprintf(stderr, "Error creating texture\n");
 exit(1);
}

return pTexture;



Creating D3D11 device :


// --- Question code: create_d3d11_device body ---
// Creates a hardware D3D11 device with video support.
// NOTE(review): devcon11 is requested but never released or returned, and
// the HRESULT is stored in an int and never checked — on failure dev11 is
// returned as NULL. FFmpeg's D3D11VA usage also expects multithread
// protection enabled on the device (ID3D10Multithread) — TODO confirm.
ID3D11Device* dev11 = NULL;
 ID3D11DeviceContext* devcon11 = NULL;

 D3D_FEATURE_LEVEL featureLevels[]{
 D3D_FEATURE_LEVEL_11_1,
 D3D_FEATURE_LEVEL_11_0,
 D3D_FEATURE_LEVEL_10_1,
 D3D_FEATURE_LEVEL_10_0,
 D3D_FEATURE_LEVEL_9_3,
 D3D_FEATURE_LEVEL_9_2,
 D3D_FEATURE_LEVEL_9_1
 };


 int err = D3D11CreateDevice(
 nullptr,
 D3D_DRIVER_TYPE_HARDWARE,
 nullptr,
 D3D11_CREATE_DEVICE_VIDEO_SUPPORT,
 featureLevels, sizeof(featureLevels) / sizeof(D3D_FEATURE_LEVEL),
 D3D11_SDK_VERSION,
 &dev11,
 nullptr,
 &devcon11);

 return dev11;



Encoding :


// --- Question code: encode body ---
// Standard send_frame / receive_packet drain loop.
auto status = 0;

status = avcodec_send_frame(ctx, frame); //error happening here

// NOTE(review): pkt is allocated but av_packet_free is never called on any
// exit path (the early `return` inside the loop included) — it leaks every
// call. Allocating once outside this function would also avoid the churn.
AVPacket* pkt;

pkt = av_packet_alloc();

if (status < 0) {
 fprintf(stderr, "Error sending a frame for encoding\n");
 exit(1);
}

while (status >= 0) {
 status = avcodec_receive_packet(ctx, pkt);
 // EAGAIN: encoder needs more input; EOF: fully drained. Both are normal.
 if (status == AVERROR(EAGAIN) || status == AVERROR_EOF)
 return;
 else if (status < 0) {
 fprintf(stderr, "Error during encoding\n");
 exit(1);
 }

 // NOTE(review): the format string has no conversion specifiers, so
 // pkt->pts and pkt->size are silently ignored — should be e.g.
 // "Write packet %lld (size=%d)\n".
 printf("Write packet \n", pkt->pts, pkt->size);
 fwrite(pkt->data, 1, pkt->size, outfile);
 av_packet_unref(pkt);
}



Everything runs well until encoding the frame. I have tried sending a dummy nv12 data (not a d3d11 texture) and it works well.


-
How to Transcode ALL Audio streams from input to output using ffmpeg ?
24 novembre 2022, par user1940163
I have an input MPEG TS file 'unit_test.ts'. This file has the following content (shown by ffprobe) :


Input #0, mpegts, from 'unit_test.ts':
 Duration: 00:00:57.23, start: 73674.049844, bitrate: 2401 kb/s
 Program 1
 Metadata:
 service_name : Service01
 service_provider: FFmpeg
 Stream #0:0[0x31]: Video: h264 (Main) ([27][0][0][0] / 0x001B), yuv420p(progressive), 852x480 [SAR 640:639 DAR 16:9], Closed Captions, 59.94 fps, 59.94 tbr, 90k tbn, 119.88 tbc
 Stream #0:1[0x34](eng): Audio: ac3 ([129][0][0][0] / 0x0081), 48000 Hz, 5.1(side), fltp, 448 kb/s
 Stream #0:2[0x35](spa): Audio: ac3 ([129][0][0][0] / 0x0081), 48000 Hz, stereo, fltp, 192 kb/s



I want to convert it into another MPEG TS file. Requirement is that the Video stream of the input should be directly copied to the output whereas ALL the audio streams should be transcoded "aac" format.


I tried this command :


ffmpeg -i unit_test.ts -map 0 -c copy -c:a aac maud_test.ts


It converted it into 'maud_test.ts' with following contents (shown by ffprobe)


Input #0, mpegts, from 'maud_test.ts':
 Duration: 00:00:57.25, start: 1.400000, bitrate: 2211 kb/s
 Program 1
 Metadata:
 service_name : Service01
 service_provider: FFmpeg
 Stream #0:0[0x100]: Video: h264 (Main) ([27][0][0][0] / 0x001B), yuv420p(progressive), 852x480 [SAR 640:639 DAR 16:9], Closed Captions, 59.94 fps, 59.94 tbr, 90k tbn, 119.88 tbc
 Stream #0:1[0x101](eng): Audio: aac (LC) ([15][0][0][0] / 0x000F), 48000 Hz, 6 channels, fltp, 391 kb/s
 Stream #0:2[0x102](spa): Audio: aac (LC) ([15][0][0][0] / 0x000F), 48000 Hz, stereo, fltp, 133 kb/s



So it appeared as if the command worked. However, when I play the maud_test.ts file in VLC player I can see both audio streams listed in the menu, but Stream 1 (eng) remains silent, whereas Stream 2 (spa) has proper audio. (The original TS file has both audio streams properly audible.)


I have tried this with different input files and have seen that same problem occurs in each case.


What is it that I am doing wrong ?


How should I get this done ? (I can write explicit stream by stream map and channel arguments to get that done ; however I want the command line to be generic, in that the input file could be having any configuration with one Video and several Audios with different formats. The configuration will not be known beforehand.)


-
How to convert from GL_RGB to AVFrame
11 avril 2022, par benny b
For my project I need to convert an RGB (GL_RGB) image generated by glReadPixels into an AVFrame. I've googled it and found just examples of the other way around. But in this case I need to go from GL_RGB to AVFrame.

Here's my code :


Here's how I set my codec :


/* Allocate resources and write header data to the output file.
 *
 * Sets up the module-level globals `c` (AVCodecContext*) and `frame`
 * (AVFrame*), both defined elsewhere in the file.
 *
 * @param codec_id  encoder to look up (e.g. AV_CODEC_ID_H264)
 * @param fps       frames per second; becomes the time base 1/fps
 * @param width     output width in pixels
 * @param height    output height in pixels
 *
 * Exits the process on any allocation or codec-open failure.
 * NOTE(review): the buffer from av_image_alloc must eventually be released
 * with av_freep(&frame->data[0]) — not visible in this excerpt.
 */
void ffmpeg_encoder_start(AVCodecID codec_id, int fps, int width, int height) {
 const AVCodec *codec;
 int ret;
 codec = avcodec_find_encoder(codec_id);
 if (!codec ) {
 std::cerr << "Codec not found" << std::endl;
 exit(1);
 }
 c = avcodec_alloc_context3(codec);
 if (!c) {
 std::cerr << "Could not allocate video codec context" << std::endl;
 exit(1);
 }
 c->bit_rate = 400000;
 c->width = width;
 c->height = height;
 c->time_base.num = 1;
 c->time_base.den = fps;
 // Minimum keyframe interval; 600 frames is 10 s at 60 fps.
 c->keyint_min = 600;
 // Encoder consumes planar YUV 4:2:0; RGB input is converted via swscale.
 c->pix_fmt = AV_PIX_FMT_YUV420P;
 if (avcodec_open2(c, codec, NULL) < 0) {
 std::cerr << "Could not open codec" << std::endl;
 exit(1);
 }
 frame = av_frame_alloc();
 if (!frame) {
 std::cerr << "Could not allocate video frame" << std::endl;
 exit(1);
 }
 frame->format = c->pix_fmt;
 frame->width = c->width;
 frame->height = c->height;
 // 32-byte alignment for SIMD-friendly plane strides.
 ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height, c->pix_fmt, 32);
 if (ret < 0) {
 std::cerr << "Could not allocate raw picture buffer" << std::endl;
 exit(1);
 }
}



Fetching the pixels and setting the new frame :


// --- Question code: fetch the framebuffer as tightly packed RGB bytes ---
// NOTE(review): raw new[] with no matching delete[] in the shown code —
// leaks one full frame per capture. Also, the default GL_PACK_ALIGNMENT of
// 4 pads each row when width * 3 is not a multiple of 4, which would skew
// the image; set glPixelStorei(GL_PACK_ALIGNMENT, 1) — TODO confirm.
BYTE* pixels = new BYTE[3 * DEFAULT_MONITOR.maxResolution.width * DEFAULT_MONITOR.maxResolution.height];

glReadPixels(0, 0, DEFAULT_MONITOR.maxResolution.width, DEFAULT_MONITOR.maxResolution.height, GL_RGB, GL_UNSIGNED_BYTE, pixels);
screenSrc->setNextFrame(pixels, DEFAULT_MONITOR.maxResolution.width, DEFAULT_MONITOR.maxResolution.height);



And the function that I have for the conversion :


/* Convert a packed RGB24 buffer into the global YUV420P `frame` via swscale.
 *
 * @param rgb  pointer to c->width * c->height * 3 bytes of RGB24 data.
 *
 * NOTE(review): the caller reads pixels at DEFAULT_MONITOR.maxResolution;
 * if that differs from c->width/c->height, sws_scale reads past the end of
 * `rgb` — the most likely cause of the reported segmentation fault. Verify
 * the two sizes match (or scale here by passing the real source size).
 * Also: glReadPixels returns rows bottom-to-top, so the encoded image will
 * be vertically flipped; and the sws_getCachedContext result is unchecked
 * (NULL on failure would crash sws_scale).
 */
static void ffmpeg_encoder_set_frame_yuv_from_rgb(uint8_t *rgb) {
 // One source plane: tightly packed RGB24, stride = 3 bytes per pixel.
 const int in_linesize[1] = { 3 * c->width };
 sws_context = sws_getCachedContext(sws_context,
 c->width, c->height, AV_PIX_FMT_RGB24,
 c->width, c->height, AV_PIX_FMT_YUV420P,
 0, 0, 0, 0);
 sws_scale(sws_context, (const uint8_t * const *)&rgb, in_linesize, 0,
 c->height, frame->data, frame->linesize);
}



All the code can be found here
Here is the line that results in a segmentation fault.


Unfortunately, the function gives me a segmentation fault. Do you have an idea how to solve this problem ?