
Other articles (28)
-
The SPIPmotion queue
28 November 2010
A queue stored in the database
When it is installed, SPIPmotion creates a new table in the database named spip_spipmotion_attentes.
This new table is made up of the following fields: id_spipmotion_attente, the unique numeric identifier of the task to be processed; id_document, the numeric identifier of the original document to be encoded; id_objet, the unique identifier of the object to which the encoded document should automatically be attached; objet, the type of object to which (...)
-
Contribute to documentation
13 April 2011
Documentation is vital to the development of improved technical capabilities.
MediaSPIP welcomes documentation by users as well as developers, including:
critique of existing features and functions;
articles contributed by developers, administrators, content producers and editors;
screenshots to illustrate the above;
translations of existing documentation into other languages.
To contribute, register to the project users’ mailing (...)
-
Managing creation and editing rights for objects
8 February 2011
By default, many features are restricted to administrators, but each can be configured independently to change the minimum author status required to use it, in particular: writing content on the site, which can be changed in the form template management; adding notes to articles; adding captions and annotations to images;
On other sites (4898)
-
Need help with noise in my video's audio after decoding with ffmpeg in C++ (tutorial from http://dranger.com/ffmpeg/)
13 February 2019, by Abe Jahwin
I just read and implemented a tutorial about ffmpeg from http://dranger.com/ffmpeg. After implementing tutorials 03 and 04, which cover playing sound, the program runs, but the sound output is bad: there is a lot of noise on top of the real audio, and I do not know where the problem is.
I tried changing the audio format, but the result is the same.
#include <iostream>
#include <vector>
// C standard headers used below (assert, fprintf, memset/memcpy/strncpy, rint)
#include <cassert>
#include <cstdio>
#include <cstring>
#include <cmath>
// SDL 1.2
#include <SDL.h>
#include <SDL_thread.h>
#undef main   // SDL redefines main as SDL_main on some platforms; undo that here
// FFmpeg
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/pixdesc.h>
#include <libswscale/swscale.h>
}
// compatibility with newer API
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
#define av_frame_alloc avcodec_alloc_frame
#define av_frame_free avcodec_free_frame
#endif
#define SDL_AUDIO_BUFFER_SIZE 1024
#define MAX_AUDIO_FRAME_SIZE 192000
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
#define FF_REFRESH_EVENT (SDL_USEREVENT)
#define FF_QUIT_EVENT (SDL_USEREVENT + 1)
#define VIDEO_PICTURE_QUEUE_SIZE 1
typedef struct PacketQueue {
AVPacketList *first_pkt, *last_pkt;
int nb_packets;
int size;
SDL_mutex *mutex;
SDL_cond *cond;
} PacketQueue;
typedef struct VideoPicture {
SDL_Overlay *bmp;
int width, height; /* source height & width */
int allocated;
} VideoPicture;
typedef struct VideoState {
AVFormatContext *pFormatCtx = nullptr;
int videoStream, audioStream;
AVStream *audio_st;
AVCodecContext *audio_ctx;
PacketQueue audioq;
uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2];
unsigned int audio_buf_size;
unsigned int audio_buf_index;
AVFrame audio_frame;
AVPacket audio_pkt;
uint8_t *audio_pkt_data;
int audio_pkt_size;
AVStream *video_st;
AVCodecContext *video_ctx = nullptr;
PacketQueue videoq;
struct SwsContext *sws_ctx;
VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
int pictq_size, pictq_rindex, pictq_windex;
SDL_mutex *pictq_mutex;
SDL_cond *pictq_cond;
SDL_Thread *parse_tid;
SDL_Thread *video_tid;
char filename[1024];
int quit;
} VideoState;
SDL_Surface *screen;
SDL_mutex *screen_mutex;
VideoState *global_video_state;
void packet_queue_init(PacketQueue *q) {
memset(q, 0, sizeof(PacketQueue));
q->mutex = SDL_CreateMutex();
q->cond = SDL_CreateCond();
}
int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
AVPacketList *pkt1;
if (av_dup_packet(pkt) < 0) {
return -1;
}
pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
if (!pkt1)
return -1;
pkt1->pkt = *pkt;
pkt1->next = NULL;
SDL_LockMutex(q->mutex);
if (!q->last_pkt)
q->first_pkt = pkt1;
else
q->last_pkt->next = pkt1;
q->last_pkt = pkt1;
q->nb_packets++;
q->size += pkt1->pkt.size;
SDL_CondSignal(q->cond);
SDL_UnlockMutex(q->mutex);
return 0;
}
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
AVPacketList *pkt1;
int ret;
SDL_LockMutex(q->mutex);
for (;;) {
if (global_video_state->quit) {
ret = -1;
break;
}
pkt1 = q->first_pkt;
if (pkt1) {
q->first_pkt = pkt1->next;
if (!q->first_pkt)
q->last_pkt = NULL;
q->nb_packets--;
q->size -= pkt1->pkt.size;
*pkt = pkt1->pkt;
av_free(pkt1);
ret = 1;
break;
}
else if (!block) {
ret = 0;
break;
}
else {
SDL_CondWait(q->cond, q->mutex);
}
}
SDL_UnlockMutex(q->mutex);
return ret;
}
int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size) {
int len1, data_size = 0;
AVPacket *pkt = &is->audio_pkt;
for (;;) {
while (is->audio_pkt_size > 0) {
int got_frame = 0;
len1 = avcodec_decode_audio4(is->audio_ctx, &is->audio_frame, &got_frame, pkt);
if (len1 < 0) {
/* if error, skip frame */
is->audio_pkt_size = 0;
break;
}
data_size = 0;
if (got_frame) {
data_size = av_samples_get_buffer_size(NULL,
is->audio_ctx->channels,
is->audio_frame.nb_samples,
is->audio_ctx->sample_fmt,
1);
assert(data_size <= buf_size);
memcpy(audio_buf, is->audio_frame.data[0], data_size);
}
is->audio_pkt_data += len1;
is->audio_pkt_size -= len1;
if (data_size <= 0) {
/* No data yet, get more frames */
continue;
}
/* We have data, return it and come back for more later */
return data_size;
}
if (pkt->data)
av_free_packet(pkt);
if (is->quit) {
return -1;
}
/* next packet */
if (packet_queue_get(&is->audioq, pkt, 1) < 0) {
return -1;
}
is->audio_pkt_data = pkt->data;
is->audio_pkt_size = pkt->size;
}
}
void audio_callback(void *userdata, Uint8 *stream, int len) {
VideoState *is = (VideoState *)userdata;
int len1, audio_size;
while (len > 0) {
if (is->audio_buf_index >= is->audio_buf_size) {
/* We have already sent all our data; get more */
audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf));
if (audio_size < 0) {
/* If error, output silence */
is->audio_buf_size = 1024;
memset(is->audio_buf, 0, is->audio_buf_size);
}
else {
is->audio_buf_size = audio_size;
}
is->audio_buf_index = 0;
}
len1 = is->audio_buf_size - is->audio_buf_index;
if (len1 > len)
len1 = len;
memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
len -= len1;
stream += len1;
is->audio_buf_index += len1;
}
}
int decode_interrupt_cb(void) {
return (global_video_state && global_video_state->quit);
}
void alloc_picture(void *userdata) {
VideoState *is = (VideoState *)userdata;
VideoPicture *vp;
vp = &is->pictq[is->pictq_windex];
if (vp->bmp) {
// we already have one make another, bigger/smaller
SDL_FreeYUVOverlay(vp->bmp);
}
// Allocate a place to put our YUV image on that screen
SDL_LockMutex(screen_mutex);
vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
is->video_st->codec->height,
SDL_YV12_OVERLAY,
screen);
SDL_UnlockMutex(screen_mutex);
vp->width = is->video_st->codec->width;
vp->height = is->video_st->codec->height;
vp->allocated = 1;
}
int queue_picture(VideoState *is, AVFrame *pFrame) {
VideoPicture *vp;
int dst_pix_fmt;
AVPicture pict;
/* wait until we have space for a new pic */
SDL_LockMutex(is->pictq_mutex);
while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
!is->quit) {
SDL_CondWait(is->pictq_cond, is->pictq_mutex);
}
SDL_UnlockMutex(is->pictq_mutex);
if (is->quit)
return -1;
// windex is set to 0 initially
vp = &is->pictq[is->pictq_windex];
/* allocate or resize the buffer! */
if (!vp->bmp ||
vp->width != is->video_st->codec->width ||
vp->height != is->video_st->codec->height) {
SDL_Event event;
vp->allocated = 0;
alloc_picture(is);
if (is->quit) {
return -1;
}
}
if (vp->bmp) {
SDL_LockYUVOverlay(vp->bmp);
dst_pix_fmt = AV_PIX_FMT_YUV420P;
/* point pict at the queue */
pict.data[0] = vp->bmp->pixels[0];
pict.data[1] = vp->bmp->pixels[2];
pict.data[2] = vp->bmp->pixels[1];
pict.linesize[0] = vp->bmp->pitches[0];
pict.linesize[1] = vp->bmp->pitches[2];
pict.linesize[2] = vp->bmp->pitches[1];
// Convert the image into YUV format that SDL uses
sws_scale(is->sws_ctx, (uint8_t const * const *)pFrame->data,
pFrame->linesize, 0, is->video_st->codec->height,
pict.data, pict.linesize);
SDL_UnlockYUVOverlay(vp->bmp);
/* now we inform our display thread that we have a pic ready */
if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) {
is->pictq_windex = 0;
}
SDL_LockMutex(is->pictq_mutex);
is->pictq_size++;
SDL_UnlockMutex(is->pictq_mutex);
}
return 0;
}
int video_thread(void *arg) {
VideoState *is = (VideoState *)arg;
AVPacket pkt1, packet;
int frameFinished;
AVFrame *pFrame = av_frame_alloc();
for (;;) {
if (packet_queue_get(&is->videoq, &packet, 1) < 0) {
// means we quit getting packets
break;
}
// Decode video frame
avcodec_decode_video2(is->video_ctx, pFrame, &frameFinished, &packet);
// Did we get a video frame?
if (frameFinished) {
if (queue_picture(is, pFrame) < 0) {
break;
}
}
av_free_packet(&packet);
}
av_free(pFrame);
return 0;
}
int stream_component_open(VideoState *is, int stream_index) {
AVFormatContext *pFormatCtx = is->pFormatCtx;
AVCodecContext *codecCtx = nullptr;
AVCodec *codec = nullptr;
SDL_AudioSpec wanted_spec, spec;
AVCodecParameters *CodecParameters = NULL;
if (stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
return -1;
}
CodecParameters = pFormatCtx->streams[stream_index]->codecpar;
codecCtx = pFormatCtx->streams[stream_index]->codec;
codec = avcodec_find_decoder(codecCtx->codec_id);
if (!codec) {
fprintf(stderr, "Unsupported codec!\n");
return -1;
}
codecCtx = avcodec_alloc_context3(codec);
if (avcodec_parameters_to_context(codecCtx, CodecParameters) < 0)
{
fprintf(stderr, "Couldn't copy codec context");
return -1;
}
if (codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) {
// Set audio settings from codec info
wanted_spec.freq = codecCtx->sample_rate;
wanted_spec.format = AUDIO_S16SYS;
wanted_spec.channels = codecCtx->channels;
wanted_spec.silence = 0;
wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
wanted_spec.callback = audio_callback;
wanted_spec.userdata = is;
if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
return -1;
}
}
if (avcodec_open2(codecCtx, codec, NULL) < 0) {
fprintf(stderr, "Unsupported codec!\n");
return -1;
}
switch (codecCtx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
is->audioStream = stream_index;
is->audio_st = pFormatCtx->streams[stream_index];
is->audio_ctx = codecCtx;
is->audio_buf_size = 0;
is->audio_buf_index = 0;
memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
packet_queue_init(&is->audioq);
SDL_PauseAudio(0);
break;
case AVMEDIA_TYPE_VIDEO:
is->videoStream = stream_index;
is->video_st = pFormatCtx->streams[stream_index];
is->video_ctx = codecCtx;
packet_queue_init(&is->videoq);
is->video_tid = SDL_CreateThread(video_thread, is);
is->sws_ctx = sws_getContext(is->video_st->codec->width, is->video_st->codec->height,
is->video_st->codec->pix_fmt, is->video_st->codec->width,
is->video_st->codec->height, AV_PIX_FMT_YUV420P,
SWS_BILINEAR, NULL, NULL, NULL
);
break;
default:
break;
}
return 0;
}
static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) {
SDL_Event event;
event.type = FF_REFRESH_EVENT;
event.user.data1 = opaque;
SDL_PushEvent(&event);
return 0; /* 0 means stop timer */
}
/* schedule a video refresh in 'delay' ms */
static void schedule_refresh(VideoState *is, int delay) {
SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}
void video_display(VideoState *is) {
SDL_Rect rect;
VideoPicture *vp;
float aspect_ratio;
int w, h, x, y;
int i;
vp = &is->pictq[is->pictq_rindex];
if (vp->bmp) {
if (is->video_st->codec->sample_aspect_ratio.num == 0) {
aspect_ratio = 0;
}
else {
aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio) *
is->video_st->codec->width / is->video_st->codec->height;
}
if (aspect_ratio <= 0.0) {
aspect_ratio = (float)is->video_st->codec->width /
(float)is->video_st->codec->height;
}
h = screen->h;
w = ((int)rint(h * aspect_ratio)) & -3;
if (w > screen->w) {
w = screen->w;
h = ((int)rint(w / aspect_ratio)) & -3;
}
x = (screen->w - w) / 2;
y = (screen->h - h) / 2;
rect.x = x;
rect.y = y;
rect.w = w;
rect.h = h;
SDL_LockMutex(screen_mutex);
SDL_DisplayYUVOverlay(vp->bmp, &rect);
SDL_UnlockMutex(screen_mutex);
}
}
void video_refresh_timer(void *userdata) {
VideoState *is = (VideoState *)userdata;
VideoPicture *vp;
if (is->video_st) {
if (is->pictq_size == 0) {
schedule_refresh(is, 1);
}
else {
vp = &is->pictq[is->pictq_rindex];
/* Timing code goes here */
schedule_refresh(is, 80);
/* show the picture! */
video_display(is);
/* update queue for next picture! */
if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
is->pictq_rindex = 0;
}
SDL_LockMutex(is->pictq_mutex);
is->pictq_size--;
SDL_CondSignal(is->pictq_cond);
SDL_UnlockMutex(is->pictq_mutex);
}
}
else {
schedule_refresh(is, 100);
}
}
int decode_thread(void *arg) {
VideoState *is = (VideoState *)arg;
AVFormatContext* pFormatCtx = avformat_alloc_context();
AVPacket pkt1, *packet = &pkt1;
int video_index = -1;
int audio_index = -1;
int i;
int ret;
is->videoStream = -1;
is->audioStream = -1;
global_video_state = is;
ret = avformat_open_input(&pFormatCtx, is->filename, nullptr, nullptr);
// Open video file
if (ret != 0)
printf("Failed to open the file");
//return -1; // Couldn't open file
is->pFormatCtx = pFormatCtx;
// Retrieve stream information
if (avformat_find_stream_info(pFormatCtx, NULL)<0)
return -1; // Couldn't find stream information
// Dump information about file onto standard error
av_dump_format(pFormatCtx, 0, is->filename, 0);
// Find the first video stream
for (i = 0; i < pFormatCtx->nb_streams; i++) {
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
video_index < 0) {
video_index = i;
}
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
audio_index < 0) {
audio_index = i;
}
}
if (video_index >= 0) {
stream_component_open(is, video_index);
}
if (audio_index >= 0) {
stream_component_open(is, audio_index);
}
if (is->videoStream < 0) {
fprintf(stderr, "%s: could not open Video codecs\n", is->filename);
system("pause");
goto fail;
}
if (is->audioStream < 0) {
fprintf(stderr, "%s: could not open Audio codecs\n", is->filename);
system("pause");
goto fail;
}
// main decode loop
for (;;) {
if (is->quit) {
break;
}
// seek stuff goes here
if (is->audioq.size > MAX_AUDIOQ_SIZE ||
is->videoq.size > MAX_VIDEOQ_SIZE) {
SDL_Delay(10);
continue;
}
if (av_read_frame(is->pFormatCtx, packet) < 0) {
if (is->pFormatCtx->pb->error == 0) {
SDL_Delay(100); /* no error; wait for user input */
continue;
}
else {
break;
}
}
// Is this a packet from the video stream?
if (packet->stream_index == is->videoStream) {
packet_queue_put(&is->videoq, packet);
}
else if (packet->stream_index == is->audioStream) {
packet_queue_put(&is->audioq, packet);
}
else {
av_free_packet(packet);
}
}
/* all done - wait for it */
while (!is->quit) {
SDL_Delay(100);
}
fail:
if (1) {
SDL_Event event;
event.type = FF_QUIT_EVENT;
event.user.data1 = is;
SDL_PushEvent(&event);
}
return 0;
}
int main() {
SDL_Event event;
VideoState *is;
is = (VideoState *)av_mallocz(sizeof(VideoState));
// Register all formats and codecs
av_register_all();
if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
exit(1);
}
// Make a screen to put our video
#ifndef __DARWIN__
screen = SDL_SetVideoMode(640, 480, 0, 0);
#else
screen = SDL_SetVideoMode(640, 480, 24, 0);
#endif
if (!screen) {
fprintf(stderr, "SDL: could not set video mode - exiting\n");
exit(1);
}
screen_mutex = SDL_CreateMutex();
strncpy(is->filename, "C:/vidoefile.mp4", sizeof(is->filename));
is->pictq_mutex = SDL_CreateMutex();
is->pictq_cond = SDL_CreateCond();
schedule_refresh(is, 40);
is->parse_tid = SDL_CreateThread(decode_thread, is);
if (!is->parse_tid) {
av_free(is);
return -1;
}
for (;;) {
SDL_WaitEvent(&event);
switch (event.type) {
case FF_QUIT_EVENT:
case SDL_QUIT:
is->quit = 1;
SDL_Quit();
return 0;
break;
case FF_REFRESH_EVENT:
video_refresh_timer(event.user.data1);
break;
default:
break;
}
}
printf("Done playing video\n");
system("pause");
return 0;
}
I just need the sound to be played normally, without noise.
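A frequent cause of exactly this symptom when the dranger tutorial code is built against a recent FFmpeg (offered here only as a hypothesis, since the decoder output of this particular file is unknown) is a sample-format mismatch: many audio decoders, AAC for instance, produce planar float (AV_SAMPLE_FMT_FLTP), while SDL was opened with AUDIO_S16SYS, so the raw memcpy in audio_decode_frame hands SDL bytes in the wrong layout. A minimal sketch of converting each decoded frame to packed signed 16-bit with libswresample, written as a hypothetical helper rather than taken from the code above, could look like this:
/* Sketch only, not the tutorial's code: convert one decoded AVFrame to the
 * packed S16 layout that the SDL callback above expects (AUDIO_S16SYS).
 * Assumes an FFmpeg version from the avcodec_decode_audio4 era and requires
 *   extern "C" { #include <libswresample/swresample.h> }
 * plus linking against libswresample. Returns the number of bytes written,
 * or -1 on error. */
static int convert_to_s16(SwrContext **swr, AVCodecContext *ctx,
                          AVFrame *frame, uint8_t *out_buf) {
    if (!*swr) {  /* lazily create the resampler on the first call */
        *swr = swr_alloc_set_opts(NULL,
            av_get_default_channel_layout(ctx->channels), AV_SAMPLE_FMT_S16,
            ctx->sample_rate,
            av_get_default_channel_layout(ctx->channels), ctx->sample_fmt,
            ctx->sample_rate, 0, NULL);
        if (!*swr || swr_init(*swr) < 0)
            return -1;
    }
    uint8_t *out[] = { out_buf };
    int n = swr_convert(*swr, out, frame->nb_samples,
                        (const uint8_t **)frame->data, frame->nb_samples);
    if (n < 0)
        return -1;
    return n * ctx->channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
}
In audio_decode_frame() the memcpy of audio_frame.data[0] would then be replaced by something like data_size = convert_to_s16(&is->swr, is->audio_ctx, &is->audio_frame, audio_buf), where SwrContext *swr is a hypothetical extra field in VideoState, freed with swr_free() on shutdown. If the decoder really does output AV_SAMPLE_FMT_FLTP, this conversion is usually what removes the noise.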
-
How to check whether a live stream is still alive using the "ffprobe" command?
17 August 2018, by Yin JianFeng
I want to schedule a job script that uses the "ffprobe" command to check whether a live stream is still alive, so that I can update the database state for streams that are already dead.
I tried the command:
ffprobe -v quiet -print_format json -show_streams rtmp://xxxx
but when the stream is not available, the command hangs.
I tried adding the -timeout argument, but it still does not work properly.
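One workaround that is often suggested for this situation (the option names below exist in FFmpeg and GNU coreutils, but the specific timeout values are arbitrary assumptions, not taken from the question) is to bound the probe from the outside with the coreutils timeout command and to pass FFmpeg's rw_timeout protocol option, in microseconds, so that blocked network reads eventually give up:
timeout 10 ffprobe -v quiet -print_format json -show_streams -rw_timeout 5000000 rtmp://xxxx
A scheduled job can then treat any non-zero exit status as "stream is dead"; GNU timeout exits with status 124 when it has to kill ffprobe. Depending on the FFmpeg build and the protocol, rw_timeout may or may not take effect for RTMP, which is why the outer timeout serves as the safety net.
-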
Merge commit ’803d21f38bdafe7c4e2571a9ae7052013a12923b’
3 May 2013, by Michael Niedermayer
Merge commit ’803d21f38bdafe7c4e2571a9ae7052013a12923b’
* commit ’803d21f38bdafe7c4e2571a9ae7052013a12923b’:
lavc: schedule AVCodecContext.lowres for removal on next bump.
Conflicts:
libavcodec/avcodec.h
Merged-by: Michael Niedermayer <michaelni@gmx.at>