
Media (1)
-
Publishing an image simply
13 April 2011
Updated: February 2012
Language: French
Type: Video
Other articles (96)
-
MediaSPIP 0.1 Beta version
25 April 2011
MediaSPIP 0.1 beta is the first version of MediaSPIP proclaimed as "usable".
The zip file provided here only contains the sources of MediaSPIP in its standalone version.
To get a working installation, you must manually install all software dependencies on the server.
If you want to use this archive for an installation in "farm mode", you will also need to proceed to other manual (...) -
Multilang: improving the interface for multilingual blocks
18 February 2011
Multilang is an additional plugin that is not enabled by default when MediaSPIP is initialized.
Once activated, a preconfiguration is set up automatically by MediaSPIP init, so the new feature is immediately operational. No configuration step is therefore required. -
HTML5 audio and video support
13 April 2011
MediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
The player was created specifically for MediaSPIP and can easily be adapted to fit a specific theme.
For older browsers, the Flowplayer Flash fallback is used.
MediaSPIP allows media playback on major mobile platforms with the above (...)
On other sites (7229)
-
Images to video in FFmpeg from within a Java program
12 December 2017, by Cronch c
At the minute I am writing BufferedImages to a temp folder and then calling ffmpeg to convert them into a video. Is there a way to send the images directly to ffmpeg from within Java rather than writing them out first? I am assuming this would be faster, if it is possible.
This is what I have at the moment, which works fine reading from a directory:
ProcessBuilder processBuilder = new ProcessBuilder(
        "ffmpeg",
        "-y",
        "-r", framerate,
        "-f", "image2",
        "-i", imageFramesDir,
        "-itsoffset", offset.toString(),
        "-i", audioFile,
        "-c:v", "libx264",
        "-c:a", "aac",
        "-pix_fmt", "yuv420p",
        "-crf", "23",
        "-r", "24",
        "-vf", "scale=720x406,setdar=16:9",
        outputFile);
processBuilder.redirectErrorStream(false);
Process process = processBuilder.start();
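One possible answer (my sketch, not something from the original thread): ffmpeg can read a stream of images from standard input through its image2pipe demuxer, so the temp folder can be skipped entirely. The PipeToFfmpeg class and the frames, framerate and outputFile names below are illustrative assumptions, and the audio and offset options from the question are left out for brevity.

import java.awt.image.BufferedImage;
import java.io.OutputStream;
import java.util.List;
import javax.imageio.ImageIO;

public class PipeToFfmpeg {
    static void encode(List<BufferedImage> frames, String framerate, String outputFile)
            throws Exception {
        ProcessBuilder pb = new ProcessBuilder(
                "ffmpeg", "-y",
                "-f", "image2pipe",      // read a stream of images from a pipe
                "-framerate", framerate, // input frame rate
                "-i", "-",               // "-" means: read the images from stdin
                "-c:v", "libx264",
                "-pix_fmt", "yuv420p",
                outputFile);
        pb.redirectErrorStream(true);
        Process process = pb.start();
        try (OutputStream ffmpegStdin = process.getOutputStream()) {
            for (BufferedImage frame : frames) {
                // PNG images are self-delimiting, so frames can simply be concatenated
                ImageIO.write(frame, "png", ffmpegStdin);
            }
        } // closing stdin sends EOF, which lets ffmpeg finalize the output file
        process.waitFor();
    }
}

Whether this is actually faster than the temp-folder version depends mostly on the cost of PNG encoding; writing "bmp" instead of "png" is a cheaper option to try. -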
Get image output from a Python program and use ffmpeg to push the real-time video stream to the web
26 May 2018, by Nick Tseng
I have a question: how do I push a real-time video stream after processing?
First, the program below preprocesses images from a USB camera.
Second, the images are pushed as a real-time video stream by another process (ffmpeg -r 29 -i test.avi http://ffserver IP:8090/camera.ffm).
cap = cv2.VideoCapture(1)  # activate the USB camera
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
ysize, xsize = frame_height, frame_width

filepath = 'test.avi'
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out = cv2.VideoWriter(filepath, fourcc, 29.0, (frame_width, frame_height), True)

while cap.isOpened():
    # read a camera image
    ret, frame = cap.read()
    # color selection
    tmp = color_select_test(frame)
    # mix with the original image
    tmp = weighted_img(tmp, frame, 0.5, 1.0, 0.0)
    # write into the video file
    out.write(tmp)
    # cv2.imwrite('test.avi', tmp)
    cv2.imshow('FrameWrite', tmp)
    if cv2.waitKey(1) == 27:
        # cv2.imwrite('test_blue.jpg', frame)
        break

cap.release()
cv2.destroyAllWindows()
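A common way to avoid the test.avi intermediate, sketched below as an assumption rather than the original setup, is to launch ffmpeg as a child process and write each processed frame to its stdin as raw video, so no second process has to re-read the file. The ffserver URL is kept as the placeholder from the question, and the color_select_test/weighted_img processing is elided.

import subprocess
import cv2

cap = cv2.VideoCapture(1)  # open the USB camera
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# ffmpeg reads headerless raw frames from stdin, so the size, pixel format
# and rate of the input have to be declared explicitly.
ffmpeg = subprocess.Popen(
    ['ffmpeg',
     '-f', 'rawvideo',
     '-pix_fmt', 'bgr24',          # OpenCV frames are 8-bit BGR
     '-s', f'{width}x{height}',
     '-r', '29',
     '-i', '-',                    # '-' means: read the frames from stdin
     'http://ffserver IP:8090/camera.ffm'],  # placeholder URL from the question
    stdin=subprocess.PIPE)

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    # per-frame processing (color selection, blending, ...) would go here
    ffmpeg.stdin.write(frame.tobytes())  # one raw BGR frame per write

cap.release()
ffmpeg.stdin.close()  # EOF tells ffmpeg the stream has ended
ffmpeg.wait()
-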
cuda invalid resource handle when using h264_cuvid decoder in my C program [closed]
29 July 2021, by ChrisFisher
I am trying to use GPU acceleration in my ffmpeg decoding program, so I checked the available codecs with the command:
ffmpeg -codecs | grep nv
It shows that I can use the h264_cuvid decoder (in fact, I have already used the ffmpeg command line to encode and decode a test video with hardware acceleration, and everything worked fine). But when I use the decoder in my program:

AVCodec *pCodec = avcodec_find_decoder_by_name("h264_cuvid");


Here is part of my program:


void FFMPEGCodec::initDecoder()
{
    AVCodec *pCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!pCodec) {
        LOG("Codec decoder not found\n");
        exit(1);
    }

    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (!pCodecCtx) {
        LOG("Could not allocate video codec context\n");
        exit(1);
    }

    pCodecCtx->width = gConfig->totalWidth;
    pCodecCtx->height = gConfig->totalHeight;
    pCodecCtx->has_b_frames = 0;

    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        LOG("Could not open codec\n");
        exit(1);
    }

    if (_x264rgb) {
        // used to convert a GBRP frame to an RGB image
        convertCtx = sws_getContext(gConfig->totalWidth, gConfig->totalHeight, AV_PIX_FMT_GBRP,
                                    gConfig->totalWidth, gConfig->totalHeight, AV_PIX_FMT_RGB24,
                                    SWS_FAST_BILINEAR, NULL, NULL, NULL);
    } else {
        // used to convert a YUV frame to an RGB image
        convertCtx = sws_getContext(gConfig->totalWidth, gConfig->totalHeight, AV_PIX_FMT_YUV420P,
                                    gConfig->totalWidth, gConfig->totalHeight, AV_PIX_FMT_RGB24,
                                    SWS_FAST_BILINEAR, NULL, NULL, NULL);
    }

    if (convertCtx == NULL) {
        LOG("Failed to get SwsContext\n");
        exit(1);
    }

    // when using x264rgb it is actually a GBRP frame,
    // just didn't want to define another variable
    yuvFrame = av_frame_alloc();
    if (!yuvFrame) {
        LOG("Failed to allocate yuv frame\n");
        exit(1);
    }

    rgbFrame = av_frame_alloc();
    if (!rgbFrame) {
        LOG("Failed to allocate rgb frame\n");
        exit(1);
    }

    rgbFrame->format = AV_PIX_FMT_RGB24;
    rgbFrame->width = pCodecCtx->width;
    rgbFrame->height = pCodecCtx->height;

    int ret = av_image_alloc(rgbFrame->data, rgbFrame->linesize, rgbFrame->width, rgbFrame->height,
                             AV_PIX_FMT_RGB24, 32);
    if (ret < 0) {
        LOG("Failed to allocate raw picture buffer\n");
        exit(1);
    }
}



and


int FFMPEGCodec::decode(byte* pktData, int pktSize, byte* imgData)
{
    int ret = 0, got_packet = 0;
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = pktData;
    pkt.size = pktSize;

    // decode one video frame
    ret = avcodec_decode_video2(pCodecCtx, yuvFrame, &got_packet, &pkt);
    if (ret < 0) {
        LOG("Error decoding frame\n");
        return -1;
    }

    sws_scale(convertCtx, yuvFrame->data, yuvFrame->linesize, 0, pCodecCtx->height,
              rgbFrame->data, rgbFrame->linesize);

    if (got_packet) {
        int width = pCodecCtx->width, height = pCodecCtx->height;
        int fsize = rgbFrame->linesize[0] * rgbFrame->height;
        int size = 54 + fsize;

        byte bmp_file_header[14] = { 'B', 'M', 0, 0, 0, 0, 0, 0, 0, 0, 54, 0, 0, 0, };
        byte bmp_info_header[40] = { 40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 24, 0, };
        byte bmp_pad[3] = { 0, 0, 0 };

        bmp_file_header[2] = (unsigned char)size;
        bmp_file_header[3] = (unsigned char)(size >> 8);
        bmp_file_header[4] = (unsigned char)(size >> 16);
        bmp_file_header[5] = (unsigned char)(size >> 24);

        bmp_info_header[4] = (unsigned char)(width);
        bmp_info_header[5] = (unsigned char)(width >> 8);
        bmp_info_header[6] = (unsigned char)(width >> 16);
        bmp_info_header[7] = (unsigned char)(width >> 24);
        bmp_info_header[8] = (unsigned char)(height);
        bmp_info_header[9] = (unsigned char)(height >> 8);
        bmp_info_header[10] = (unsigned char)(height >> 16);
        bmp_info_header[11] = (unsigned char)(height >> 24);

        memcpy(imgData, bmp_file_header, 14);
        memcpy(imgData + 14, bmp_info_header, 40);
        memcpy(imgData + 54, rgbFrame->data[0], fsize);
        ret = size;
    }
    av_free_packet(&pkt);

    return ret;
}




After compiling, I ran the program, and the decoder threw me an error:


ctx->cvdl->cuvidDecodePicture(ctx->cudecoder, picparams) failed -> CUDA_ERROR_INVALID_HANDLE: invalid resource handle

when calling the function avcodec_decode_video2.


I don't know why this error occurred. By the way, I am using a GTX 1060 6GB. (Sorry, I'm not a native English speaker.)
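One thing worth checking, offered as a suggestion rather than a confirmed diagnosis: when a cuvid decoder is opened without an explicit CUDA device context, FFmpeg creates one internally, and a cuvidDecodePicture() call made against a mismatched context (for example from another thread) can fail with CUDA_ERROR_INVALID_HANDLE. The sketch below hands the decoder a device context before avcodec_open2() via the standard av_hwdevice_ctx_create() API; initCuvidDecoder is a hypothetical name echoing the question's code.

#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>

static AVCodecContext *initCuvidDecoder(int width, int height)
{
    AVCodec *codec = avcodec_find_decoder_by_name("h264_cuvid");
    if (!codec)
        return NULL;                        /* decoder not available */

    AVCodecContext *ctx = avcodec_alloc_context3(codec);
    ctx->width = width;
    ctx->height = height;

    /* Create a CUDA device context on GPU "0" and attach it to the
     * decoder before opening it, so every cuvid call runs against
     * the context created here. */
    AVBufferRef *hw_device_ctx = NULL;
    if (av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_CUDA,
                               "0", NULL, 0) < 0) {
        avcodec_free_context(&ctx);
        return NULL;                        /* no usable CUDA device */
    }
    ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
    av_buffer_unref(&hw_device_ctx);

    if (avcodec_open2(ctx, codec, NULL) < 0) {
        avcodec_free_context(&ctx);
        return NULL;
    }
    return ctx;
}

Two related points: h264_cuvid returns NV12 frames rather than YUV420P, so the sws_getContext() call above would need AV_PIX_FMT_NV12 as its source format, and avcodec_decode_video2() is deprecated in current FFmpeg in favor of avcodec_send_packet()/avcodec_receive_frame().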