
Other articles (57)
-
Updating from version 0.1 to 0.3
24 June 2013
An explanation of the notable changes made when moving from MediaSPIP version 0.1 to version 0.3. What's new?
Software dependencies: the latest FFmpeg releases are now used (>= v1.2.1); the dependencies for Smush are installed; MediaInfo and FFprobe are installed for metadata retrieval (a minimal sketch of such retrieval follows this list); ffmpeg2theora is no longer used; flvtool2 is dropped in favour of flvtool++; ffmpeg-php, which is no longer maintained, is no longer installed (...)
-
Customising by adding your logo, banner or background image
5 September 2013
Some themes support three customisation elements: adding a logo; adding a banner; adding a background image.
-
Writing a news item
21 June 2013
Present the changes in your MediaSPIP, or news about your projects, using the news section.
In spipeo, the default MediaSPIP theme, news items are displayed at the bottom of the main page, below the editorials.
You can customise the news-creation form.
News-creation form: for a document of the news type, the default fields are: publication date (customise the publication date) (...)
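As referenced in the update note above, here is a minimal sketch of metadata retrieval with FFmpeg's own libavformat, roughly what MediaInfo and FFprobe are installed to do. It is an illustration, not MediaSPIP's actual code; av_register_all() was still required on the FFmpeg 1.x releases the note targets (it has since been removed from FFmpeg).

/* Print a media file's container-level metadata and duration. */
#include <stdio.h>
#include <libavformat/avformat.h>

int main(int argc, char **argv) {
    AVFormatContext *fmt = NULL;
    AVDictionaryEntry *tag = NULL;

    if (argc < 2) {
        fprintf(stderr, "usage: %s <file>\n", argv[0]);
        return 1;
    }
    av_register_all();                       /* needed on FFmpeg 1.x */
    if (avformat_open_input(&fmt, argv[1], NULL, NULL) < 0)
        return 1;                            /* could not open the container */
    if (avformat_find_stream_info(fmt, NULL) >= 0) {
        /* Walk the metadata dictionary (title, artist, encoder, ...). */
        while ((tag = av_dict_get(fmt->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
            printf("%s=%s\n", tag->key, tag->value);
        printf("duration=%.2f s\n", fmt->duration / (double)AV_TIME_BASE);
    }
    avformat_close_input(&fmt);
    return 0;
}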
On other sites (8840)
-
How to explain the given ffplay C code snippet?
20 July 2015, by Jerikc XIONG
The following code snippet is from ffplay:
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
    int got_frame = 0;

    do {
        int ret = -1;

        if (d->queue->abort_request)
            return -1;

        /* Fetch a fresh packet when none is pending, or when the pending one
           belongs to an older generation (its serial no longer matches the
           queue's serial, e.g. after a seek). */
        if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
            AVPacket pkt;
            do {
                if (d->queue->nb_packets == 0)
                    SDL_CondSignal(d->empty_queue_cond);
                if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
                    return -1;
                if (pkt.data == flush_pkt.data) {
                    /* Flush packet: drop codec state and restart timestamps. */
                    avcodec_flush_buffers(d->avctx);
                    d->finished = 0;
                    d->next_pts = d->start_pts;
                    d->next_pts_tb = d->start_pts_tb;
                }
            } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
            av_free_packet(&d->pkt);
            d->pkt_temp = d->pkt = pkt;
            d->packet_pending = 1;
        }

        switch (d->avctx->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
            if (got_frame) {
                if (decoder_reorder_pts == -1) {
                    frame->pts = av_frame_get_best_effort_timestamp(frame);
                } else if (decoder_reorder_pts) {
                    frame->pts = frame->pkt_pts;
                } else {
                    frame->pts = frame->pkt_dts;
                }
            }
            break;
        case AVMEDIA_TYPE_AUDIO:
            ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
            if (got_frame) {
                /* Rescale the pts into a 1/sample_rate time base and predict
                   the next pts from the sample count. */
                AVRational tb = (AVRational){1, frame->sample_rate};
                if (frame->pts != AV_NOPTS_VALUE)
                    frame->pts = av_rescale_q(frame->pts, d->avctx->time_base, tb);
                else if (frame->pkt_pts != AV_NOPTS_VALUE)
                    frame->pts = av_rescale_q(frame->pkt_pts, av_codec_get_pkt_timebase(d->avctx), tb);
                else if (d->next_pts != AV_NOPTS_VALUE)
                    frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
                if (frame->pts != AV_NOPTS_VALUE) {
                    d->next_pts = frame->pts + frame->nb_samples;
                    d->next_pts_tb = tb;
                }
            }
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
            break;
        }

        if (ret < 0) {
            d->packet_pending = 0;
        } else {
            d->pkt_temp.dts =
            d->pkt_temp.pts = AV_NOPTS_VALUE;
            if (d->pkt_temp.data) {
                /* Consume the decoded bytes; video and subtitle packets are
                   always consumed whole. */
                if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
                    ret = d->pkt_temp.size;
                d->pkt_temp.data += ret;
                d->pkt_temp.size -= ret;
                if (d->pkt_temp.size <= 0)
                    d->packet_pending = 0;
            } else {
                if (!got_frame) {
                    d->packet_pending = 0;
                    d->finished = d->pkt_serial; // FLAG
                }
            }
        }
    } while (!got_frame && !d->finished);

    return got_frame;
}

It's difficult for me to understand this line:
d->finished = d->pkt_serial; // FLAG
Can anyone help me?
Thanks.
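A hedged reading of the flagged line, based on how ffplay's read thread consumes this field (a sketch against ffplay's own types, not the complete source): d->finished is not a boolean. It stores the serial of the packet generation that the decoder has fully drained; every seek bumps the packet queue's serial, so a "finished" recorded before a seek can never be mistaken for end-of-stream of the data queued after it (and the flush-packet branch above additionally resets it to 0).

/* Sketch of the consumer-side test this enables. The condition mirrors
 * ffplay's end-of-stream check; the wrapper name decoder_drained is an
 * assumption, not an ffplay function. */
static int decoder_drained(Decoder *d, PacketQueue *q, FrameQueue *fq) {
    /* Drained only if "no more frames" was reported for the SAME packet
       generation the queue is currently serving, and every decoded frame
       has been consumed. After a seek q->serial changes, the comparison
       fails, and playback continues with the newly queued data. */
    return d->finished == q->serial && frame_queue_nb_remaining(fq) == 0;
}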
-
IplImage crop and rotate - Android
26 February 2015, by Ganesh
I'm using ffmpeg to capture video for 30 seconds.
@Override
public void onPreviewFrame(byte[] data, Camera camera) {
    if (yuvIplimage != null && recording && rec) {
        new SaveFrame().execute(data);
    }
}

The SaveFrame class is below:
private class SaveFrame extends AsyncTask<byte[], Void, File> {
    long t;

    protected File doInBackground(byte[]... arg) {
        t = 1000 * (System.currentTimeMillis() - firstTime - pausedTime);
        toSaveFrames++;
        File pathCache = new File(Environment.getExternalStorageDirectory() + "/DCIM",
                (System.currentTimeMillis() / 1000L) + "_" + toSaveFrames + ".tmp");
        BufferedOutputStream bos;
        try {
            bos = new BufferedOutputStream(new FileOutputStream(pathCache));
            bos.write(arg[0]);
            bos.flush();
            bos.close();
        } catch (FileNotFoundException e) {
            e.printStackTrace();
            pathCache = null;
            toSaveFrames--;
        } catch (IOException e) {
            e.printStackTrace();
            pathCache = null;
            toSaveFrames--;
        }
        return pathCache;
    }

    @Override
    protected void onPostExecute(File filename) {
        if (filename != null) {
            savedFrames++;
            tempList.add(new FileFrame(t, filename));
        }
    }
}

Finally, I add all the frames, with crop and rotation:
private class AddFrame extends AsyncTask<Void, Integer, Void> {
    private int serial = 0;

    @Override
    protected Void doInBackground(Void... params) {
        for (int i = 0; i < tempList.size(); i++) {
            try {
                // Reconstruction of the read-back step (this part of the loop
                // body was garbled in the post; the catch blocks imply file
                // I/O, and the .file field name is an assumption):
                byte[] bytes = new byte[(int) tempList.get(i).file.length()];
                BufferedInputStream bis = new BufferedInputStream(
                        new FileInputStream(tempList.get(i).file));
                bis.read(bytes);
                bis.close();
                yuvIplimage.getByteBuffer().put(bytes);
                IplImage image = yuvIplimage;
                // final int startY = 640*(480-480)/2;
                // final int lenY = 640*480;
                // yuvIplimage.getByteBuffer().put(bytes, startY, lenY);
                // final int startVU = 640*480 + 640*(480-480)/4;
                // final int lenVU = 640*480/2;
                // yuvIplimage.getByteBuffer().put(bytes, startVU, lenVU);
                if (tempList.get(i).time > recorder.getTimestamp()) {
                    recorder.setTimestamp(tempList.get(i).time);
                }
                image = cropImage(image);
                image = rotate(image, 270);
                // image = rotateImage(image);
                recorder.record(image);
                Log.i(LOG_TAG, "record " + i);
                image = null;
                serial++;
                publishProgress(serial);
            } catch (FileNotFoundException e) {
                e.printStackTrace();
            } catch (IOException e) {
                e.printStackTrace();
            } catch (com.googlecode.javacv.FrameRecorder.Exception e) {
                e.printStackTrace();
            }
        }
        return null;
    }
    @Override
    protected void onProgressUpdate(Integer... serial) {
        int value = serial[0];
        creatingProgress.setProgress(value);
    }

    @Override
    protected void onPostExecute(Void v) {
        creatingProgress.dismiss();
        if (recorder != null && recording) {
            recording = false;
            Log.v(LOG_TAG, "Finishing recording, calling stop and release on recorder");
            try {
                recorder.stop();
                recorder.release();
                finish();
                startActivity(new Intent(RecordActivity.this, AnswerViewActivity.class));
            } catch (FFmpegFrameRecorder.Exception e) {
                e.printStackTrace();
            }
            recorder = null;
        }
    }
}

My crop and rotate methods are below:
private IplImage cropImage(IplImage src) {
    cvSetImageROI(src, r);
    IplImage cropped = IplImage.create(imageHeight, imageHeight, IPL_DEPTH_8U, 2);
    cvCopy(src, cropped);
    return cropped;
}

public static IplImage rotate(IplImage image, double angle) {
    IplImage copy = opencv_core.cvCloneImage(image);
    IplImage rotatedImage = opencv_core.cvCreateImage(opencv_core.cvGetSize(copy),
            copy.depth(), copy.nChannels());
    CvMat mapMatrix = opencv_core.cvCreateMat(2, 3, opencv_core.CV_32FC1);

    // Define the mid point
    CvPoint2D32f centerPoint = new CvPoint2D32f();
    centerPoint.x(copy.width() / 2);
    centerPoint.y(copy.height() / 2);

    // Get the rotation matrix
    opencv_imgproc.cv2DRotationMatrix(centerPoint, angle, 1.0, mapMatrix);

    // Rotate the image
    opencv_imgproc.cvWarpAffine(copy, rotatedImage, mapMatrix,
            opencv_imgproc.CV_INTER_CUBIC + opencv_imgproc.CV_WARP_FILL_OUTLIERS,
            opencv_core.cvScalarAll(170));

    opencv_core.cvReleaseImage(copy);
    opencv_core.cvReleaseMat(mapMatrix);
    return rotatedImage;
}
My final video is cropped and rotated, but green frames and discoloured frames are mixed into it.
How can I fix this? I'm not familiar with IplImage. Some blogs mention that the data is in YUV format: first you need to convert Y, then convert UV.
How can I solve this problem?
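A hedged note on the likely cause: the Android camera preview delivers NV21 (a full-resolution Y plane followed by a quarter-resolution interleaved VU plane), and the code above treats that buffer as packed 2-channel pixels, so any crop or warp shifts Y against VU and produces exactly this kind of green and discoloured output. Below is a minimal sketch of the usual fix, written against the plain OpenCV C API that JavaCV wraps: convert to BGR once, then do all geometry on the BGR image. The names nv21, width and height are assumed inputs, and CV_YUV2BGR_NV21 needs OpenCV >= 2.4; this is an illustration, not a drop-in patch.

/* Sketch: NV21 preview bytes -> BGR IplImage, so crop/rotate work per pixel.
 * Assumes width is a multiple of 4 so widthStep == width (no row padding). */
#include <string.h>
#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>

IplImage *nv21_to_bgr(const unsigned char *nv21, int width, int height) {
    /* NV21 is height rows of Y followed by height/2 rows of interleaved VU,
       i.e. a (height * 3/2) x width single-channel image. */
    IplImage *yuv = cvCreateImage(cvSize(width, height * 3 / 2), IPL_DEPTH_8U, 1);
    memcpy(yuv->imageData, nv21, (size_t)width * height * 3 / 2);

    IplImage *bgr = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
    cvCvtColor(yuv, bgr, CV_YUV2BGR_NV21);   /* one call, no manual Y/UV math */

    cvReleaseImage(&yuv);
    return bgr;   /* run cvSetImageROI/cvCopy and cvWarpAffine on this image */
}

In the JavaCV code above the same idea needs only one extra image: keep yuvIplimage as a (height * 3/2) x width single-channel image, cvCvtColor it into a 3-channel BGR IplImage each iteration, and pass that to cropImage and rotate; the 2-channel images then become unnecessary, and the recorder must be fed a matching pixel format.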