
Recherche avancée
Médias (1)
-
Rennes Emotion Map 2010-11
19 octobre 2011, par
Mis à jour : Juillet 2013
Langue : français
Type : Texte
Autres articles (70)
-
Publier sur MédiaSpip
13 juin 2013 — Puis-je poster des contenus à partir d’une tablette Ipad ?
Oui, si votre Médiaspip installé est à la version 0.2 ou supérieure. Contacter au besoin l’administrateur de votre MédiaSpip pour le savoir -
Contribute to a better visual interface
13 avril 2011 — MediaSPIP is based on a system of themes and templates. Templates define the placement of information on the page, and can be adapted to a wide range of uses. Themes define the overall graphic appearance of the site.
Anyone can submit a new graphic theme or template and make it available to the MediaSPIP community. -
Ajouter notes et légendes aux images
7 février 2011, par — Pour pouvoir ajouter notes et légendes aux images, la première étape est d’installer le plugin "Légendes".
Une fois le plugin activé, vous pouvez le configurer dans l’espace de configuration afin de modifier les droits de création / modification et de suppression des notes. Par défaut seuls les administrateurs du site peuvent ajouter des notes aux images.
Modification lors de l’ajout d’un média
Lors de l’ajout d’un média de type "image" un nouveau bouton apparait au dessus de la prévisualisation (...)
Sur d’autres sites (11606)
-
Frame sliced into 2 under some degree. How do I fix it ?
26 novembre 2020, par Алекс Аникей — I am trying to use the h264 codec in a videochat application, and for some reason the frame is sliced into 2 triangles (picture below). I send my desktop image to another person and they get this image on the other client.


What settings did I set wrong ?
My code :


Init :


/**
 * Initialise the H.264 encoder (x264) and decoder (libavcodec) for a
 * video-call session and return `vc`.
 *
 * On decoder-setup failure the decoder fields are left NULL and a warning is
 * logged; `vc` is still returned so the caller can decide how to proceed.
 *
 * NOTE(review): `param` (an x264_param_t) is referenced but never declared in
 * this snippet — presumably declared in code omitted above; confirm.
 * NOTE(review): the capture size is hard-coded to 1920x1080 — confirm callers
 * never feed a different resolution.
 */
VCSession *vc_new_x264(Logger *log, ToxAV *av, uint32_t friend_number, toxav_video_receive_frame_cb *cb, void *cb_data,
                       VCSession *vc)
{
    if (x264_param_default_preset(&param, "slow", "zerolatency") < 0) {
        // goto fail;
    }

    param.i_csp = X264_CSP_I420;
    param.i_width = 1920;
    param.i_height = 1080;
    vc->h264_enc_width = param.i_width;
    vc->h264_enc_height = param.i_height;

    param.i_keyint_max = 30;

    param.b_vfr_input = 1; /* VFR input. If 1, use timebase and timestamps for ratecontrol purposes.
                            * If 0, use fps only. */
    param.i_timebase_num = 1;    // 1 ms = timebase units = (1/1000)s
    param.i_timebase_den = 1000; // 1 ms = timebase units = (1/1000)s
    param.b_repeat_headers = 1;  // resend SPS/PPS with every keyframe
    param.b_annexb = 1;          // Annex-B byte stream (start codes)

    param.rc.f_rate_tolerance = VIDEO_F_RATE_TOLERANCE_H264;
    param.rc.i_vbv_buffer_size = 1500;
    param.rc.i_vbv_max_bitrate = VIDEO_BITRATE_INITIAL_VALUE_H264 * 1;

    vc->h264_enc_bitrate = VIDEO_BITRATE_INITIAL_VALUE_H264;

    param.rc.i_qp_min = 13;
    param.rc.i_qp_max = 35; // max quantizer for x264

    param.rc.b_stat_read = 0;
    param.rc.b_stat_write = 0;

    if (x264_param_apply_profile(&param,
                                 "high") < 0) { // "baseline", "main", "high", "high10", "high422", "high444"
        // goto fail;
    }

    if (x264_picture_alloc(&(vc->h264_in_pic), param.i_csp, param.i_width, param.i_height) < 0) {
        // goto fail;
    }

    vc->h264_encoder = x264_encoder_open(&param);

    /* ---- decoder (libavcodec) ---- */
    vc->h264_decoder = NULL;
    avcodec_register_all();

    AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);

    if (!codec) {
        // Fix: bail out here instead of dereferencing the NULL codec below.
        LOGGER_WARNING(log, "codec not found H264 on decoder");
        return vc;
    }

    vc->h264_decoder = avcodec_alloc_context3(codec);

    if (!vc->h264_decoder) {
        // Fix: avcodec_alloc_context3 can fail; the original dereferenced the
        // result unconditionally.
        LOGGER_WARNING(log, "could not allocate decoder context for H264");
        return vc;
    }

    if (codec->capabilities & AV_CODEC_CAP_TRUNCATED) {
        vc->h264_decoder->flags |= AV_CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
    }

    vc->h264_decoder->delay = 5;

    if (avcodec_open2(vc->h264_decoder, codec, NULL) < 0) {
        LOGGER_WARNING(log, "could not open codec H264 on decoder");
    }

    return vc;
}



Getting a frame and decoding it :


/**
 * Pop one received video packet from the session's ring buffer and hand it to
 * the H.264 decoder.  No-op when `vc` is NULL or no packet is queued.
 *
 * Ownership: decode_frame_h264() takes ownership of the popped RTPMessage and
 * frees it.
 */
void vc_iterate_x264(VCSession *vc)
{
    if (!vc) {
        return;
    }

    pthread_mutex_lock(vc->queue_mutex);

    struct RTPMessage *p;

    if (!rb_read(vc->vbuf_raw, (void **)&p)) {
        LOGGER_TRACE(vc->log, "no Video frame data available");
        pthread_mutex_unlock(vc->queue_mutex);
        return;
    }

    pthread_mutex_unlock(vc->queue_mutex);

    const struct RTPHeader *const header = &p->header;

    uint32_t full_data_len;

    if (header->flags & RTP_LARGE_FRAME) {
        // Large frames carry the reassembled length in the header.
        full_data_len = header->data_length_full;
        LOGGER_WARNING(vc->log, "vc_iterate:001:full_data_len=%d", (int)full_data_len);
    } else {
        full_data_len = p->len;

        if (header->data_length_lower != full_data_len) {
            // Fix: every other logger call in this file passes the Logger as
            // the first argument; the original omitted vc->log here.
            LOGGER_ERROR(vc->log, "Data header and packet don't equal: %d - header %d - packet",
                         (int)header->data_length_lower, (int)full_data_len);
        }

        LOGGER_DEBUG(vc->log, "vc_iterate:002");
    }

    decode_frame_h264(vc, p, full_data_len);
}

/**
 * Decode one H.264 packet and deliver every produced frame to the session's
 * receive callback (vc->vcb) as planar YUV.
 *
 * Takes ownership of `p` and frees it before returning.
 *
 * NOTE(review): the packet data is a plain calloc'd buffer attached to an
 * AVPacket that is then av_packet_free'd — this mixes allocators; it appears
 * to work because the buffer is freed separately, but confirm against the
 * libavcodec ownership rules.
 */
void decode_frame_h264(VCSession *vc,
                       struct RTPMessage *p,
                       uint32_t full_data_len)
{
    AVPacket *compr_data = av_packet_alloc();

    if (!compr_data) {
        // Fix: the original used the packet without checking the allocation.
        free(p);
        return;
    }

    // Decoder input must be padded (libavcodec reads slightly past the end).
    uint8_t *tmp_buf = calloc(1, full_data_len + FF_INPUT_BUFFER_PADDING_SIZE);

    if (!tmp_buf) {
        // Fix: the original memcpy'd into an unchecked calloc result.
        av_packet_free(&compr_data);
        free(p);
        return;
    }

    memcpy(tmp_buf, p->data, full_data_len);

    compr_data->data = tmp_buf; // p->data;
    compr_data->size = (int)full_data_len; // hmm, "int" again

    avcodec_send_packet(vc->h264_decoder, compr_data);

    // Drain every frame the decoder can produce for this packet.
    for (;;) {
        AVFrame *frame = av_frame_alloc();

        if (!frame) {
            break;
        }

        int ret_ = avcodec_receive_frame(vc->h264_decoder, frame);

        if (ret_ < 0) {
            // EAGAIN/EOF (no more frames) or a decode error — either way stop.
            // Fix: the original leaked `frame` on these break paths.
            av_frame_free(&frame);
            break;
        }

        vc->vcb(vc->av, vc->friend_number, frame->width, frame->height,
                (const uint8_t *)frame->data[0],
                (const uint8_t *)frame->data[1],
                (const uint8_t *)frame->data[2],
                frame->linesize[0], frame->linesize[1],
                frame->linesize[2], vc->vcb_user_data);

        av_frame_free(&frame);
    }

    av_packet_free(&compr_data);
    free(tmp_buf);
    free(p);
}



Send frame and encoding :


/**
 * Encode one YUV420 frame with x264 and send it over RTP to `friend_number`.
 *
 * Returns true on success; on failure stores a Toxav_Err_Send_Frame code in
 * *error (when non-NULL).
 *
 * NOTE(review): `call` below is declared but NEVER assigned before
 * `pthread_mutex_lock(call->mutex_video)` — dereferencing an uninitialized
 * pointer is undefined behavior.  The lookup (normally something like
 * `call_get(av, friend_number)` plus a NULL check) was evidently lost when
 * this snippet was extracted; it must be restored before this code can work.
 * NOTE(review): `av->mutex` is unlocked at the top but never visibly locked
 * in this snippet — presumably locked by omitted code above; confirm.
 */
bool toxav_video_send_frame(ToxAV *av, uint32_t friend_number, uint16_t width, uint16_t height, const uint8_t *y,
 const uint8_t *u, const uint8_t *v, Toxav_Err_Send_Frame *error, int16_t kf_max_dist, vpx_codec_er_flags_t error_resilient,
 unsigned int my_lag_in_frames, uint16_t kf_mode, uint16_t quality_mode)
{
 Toxav_Err_Send_Frame rc = TOXAV_ERR_SEND_FRAME_OK;
 // NOTE(review): uninitialized — see the function comment.
 ToxAVCall *call;
 uint64_t video_frame_record_timestamp = current_time_monotonic(av->m->mono_time);

 int vpx_encode_flags = 0;

 pthread_mutex_lock(call->mutex_video);
 pthread_mutex_unlock(av->mutex);

 // Reject NULL planes before touching the encoder.
 if (y == nullptr || u == nullptr || v == nullptr) {
 pthread_mutex_unlock(call->mutex_video);
 rc = TOXAV_ERR_SEND_FRAME_NULL;
 goto RETURN;
 }

 // Force keyframes for the first VIDEO_SEND_X_KEYFRAMES_FIRST frames so the
 // receiver can start decoding immediately; ssrc doubles as the frame count
 // during this startup phase.
 if (call->video_rtp->ssrc < VIDEO_SEND_X_KEYFRAMES_FIRST) {
 // Key frame flag for first frames
 vpx_encode_flags = VPX_EFLAG_FORCE_KF;
 LOGGER_INFO(av->m->log, "I_FRAME_FLAG:%d only-i-frame mode", call->video_rtp->ssrc);

 ++call->video_rtp->ssrc;
 } else if (call->video_rtp->ssrc == VIDEO_SEND_X_KEYFRAMES_FIRST) {
 // normal keyframe placement
 vpx_encode_flags = 0;
 LOGGER_INFO(av->m->log, "I_FRAME_FLAG:%d normal mode", call->video_rtp->ssrc);

 ++call->video_rtp->ssrc;
 }

 // Encoded NAL unit(s) and their total size, filled in by the encoder.
 x264_nal_t *nal = NULL;
 int i_frame_size = 0;

 uint32_t result = encode_frame_h264(av, friend_number, width, height,
 y, u, v,
 &video_frame_record_timestamp,
 vpx_encode_flags,
 &nal,
 &i_frame_size);
 if (result != 0) {
 pthread_mutex_unlock(call->mutex_video);
 rc = TOXAV_ERR_SEND_FRAME_INVALID;
 goto RETURN;
 }

 ++call->video->frame_counter;

 // NOTE(review): the uint32_t return value (0/1) is assigned into the
 // Toxav_Err_Send_Frame `rc` that is ALSO passed by address as the last
 // argument — the assignment overwrites whatever error code send_frames_h264
 // stored through &rc.  Looks like a bug; confirm intended semantics.
 rc = send_frames_h264(av, friend_number, width, height,
 y, u, v, call,
 &video_frame_record_timestamp,
 vpx_encode_flags,
 &nal,
 &i_frame_size,
 &rc);

 pthread_mutex_unlock(call->mutex_video);

RETURN:

 if (error) {
 *error = rc;
 }

 return rc == TOXAV_ERR_SEND_FRAME_OK;
}

/**
 * Transmit one already-encoded H.264 frame (NAL payload in *nal, length in
 * *i_frame_size) over the call's RTP session.
 *
 * Returns 0 on success; 1 on failure, with *rc set to
 * TOXAV_ERR_SEND_FRAME_RTP_FAILED.  A zero/negative *i_frame_size (encoder
 * buffered the frame) is treated as a failure by this code.
 *
 * NOTE(review): width/height/y/u/v/vpx_encode_flags are unused here — the
 * frame is already encoded — they are kept only for signature compatibility
 * with the VP8 send path.
 * NOTE(review): rtp_send_data may not set errno on failure; the strerror()
 * text below could be stale — confirm.
 */
uint32_t send_frames_h264(ToxAV *av, uint32_t friend_number, uint16_t width, uint16_t height,
                          const uint8_t *y,
                          const uint8_t *u, const uint8_t *v, ToxAVCall *call,
                          uint64_t *video_frame_record_timestamp,
                          int vpx_encode_flags,
                          x264_nal_t **nal,
                          int *i_frame_size,
                          TOXAV_ERR_SEND_FRAME *rc)
{
    if (*i_frame_size > 0) {
        // use the record timestamp that was actually used for this frame
        *video_frame_record_timestamp = (uint64_t)call->video->h264_in_pic.i_pts;
        const uint32_t frame_length_in_bytes = *i_frame_size;
        const int keyframe = (int)call->video->h264_out_pic.b_keyframe;

        // Fix: %lu with a uint64_t argument is undefined behavior on
        // platforms where long is 32-bit; cast to a matching type instead.
        LOGGER_DEBUG(av->m->log, "video packet record time: %llu",
                     (unsigned long long)(*video_frame_record_timestamp));

        int res = rtp_send_data
                  (
                      call->video_rtp,
                      (const uint8_t *)((*nal)->p_payload),
                      frame_length_in_bytes,
                      keyframe,
                      *video_frame_record_timestamp,
                      av->m->log
                  );

        (*video_frame_record_timestamp)++;

        if (res < 0) {
            LOGGER_WARNING(av->m->log, "Could not send video frame: %s", strerror(errno));
            *rc = TOXAV_ERR_SEND_FRAME_RTP_FAILED;
            return 1;
        }

        return 0;
    } else {
        *rc = TOXAV_ERR_SEND_FRAME_RTP_FAILED;
        return 1;
    }
}



I get image like this :




-
I am using ffmpeg java library to convert captured screenshots to video. Video output is blurry
2 octobre 2020, par dark prince — I am using the ffmpeg java library to convert captured screenshots to video. The video which is generated as output is blurry.


I am using bit rate as 9000, frames per sec as 25 and video size as that of desktop screen size.


Any suggestions on how to solve this issue.


P.S. I cannot use ffmpeg.exe and command line due to certain restrictions and hence I am opting for ffmpeg java library.


Any suggestions on the issue or suggestions on any better approach will be helpful.


import java.awt.AWTException;
 import java.awt.Dimension;
 import java.awt.FlowLayout;
 import java.awt.Rectangle;
 import java.awt.Robot;
 import java.awt.Toolkit;
 import java.awt.event.ActionEvent;
 import java.awt.event.ActionListener;
 import java.awt.image.BufferedImage;
 import java.io.File;
 import java.io.IOException;
 import java.util.Date;
 
 import javax.imageio.ImageIO;
 import javax.swing.JButton;
 import javax.swing.JFrame;
 import javax.swing.JLabel;
 import javax.swing.JOptionPane;
 
 import org.bytedeco.javacpp.avcodec;
 import org.bytedeco.javacv.FFmpegFrameRecorder;
 import org.bytedeco.javacv.OpenCVFrameConverter;
 
 /**
  * Records the desktop (or a user-selected region) by taking screenshots on
  * thread t1 and appending them to an FFmpegFrameRecorder (JavaCV) video on
  * thread t2.
  *
  * NOTE(review): this listing was scraped from a web page and the HTML
  * escaping ate every '<' character, so two `for` loop headers and the whole
  * signature of getRecorder(...) are missing (see prepareVideo() below).
  * The class does NOT compile as shown.
  */
 public class ScreenRecorder{
 
 // Shared state between the screenshot thread (t1) and the encoder thread
 // (t2).  NOTE(review): mutated from two threads with no synchronization or
 // volatile — confirm visibility is acceptable for this use.
 public static boolean videoComplete=false;
 public static String inputImageDir="inputImgFolder"+File.separator;
 public static String inputImgExt="png";
 public static String outputVideo="recording.mp4"; 
 public static int counter=0;
 public static int imgProcessed=0;
 public static FFmpegFrameRecorder recorder=null;
 public static int videoWidth=1920;
 public static int videoHeight=1080;
 public static int videoFrameRate=3;
 public static int videoQuality=0; // 0 is the max quality
 public static int videoBitRate=9000;
 public static String videoFormat="mp4";
 public static int videoCodec=avcodec.AV_CODEC_ID_MPEG4;
 public static Thread t1=null;
 public static Thread t2=null;
 public static JFrame frame=null;
 // Crop-region corners, filled in by CropRegion.draggedScreen().
 public static boolean isRegionSelected=false;
 public static int c1=0;
 public static int c2=0;
 public static int c3=0;
 public static int c4=0;
 
 
 // Entry point: builds the recorder and robot, resets the screenshot folder,
 // then shows the GUI.
 public static void main(String[] args) {
 
 try {
 if(getRecorder()==null)
 {
 System.out.println("Cannot make recorder object, Exiting program");
 System.exit(0);
 }
 if(getRobot()==null)
 {
 System.out.println("Cannot make robot object, Exiting program");
 System.exit(0);
 }
 File scanFolder=new File(inputImageDir);
 // NOTE(review): File.delete() fails silently on a non-empty directory.
 scanFolder.delete();
 scanFolder.mkdirs();
 
 createGUI();
 } catch (Exception e) {
 System.out.println("Exception in program "+e.getMessage());
 }
 }
 
 // Builds the three-button Swing UI (select region / start / stop).
 public static void createGUI()
 {
 frame=new JFrame("Screen Recorder");
 JButton b1=new JButton("Select Region for Recording");
 JButton b2=new JButton("Start Recording");
 JButton b3=new JButton("Stop Recording");
 JLabel l1=new JLabel("<br />If you dont select a region then full screen recording <br /> will be made when you click on Start Recording");
 l1.setFont (l1.getFont ().deriveFont (20.0f));
 b1.addActionListener(new ActionListener() {
 @Override
 public void actionPerformed(ActionEvent e) {
 try {
 JOptionPane.showMessageDialog(frame, "A new window will open. Use your mouse to select the region you like to record");
 new CropRegion().getImage();
 } catch (Exception e1) {
 // TODO Auto-generated catch block
 System.out.println("Issue while trying to call the module to crop region");
 e1.printStackTrace();
 } 
 }
 });
 b2.addActionListener(new ActionListener() {
 @Override
 public void actionPerformed(ActionEvent e) {
 counter=0;
 startRecording();
 }
 });
 b3.addActionListener(new ActionListener() {
 @Override
 public void actionPerformed(ActionEvent e) {
 stopRecording();
 System.out.print("Exiting...");
 System.exit(0);
 }
 });
 
 frame.add(b1);
 frame.add(b2);
 frame.add(b3);
 frame.add(l1);
 frame.setLayout(new FlowLayout(0));
 frame.setVisible(true);
 frame.setSize(1000, 170);
 frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
 }
 
 // Spawns the screenshot thread (t1) and the video-builder thread (t2).
 public static void startRecording()
 {
 t1=new Thread()
 {
 public void run() {
 try {
 takeScreenshot(getRobot());
 } catch (Exception e) {
 JOptionPane.showMessageDialog(frame, "Cannot make robot object, Exiting program "+e.getMessage());
 System.out.println("Cannot make robot object, Exiting program "+e.getMessage());
 System.exit(0);
 }
 }
 };
 
 t2=new Thread()
 {
 public void run() {
 prepareVideo();
 }
 };
 
 t1.start();
 t2.start();
 System.out.println("Started recording at "+new Date());
 }
 
 // Returns a fresh AWT Robot; wraps AWTException into a plain Exception.
 public static Robot getRobot() throws Exception
 {
 Robot r=null;
 try {
 r = new Robot();
 return r;
 } catch (AWTException e) {
 JOptionPane.showMessageDialog(frame, "Issue while initiating Robot object "+e.getMessage());
 System.out.println("Issue while initiating Robot object "+e.getMessage());
 throw new Exception("Issue while initiating Robot object");
 }
 }
 
 // Loop on t1: captures the screen (or the selected region) to numbered PNG
 // files until stopRecording() sets videoComplete.
 public static void takeScreenshot(Robot r)
 {
 Dimension size = Toolkit.getDefaultToolkit().getScreenSize();
 Rectangle rec=new Rectangle(size);
 if(isRegionSelected)
 {
 rec=new Rectangle(c1, c2, c3-c1, c4-c2);
 }
 while(!videoComplete)
 {
 counter++;
 BufferedImage img = r.createScreenCapture(rec);
 try {
 ImageIO.write(img, inputImgExt, new File(inputImageDir+counter+"."+inputImgExt));
 } catch (IOException e) {
 JOptionPane.showMessageDialog(frame, "Got an issue while writing the screenshot to disk "+e.getMessage());
 System.out.println("Got an issue while writing the screenshot to disk "+e.getMessage());
 counter--;
 }
 }
 }
 
 // Loop on t2: polls the screenshot folder and appends each image to the
 // video, deleting it afterwards.
 // NOTE(review): the next lines are garbled by the scrape — the `for` loop
 // header that declared `i` was eaten along with everything after its '<',
 // which is why `i` appears undeclared below.
 public static void prepareVideo()
 {
 File scanFolder=new File(inputImageDir);
 while(!videoComplete)
 {
 File[] inputFiles=scanFolder.listFiles();
 try {
 getRobot().delay(500);
 } catch (Exception e) {
 }
 //for(int i=0;i/imgProcessed++;
 addImageToVideo(inputFiles[i].getAbsolutePath());
 //String imgToAdd=scanFolder.getAbsolutePath()+File.separator+imgProcessed+"."+inputImgExt;
 //addImageToVideo(imgToAdd);
 //new File(imgToAdd).delete();
 inputFiles[i].delete();
 }
 }
 
 File[] inputFiles=scanFolder.listFiles();
 // NOTE(review): text lost here — this `for` presumably drains the remaining
 // images, and a `getRecorder()` factory method (which builds and configures
 // the static FFmpegFrameRecorder) begins somewhere in the missing span; only
 // its tail from `recorder.start()` onward survived.
 for(int i=0;i/ maximum quality
 recorder.start();
 }
 catch(Exception e)
 {
 JOptionPane.showMessageDialog(frame, "Exception while starting the recorder object "+e.getMessage());
 System.out.println("Exception while starting the recorder object "+e.getMessage());
 throw new Exception("Unable to start recorder");
 }
 return recorder;
 }
 
 // Converter used to turn OpenCV IplImage frames into JavaCV Frames.
 public static OpenCVFrameConverter.ToIplImage getFrameConverter()
 {
 OpenCVFrameConverter.ToIplImage grabberConverter = new OpenCVFrameConverter.ToIplImage();
 return grabberConverter;
 }
 
 // Loads one image from disk and records it as one video frame.
 public static void addImageToVideo(String imgPath)
 {
 try {
 getRecorder().record(getFrameConverter().convert(cvLoadImage(imgPath)));
 } catch (Exception e) {
 JOptionPane.showMessageDialog(frame, "Exception while adding image to video "+e.getMessage());
 System.out.println("Exception while adding image to video "+e.getMessage());
 }
 }
 
 // Signals both worker threads to finish, waits for them, then closes the
 // recorder and reports where the output file was saved.
 public static void stopRecording()
 {
 try {
 videoComplete=true;
 System.out.println("Stopping recording at "+new Date());
 t1.join();
 System.out.println("Screenshot thread complete");
 t2.join();
 System.out.println("Video maker thread complete");
 getRecorder().stop();
 System.out.println("Recording has been saved successfully at "+new File(outputVideo).getAbsolutePath());
 JOptionPane.showMessageDialog(frame, "Recording has been saved successfully at "+new File(outputVideo).getAbsolutePath());
 } catch (Exception e) {
 System.out.println("Exception while stopping the recorder "+e.getMessage());
 }
 }
 }



Imagepanel.java


import java.awt.Dimension;
import java.awt.Graphics;
import java.awt.Image;
import javax.swing.ImageIcon;
import javax.swing.JPanel;

/**
 * A JPanel that displays a single Image at its natural size.
 * The panel's preferred/min/max sizes are pinned to the image dimensions.
 */
class ImagePanel
 extends JPanel
{
 private Image img;
 
 /** Loads the image from a file path. */
 public ImagePanel(String img)
 {
 this(new ImageIcon(img).getImage());
 }
 
 /** Wraps an already-loaded Image and sizes the panel to it. */
 public ImagePanel(Image img)
 {
 this.img = img;
 Dimension size = new Dimension(img.getWidth(null), img.getHeight(null));
 
 setPreferredSize(size);
 setMinimumSize(size);
 setMaximumSize(size);
 setSize(size);
 setLayout(null);
 }
 
 @Override
 public void paintComponent(Graphics g)
 {
 // Fix: always invoke super.paintComponent first — skipping it leaves
 // stale pixels / rendering artifacts on opaque Swing components.
 super.paintComponent(g);
 g.drawImage(this.img, 0, 0, null);
 }
}



CropRegion.java


import java.awt.AWTException;
import java.awt.Dimension;
import java.awt.FlowLayout;
import java.awt.Graphics;
import java.awt.Rectangle;
import java.awt.Robot;
import java.awt.Toolkit;
import java.awt.event.MouseEvent;
import java.awt.event.MouseListener;
import java.awt.event.MouseMotionListener;
import java.awt.image.BufferedImage;
import java.io.IOException;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JOptionPane;


/**
 * Full-screen overlay that shows a screenshot of the desktop and lets the
 * user drag-select a rectangular region to record.  On mouse release the
 * chosen corners are copied into ScreenRecorder.c1..c4.
 */
public class CropRegion implements MouseListener,
 MouseMotionListener {

 int drag_status = 0; // becomes 1 once a drag has started
 int c1;              // drag start x
 int c2;              // drag start y
 int c3;              // drag end x
 int c4;              // drag end y
 JFrame frame=null;
 static int counter=0;
 JLabel background=null;

 
 /** Captures the desktop and shows it in an undecorated full-screen frame. */
 public void getImage() throws AWTException, IOException, InterruptedException {
 Dimension size = Toolkit.getDefaultToolkit().getScreenSize();
 Robot robot = new Robot();
 BufferedImage img = robot.createScreenCapture(new Rectangle(size));
 ImagePanel panel = new ImagePanel(img);
 frame=new JFrame();
 frame.add(panel);
 frame.setLocation(0, 0);
 frame.setSize(size);
 frame.setLayout(new FlowLayout());
 frame.setUndecorated(true);
 frame.setVisible(true);
 frame.addMouseListener(this);
 frame.addMouseMotionListener(this);
 frame.setDefaultCloseOperation(JFrame.DISPOSE_ON_CLOSE);
 }

 /** Publishes the selected corners to ScreenRecorder and closes the overlay. */
 public void draggedScreen() throws Exception {
 ScreenRecorder.c1=c1;
 ScreenRecorder.c2=c2;
 ScreenRecorder.c3=c3;
 ScreenRecorder.c4=c4;
 ScreenRecorder.isRegionSelected=true;
 JOptionPane.showMessageDialog(frame, "Region Selected.Please click on Start Recording button to record the selected region.");
 frame.dispose();
 }

 public void mouseClicked(MouseEvent arg0) {
 }

 public void mouseEntered(MouseEvent arg0) {
 }

 public void mouseExited(MouseEvent arg0) {
 }

 /** Records the drag start corner. */
 public void mousePressed(MouseEvent arg0) {
 paint();
 this.c1 = arg0.getX();
 this.c2 = arg0.getY();
 }

 /** Finalizes the selection if a drag actually happened. */
 public void mouseReleased(MouseEvent arg0) {
 paint();
 if (this.drag_status == 1) {
 this.c3 = arg0.getX();
 this.c4 = arg0.getY();
 try {
 draggedScreen();
 } catch (Exception e) {
 e.printStackTrace();
 }
 }
 }

 /** Tracks the moving corner while the user drags. */
 public void mouseDragged(MouseEvent arg0) {
 paint();
 this.drag_status = 1;
 this.c3 = arg0.getX();
 this.c4 = arg0.getY();
 }

 public void mouseMoved(MouseEvent arg0) {
 }

 /**
  * Draws the current selection rectangle as feedback.
  *
  * Fix: the original negated both w and h and then re-corrected only w's
  * sign, so dragging upward produced a negative height and drawRect drew
  * nothing; it also always anchored the rect at the press point.  Normalize
  * to the top-left corner and absolute dimensions so all four drag
  * directions work.
  *
  * NOTE(review): painting with frame.getGraphics() right after repaint() is
  * fragile (the async repaint can erase the rect) — kept as-is to preserve
  * the original feedback behavior.
  */
 public void paint() {
 Graphics g = frame.getGraphics();
 frame.repaint();
 int x = Math.min(this.c1, this.c3);
 int y = Math.min(this.c2, this.c4);
 int w = Math.abs(this.c3 - this.c1);
 int h = Math.abs(this.c4 - this.c2);
 g.drawRect(x, y, w, h);
 }
}



-
What is the command line to add a watermark while screen recording using ffmpeg ? [duplicate]
11 septembre 2020, par Yong Ju — I tried to record the screen using an ffmpeg command line, and I accomplished it using this command.
ffmpeg.exe -rtbufsize 1500M -f -y -rtbufsize 100M -f gdigrab -t 00:02:00 -framerate 30 0 0 -probesize 10M 1920 1080 -draw_mouse 1 -i desktop -c:v libx264 -r 30 -preset ultrafast -tune zerolatency -crf 30 -pix_fmt yuv420p output.avi".


Now, I want to add watermark while recording video.
If you have good experience this field, Please give me good advice.
Thanks for your attention.
Sincerely.