
Media (29)
-
#7 Ambience
16 October 2011
Updated: June 2015
Language: English
Type: Audio
-
#6 Teaser Music
16 October 2011
Updated: February 2013
Language: English
Type: Audio
-
#5 End Title
16 October 2011
Updated: February 2013
Language: English
Type: Audio
-
#3 The Safest Place
16 October 2011
Updated: February 2013
Language: English
Type: Audio
-
#4 Emo Creates
15 October 2011
Updated: February 2013
Language: English
Type: Audio
-
#2 Typewriter Dance
15 October 2011
Updated: February 2013
Language: English
Type: Audio
Other articles (14)
-
List of compatible distributions
26 April 2011
The table below lists the Linux distributions compatible with the automated installation script of MediaSPIP.

Distribution name | Version name | Version number
Debian | Squeeze | 6.x.x
Debian | Wheezy | 7.x.x
Debian | Jessie | 8.x.x
Ubuntu | The Precise Pangolin | 12.04 LTS
Ubuntu | The Trusty Tahr | 14.04

If you want to help us improve this list, you can give us access to a machine whose distribution is not listed above, or send us the fixes needed to add it (...)
-
Encoding and transformation into formats readable on the Internet
10 April 2011
MediaSPIP transforms and re-encodes uploaded documents in order to make them readable on the Internet and automatically usable without any intervention from the content creator.
Videos are automatically encoded into the formats supported by HTML5: MP4, Ogv and WebM. The "MP4" version is also used for the fallback Flash player required by older browsers.
Audio documents are likewise re-encoded into the two formats usable with HTML5: MP3 and Ogg. The "MP3" version (...)
(A rough sketch of this re-encoding step follows this list.)
-
Frequent problems
10 March 2010
PHP with safe_mode enabled
One of the main sources of problems is the PHP configuration, in particular having safe_mode enabled.
The solution would be either to disable safe_mode or to place the script in a directory accessible by Apache for the site.
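As flagged in the encoding article above, here is a minimal sketch of that re-encoding step, assuming the server shells out to the ffmpeg command line. The Html5Transcoder class and the file names are hypothetical, and the exact commands MediaSPIP runs are not shown on this page; libx264/aac, libtheora/libvorbis and libvpx/libvorbis are standard ffmpeg encoders for the formats named there.

import java.io.IOException;

// Hypothetical helper (not MediaSPIP code): re-encode an upload into the
// HTML5-friendly formats named in the article above.
public class Html5Transcoder {
    static void run(String... cmd) throws IOException, InterruptedException {
        new ProcessBuilder(cmd).inheritIO().start().waitFor();
    }

    public static void main(String[] args) throws Exception {
        String in = "upload.mov"; // hypothetical input file
        // Video: MP4, Ogv and WebM, as listed above.
        run("ffmpeg", "-i", in, "-c:v", "libx264", "-c:a", "aac", "out.mp4");
        run("ffmpeg", "-i", in, "-c:v", "libtheora", "-c:a", "libvorbis", "out.ogv");
        run("ffmpeg", "-i", in, "-c:v", "libvpx", "-c:a", "libvorbis", "out.webm");
        // Audio: MP3 and Ogg.
        run("ffmpeg", "-i", in, "-vn", "-c:a", "libmp3lame", "out.mp3");
        run("ffmpeg", "-i", in, "-vn", "-c:a", "libvorbis", "out.ogg");
    }
}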
On other sites (5427)
-
5 Top Google Optimize Alternatives to Consider
17 March 2023, by Erin — Analytics Tips
-
Audio & Video not synchronized properly if i merged more videos in mp4parser
1 October 2013, by maniya
I have used mp4parser to merge videos, with dynamic pause and record, capturing clips of at most 6 seconds. In preview it works fine when the video is recorded with few pause/record cycles; if I try more than 3 pause/record cycles, the last video file does not get merged properly with the audio. At the start of the video the sync is OK, but at the end the video freezes while the audio keeps playing for the remaining file duration, about 1 second.
My Recording manager
public class RecordingManager implements Camera.ErrorCallback, MediaRecorder.OnErrorListener, MediaRecorder.OnInfoListener {
private static final String TAG = RecordingManager.class.getSimpleName();
private static final int FOCUS_AREA_RADIUS = 32;
private static final int FOCUS_MAX_VALUE = 1000;
private static final int FOCUS_MIN_VALUE = -1000;
private static final long MINIMUM_RECORDING_TIME = 2000;
private static final int MAXIMUM_RECORDING_TIME = 70 * 1000;
private static final long LOW_STORAGE_THRESHOLD = 5 * 1024 * 1024;
private static final long RECORDING_FILE_LIMIT = 100 * 1024 * 1024;
private boolean paused = true;
private MediaRecorder mediaRecorder = null;
private boolean recording = false;
private FrameLayout previewFrame = null;
private boolean mPreviewing = false;
// private TextureView mTextureView = null;
// private SurfaceTexture mSurfaceTexture = null;
// private boolean mSurfaceTextureReady = false;
//
private SurfaceView surfaceView = null;
private SurfaceHolder surfaceHolder = null;
private boolean surfaceViewReady = false;
private Camera camera = null;
private Camera.Parameters cameraParameters = null;
private CamcorderProfile camcorderProfile = null;
private int mOrientation = -1;
private OrientationEventListener mOrientationEventListener = null;
private long mStartRecordingTime;
private int mVideoWidth;
private int mVideoHeight;
private long mStorageSpace;
private Handler mHandler = new Handler();
// private Runnable mUpdateRecordingTimeTask = new Runnable() {
// @Override
// public void run() {
// long recordingTime = System.currentTimeMillis() - mStartRecordingTime;
// Log.d(TAG, String.format("Recording time:%d", recordingTime));
// mHandler.postDelayed(this, CLIP_GRAPH_UPDATE_INTERVAL);
// }
// };
private Runnable mStopRecordingTask = new Runnable() {
@Override
public void run() {
stopRecording();
}
};
private static RecordingManager mInstance = null;
private Activity currentActivity = null;
private String destinationFilepath = "";
private String snapshotFilepath = "";
public static RecordingManager getInstance(Activity activity, FrameLayout previewFrame) {
if (mInstance == null || mInstance.currentActivity != activity) {
mInstance = new RecordingManager(activity, previewFrame);
}
return mInstance;
}
private RecordingManager(Activity activity, FrameLayout previewFrame) {
currentActivity = activity;
this.previewFrame = previewFrame;
}
public int getVideoWidth() {
return this.mVideoWidth;
}
public int getVideoHeight() {
return this.mVideoHeight;
}
public void setDestinationFilepath(String filepath) {
this.destinationFilepath = filepath;
}
public String getDestinationFilepath() {
return this.destinationFilepath;
}
public void setSnapshotFilepath(String filepath) {
this.snapshotFilepath = filepath;
}
public String getSnapshotFilepath() {
return this.snapshotFilepath;
}
public void init(String videoPath, String snapshotPath) {
Log.v(TAG, "init.");
setDestinationFilepath(videoPath);
setSnapshotFilepath(snapshotPath);
if (!Utils.isExternalStorageAvailable()) {
showStorageErrorAndFinish();
return;
}
openCamera();
if (camera == null) {
showCameraErrorAndFinish();
return;
}
}
public void onResume() {
Log.v(TAG, "onResume.");
paused = false;
// Open the camera
if (camera == null) {
openCamera();
if (camera == null) {
showCameraErrorAndFinish();
return;
}
}
// Initialize the surface texture or surface view
// if (useTexture() && mTextureView == null) {
// initTextureView();
// mTextureView.setVisibility(View.VISIBLE);
// } else if (!useTexture() && mSurfaceView == null) {
initSurfaceView();
surfaceView.setVisibility(View.VISIBLE);
// }
// Start the preview
if (!mPreviewing) {
startPreview();
}
}
private void openCamera() {
Log.v(TAG, "openCamera");
try {
camera = Camera.open();
camera.setErrorCallback(this);
camera.setDisplayOrientation(90); // Since we only support portrait mode
cameraParameters = camera.getParameters();
} catch (RuntimeException e) {
e.printStackTrace();
camera = null;
}
}
private void closeCamera() {
Log.v(TAG, "closeCamera");
if (camera == null) {
Log.d(TAG, "Already stopped.");
return;
}
camera.setErrorCallback(null);
if (mPreviewing) {
stopPreview();
}
camera.release();
camera = null;
}
private void initSurfaceView() {
surfaceView = new SurfaceView(currentActivity);
surfaceView.getHolder().addCallback(new SurfaceViewCallback());
surfaceView.setVisibility(View.GONE);
FrameLayout.LayoutParams params = new LayoutParams(
LayoutParams.MATCH_PARENT, LayoutParams.MATCH_PARENT, Gravity.CENTER);
surfaceView.setLayoutParams(params);
Log.d(TAG, "add surface view to preview frame");
previewFrame.addView(surfaceView);
}
private void releaseSurfaceView() {
if (surfaceView != null) {
previewFrame.removeAllViews();
surfaceView = null;
surfaceHolder = null;
surfaceViewReady = false;
}
}
private void startPreview() {
// if ((useTexture() && !mSurfaceTextureReady) || (!useTexture() && !mSurfaceViewReady)) {
// return;
// }
Log.v(TAG, "startPreview.");
if (mPreviewing) {
stopPreview();
}
setCameraParameters();
resizePreview();
try {
// if (useTexture()) {
// mCamera.setPreviewTexture(mSurfaceTexture);
// } else {
camera.setPreviewDisplay(surfaceHolder);
// }
camera.startPreview();
mPreviewing = true;
} catch (Exception e) {
closeCamera();
e.printStackTrace();
Log.e(TAG, "startPreview failed.");
}
}
private void stopPreview() {
Log.v(TAG, "stopPreview");
if (camera != null) {
camera.stopPreview();
mPreviewing = false;
}
}
public void onPause() {
paused = true;
if (recording) {
stopRecording();
}
closeCamera();
// if (useTexture()) {
// releaseSurfaceTexture();
// } else {
releaseSurfaceView();
// }
}
private void setCameraParameters() {
if (CamcorderProfile.hasProfile(CamcorderProfile.QUALITY_720P)) {
camcorderProfile = CamcorderProfile.get(CamcorderProfile.QUALITY_720P);
} else if (CamcorderProfile.hasProfile(CamcorderProfile.QUALITY_480P)) {
camcorderProfile = CamcorderProfile.get(CamcorderProfile.QUALITY_480P);
} else {
camcorderProfile = CamcorderProfile.get(CamcorderProfile.QUALITY_HIGH);
}
mVideoWidth = camcorderProfile.videoFrameWidth;
mVideoHeight = camcorderProfile.videoFrameHeight;
camcorderProfile.fileFormat = MediaRecorder.OutputFormat.MPEG_4;
camcorderProfile.videoFrameRate = 30;
Log.v(TAG, "mVideoWidth=" + mVideoWidth + " mVideoHeight=" + mVideoHeight);
cameraParameters.setPreviewSize(mVideoWidth, mVideoHeight);
if (cameraParameters.getSupportedWhiteBalance().contains(Camera.Parameters.WHITE_BALANCE_AUTO)) {
cameraParameters.setWhiteBalance(Camera.Parameters.WHITE_BALANCE_AUTO);
}
if (cameraParameters.getSupportedFocusModes().contains(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO)) {
cameraParameters.setFocusMode(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO);
}
cameraParameters.setRecordingHint(true);
cameraParameters.set("cam_mode", 1);
camera.setParameters(cameraParameters);
cameraParameters = camera.getParameters();
camera.setDisplayOrientation(90);
android.hardware.Camera.CameraInfo info = new android.hardware.Camera.CameraInfo();
Log.d(TAG, info.orientation + " degree");
}
private void resizePreview() {
Log.d(TAG, String.format("Video size:%d|%d", mVideoWidth, mVideoHeight));
Point optimizedSize = getOptimizedPreviewSize(mVideoWidth, mVideoHeight);
Log.d(TAG, String.format("Optimized size:%d|%d", optimizedSize.x, optimizedSize.y));
ViewGroup.LayoutParams params = (ViewGroup.LayoutParams) previewFrame.getLayoutParams();
params.width = optimizedSize.x;
params.height = optimizedSize.y;
previewFrame.setLayoutParams(params);
}
public void setOrientation(int ori) {
this.mOrientation = ori;
}
public void setOrientationEventListener(OrientationEventListener listener) {
this.mOrientationEventListener = listener;
}
public Camera getCamera() {
return camera;
}
@SuppressWarnings("serial")
public void setFocusArea(float x, float y) {
if (camera != null) {
int viewWidth = surfaceView.getWidth();
int viewHeight = surfaceView.getHeight();
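// Map the touch point into the camera's focus coordinate space, which
// Android defines as (-1000,-1000) at the top-left to (1000,1000) at the
// bottom-right of the sensor frame; x and y are swapped below because the
// preview is locked to portrait via setDisplayOrientation(90).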
int focusCenterX = FOCUS_MAX_VALUE - (int) (x / viewWidth * (FOCUS_MAX_VALUE - FOCUS_MIN_VALUE));
int focusCenterY = FOCUS_MIN_VALUE + (int) (y / viewHeight * (FOCUS_MAX_VALUE - FOCUS_MIN_VALUE));
final int left = focusCenterY - FOCUS_AREA_RADIUS < FOCUS_MIN_VALUE ? FOCUS_MIN_VALUE : focusCenterY - FOCUS_AREA_RADIUS;
final int top = focusCenterX - FOCUS_AREA_RADIUS < FOCUS_MIN_VALUE ? FOCUS_MIN_VALUE : focusCenterX - FOCUS_AREA_RADIUS;
final int right = focusCenterY + FOCUS_AREA_RADIUS > FOCUS_MAX_VALUE ? FOCUS_MAX_VALUE : focusCenterY + FOCUS_AREA_RADIUS;
final int bottom = focusCenterX + FOCUS_AREA_RADIUS > FOCUS_MAX_VALUE ? FOCUS_MAX_VALUE : focusCenterX + FOCUS_AREA_RADIUS;
Camera.Parameters params = camera.getParameters();
params.setFocusAreas(new ArrayList<Camera.Area>() {
{
add(new Camera.Area(new Rect(left, top, right, bottom), 1000));
}
});
camera.setParameters(params);
camera.autoFocus(new AutoFocusCallback() {
@Override
public void onAutoFocus(boolean success, Camera camera) {
Log.d(TAG, "onAutoFocus");
}
});
}
}
public void startRecording(String destinationFilepath) {
if (!recording) {
updateStorageSpace();
setDestinationFilepath(destinationFilepath);
if (mStorageSpace <= LOW_STORAGE_THRESHOLD) {
Log.v(TAG, "Storage issue, ignore the start request");
Toast.makeText(currentActivity, "Storage issue, ignore the recording request", Toast.LENGTH_LONG).show();
return;
}
if (!prepareMediaRecorder()) {
Toast.makeText(currentActivity, "prepareMediaRecorder failed.", Toast.LENGTH_LONG).show();
return;
}
Log.d(TAG, "Successfully prepare media recorder.");
try {
mediaRecorder.start();
} catch (RuntimeException e) {
Log.e(TAG, "MediaRecorder start failed.");
releaseMediaRecorder();
return;
}
mStartRecordingTime = System.currentTimeMillis();
if (mOrientationEventListener != null) {
mOrientationEventListener.disable();
}
recording = true;
}
}
public void stopRecording() {
if (recording) {
if (!paused) {
// Capture at least 1 second video
long currentTime = System.currentTimeMillis();
if (currentTime - mStartRecordingTime < MINIMUM_RECORDING_TIME) {
mHandler.postDelayed(mStopRecordingTask, MINIMUM_RECORDING_TIME - (currentTime - mStartRecordingTime));
return;
}
}
if (mOrientationEventListener != null) {
mOrientationEventListener.enable();
}
// mHandler.removeCallbacks(mUpdateRecordingTimeTask);
try {
mediaRecorder.setOnErrorListener(null);
mediaRecorder.setOnInfoListener(null);
mediaRecorder.stop(); // stop the recording
Toast.makeText(currentActivity, "Video file saved.", Toast.LENGTH_LONG).show();
long stopRecordingTime = System.currentTimeMillis();
Log.d(TAG, String.format("stopRecording. file:%s duration:%d", destinationFilepath, stopRecordingTime - mStartRecordingTime));
// Calculate the duration of video
MediaMetadataRetriever mmr = new MediaMetadataRetriever();
mmr.setDataSource(this.destinationFilepath);
String _length = mmr.extractMetadata(MediaMetadataRetriever.METADATA_KEY_DURATION);
if (_length != null) {
Log.d(TAG, String.format("clip duration:%d", Long.parseLong(_length)));
}
// Taking the snapshot of video
Bitmap snapshot = ThumbnailUtils.createVideoThumbnail(this.destinationFilepath, Thumbnails.MICRO_KIND);
try {
FileOutputStream out = new FileOutputStream(this.snapshotFilepath);
snapshot.compress(Bitmap.CompressFormat.JPEG, 70, out);
out.close();
} catch (Exception e) {
e.printStackTrace();
}
// mActivity.showPlayButton();
} catch (RuntimeException e) {
e.printStackTrace();
Log.e(TAG, e.getMessage());
// if no valid audio/video data has been received when stop() is
// called
} finally {
//
releaseMediaRecorder(); // release the MediaRecorder object
if (!paused) {
cameraParameters = camera.getParameters();
}
recording = false;
}
}
}
public void setRecorderOrientation(int orientation) {
// For back camera only
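// Round (orientation + 90) to the nearest multiple of 90 degrees and use
// it as the MP4 orientation hint so players rotate the track upright.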
if (orientation != -1) {
Log.d(TAG, "set orientationHint:" + (orientation + 135) % 360 / 90 * 90);
mediaRecorder.setOrientationHint((orientation + 135) % 360 / 90 * 90);
} else {
Log.d(TAG, "not set orientationHint to mediaRecorder");
}
}
private boolean prepareMediaRecorder() {
mediaRecorder = new MediaRecorder();
camera.unlock();
mediaRecorder.setCamera(camera);
mediaRecorder.setAudioSource(MediaRecorder.AudioSource.CAMCORDER);
mediaRecorder.setVideoSource(MediaRecorder.VideoSource.CAMERA);
mediaRecorder.setProfile(camcorderProfile);
mediaRecorder.setMaxDuration(MAXIMUM_RECORDING_TIME);
mediaRecorder.setOutputFile(this.destinationFilepath);
try {
mediaRecorder.setMaxFileSize(Math.min(RECORDING_FILE_LIMIT, mStorageSpace - LOW_STORAGE_THRESHOLD));
} catch (RuntimeException exception) {
// Ignore: recording proceeds without a file-size cap if the platform rejects the limit.
}
setRecorderOrientation(mOrientation);
if (!useTexture()) {
mediaRecorder.setPreviewDisplay(surfaceHolder.getSurface());
}
try {
mediaRecorder.prepare();
} catch (IllegalStateException e) {
releaseMediaRecorder();
return false;
} catch (IOException e) {
releaseMediaRecorder();
return false;
}
mediaRecorder.setOnErrorListener(this);
mediaRecorder.setOnInfoListener(this);
return true;
}
private void releaseMediaRecorder() {
if (mediaRecorder != null) {
mediaRecorder.reset(); // clear recorder configuration
mediaRecorder.release(); // release the recorder object
mediaRecorder = null;
camera.lock(); // lock camera for later use
}
}
private Point getOptimizedPreviewSize(int videoWidth, int videoHeight) {
Display display = currentActivity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
Point optimizedSize = new Point();
optimizedSize.x = size.x;
optimizedSize.y = (int) ((float) videoWidth / (float) videoHeight * size.x);
return optimizedSize;
}
private void showCameraErrorAndFinish() {
DialogInterface.OnClickListener buttonListener = new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
currentActivity.finish();
}
};
new AlertDialog.Builder(currentActivity).setCancelable(false)
.setTitle("Camera error")
.setMessage("Cannot connect to the camera.")
.setNeutralButton("OK", buttonListener)
.show();
}
private void showStorageErrorAndFinish() {
DialogInterface.OnClickListener buttonListener = new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
currentActivity.finish();
}
};
new AlertDialog.Builder(currentActivity).setCancelable(false)
.setTitle("Storage error")
.setMessage("Cannot read external storage.")
.setNeutralButton("OK", buttonListener)
.show();
}
private void updateStorageSpace() {
mStorageSpace = getAvailableSpace();
Log.v(TAG, "updateStorageSpace mStorageSpace=" + mStorageSpace);
}
private long getAvailableSpace() {
String state = Environment.getExternalStorageState();
Log.d(TAG, "External storage state=" + state);
if (Environment.MEDIA_CHECKING.equals(state)) {
return -1;
}
if (!Environment.MEDIA_MOUNTED.equals(state)) {
return -1;
}
File directory = currentActivity.getExternalFilesDir("vine");
directory.mkdirs();
if (!directory.isDirectory() || !directory.canWrite()) {
return -1;
}
try {
StatFs stat = new StatFs(directory.getAbsolutePath());
return stat.getAvailableBlocks() * (long) stat.getBlockSize();
} catch (Exception e) {
Log.i(TAG, "Fail to access external storage", e);
}
return -1;
}
private boolean useTexture() {
return false;
// return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1;
}
private class SurfaceViewCallback implements SurfaceHolder.Callback {
@Override
public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
Log.v(TAG, "surfaceChanged. width=" + width + ". height=" + height);
}
@Override
public void surfaceCreated(SurfaceHolder holder) {
Log.v(TAG, "surfaceCreated");
surfaceViewReady = true;
surfaceHolder = holder;
startPreview();
}
@Override
public void surfaceDestroyed(SurfaceHolder holder) {
Log.d(TAG, "surfaceDestroyed");
surfaceViewReady = false;
}
}
@Override
public void onError(int error, Camera camera) {
Log.e(TAG, "Camera onError. what=" + error + ".");
if (error == Camera.CAMERA_ERROR_SERVER_DIED) {
} else if (error == Camera.CAMERA_ERROR_UNKNOWN) {
}
}
@Override
public void onInfo(MediaRecorder mr, int what, int extra) {
if (what == MediaRecorder.MEDIA_RECORDER_INFO_MAX_DURATION_REACHED) {
stopRecording();
} else if (what == MediaRecorder.MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED) {
stopRecording();
Toast.makeText(currentActivity, "Size limit reached", Toast.LENGTH_LONG).show();
}
}
@Override
public void onError(MediaRecorder mr, int what, int extra) {
Log.e(TAG, "MediaRecorder onError. what=" + what + ". extra=" + extra);
if (what == MediaRecorder.MEDIA_RECORDER_ERROR_UNKNOWN) {
stopRecording();
}
}
}
VideoUtils
public class VideoUtils {
private static final String TAG = VideoUtils.class.getSimpleName();
static double[] matrix = new double[] { 0.0, 1.0, 0.0, -1.0, 0.0, 0.0, 0.0,
0.0, 1.0 };
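// {0,1,0,-1,0,0,0,0,1} is the standard tkhd matrix for a 90-degree
// rotation; it is written into the merged file's track header below so
// that portrait clips display upright.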
public static boolean MergeFiles(String speratedDirPath,
String targetFileName) {
File videoSourceDirFile = new File(speratedDirPath);
String[] videoList = videoSourceDirFile.list();
List<Track> videoTracks = new LinkedList<Track>();
List<Track> audioTracks = new LinkedList<Track>();
for (String file : videoList) {
Log.d(TAG, "source files" + speratedDirPath
+ File.separator + file);
try {
FileChannel fc = new FileInputStream(speratedDirPath
+ File.separator + file).getChannel();
Movie movie = MovieCreator.build(fc);
for (Track t : movie.getTracks()) {
if (t.getHandler().equals("soun")) {
audioTracks.add(t);
}
if (t.getHandler().equals("vide")) {
videoTracks.add(t);
}
}
} catch (FileNotFoundException e) {
e.printStackTrace();
return false;
} catch (IOException e) {
e.printStackTrace();
return false;
}
}
Movie result = new Movie();
try {
if (audioTracks.size() > 0) {
result.addTrack(new AppendTrack(audioTracks
.toArray(new Track[audioTracks.size()])));
}
if (videoTracks.size() > 0) {
result.addTrack(new AppendTrack(videoTracks
.toArray(new Track[videoTracks.size()])));
}
IsoFile out = new DefaultMp4Builder().build(result);
FileChannel fc = new RandomAccessFile(
String.format(targetFileName), "rw").getChannel();
Log.d(TAG, "target file:" + targetFileName);
TrackBox tb = out.getMovieBox().getBoxes(TrackBox.class).get(1);
TrackHeaderBox tkhd = tb.getTrackHeaderBox();
double[] b = tb.getTrackHeaderBox().getMatrix();
tkhd.setMatrix(matrix);
fc.position(0);
out.getBox(fc);
fc.close();
for (String file : videoList) {
File TBRFile = new File(speratedDirPath + File.separator + file);
TBRFile.delete();
}
boolean a = videoSourceDirFile.delete();
Log.d(TAG, "try to delete dir:" + a);
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
return false;
}
return true;
}
public static boolean clearFiles(String speratedDirPath) {
File videoSourceDirFile = new File(speratedDirPath);
if (videoSourceDirFile != null
&& videoSourceDirFile.listFiles() != null) {
File[] videoList = videoSourceDirFile.listFiles();
for (File video : videoList) {
video.delete();
}
videoSourceDirFile.delete();
}
return true;
}
public static int createSnapshot(String videoFile, int kind, String snapshotFilepath) {
return 0;
};
public static int createSnapshot(String videoFile, int width, int height, String snapshotFilepath) {
return 0;
}
}
My reference code project link is
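For context, a minimal sketch of how the two classes above appear intended to be wired together. Only the method names come from the code above; the Activity wiring and the file paths are assumptions.

// Hypothetical calling code: record each pause/record segment to its own
// file inside one directory, then merge the whole directory.
RecordingManager recorder = RecordingManager.getInstance(activity, previewFrame);
recorder.init("/sdcard/vine/segment_0.mp4", "/sdcard/vine/snapshot_0.jpg");
recorder.onResume();

recorder.startRecording("/sdcard/vine/segment_0.mp4"); // on touch down
recorder.stopRecording(); // on touch up (pause)
// ... repeat with segment_1.mp4, segment_2.mp4, ...

// MergeFiles() appends all audio tracks and all video tracks separately,
// so if any segment's audio track runs slightly longer than its video track
// the difference accumulates at the tail, which would match the drift
// described in the question.
boolean merged = VideoUtils.MergeFiles("/sdcard/vine", "/sdcard/final.mp4");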
-
audio do not stop recording after pause ffmpeg c++
15 September 2021, by C1ngh10
I am developing an application that records the screen and the audio from the microphone. I implemented the pause function by stopping the video and audio threads on a condition variable, and resuming them with a notify on the same condition variable. This is done in captureAudio(), in the main while loop. It works this way on macOS and Linux, where I use avfoundation and alsa respectively, but on Windows, with dshow, it keeps recording audio during the pause while the thread is waiting on the condition variable. Does anybody know how I can fix this behaviour?

#include "ScreenRecorder.h"

using namespace std;

ScreenRecorder::ScreenRecorder() : pauseCapture(false), stopCapture(false), started(false), activeMenu(true) {
 avcodec_register_all();
 avdevice_register_all();

 width = 1920;
 height = 1200;
}

ScreenRecorder::~ScreenRecorder() {

 if (started) {
 value = av_write_trailer(outAVFormatContext);
 if (value < 0) {
 cerr << "Error in writing av trailer" << endl;
 exit(-1);
 }

 avformat_close_input(&inAudioFormatContext);
 if(inAudioFormatContext == nullptr){
 cout << "inAudioFormatContext close successfully" << endl;
 }
 else{
 cerr << "Error: unable to close the inAudioFormatContext" << endl;
 exit(-1);
 //throw "Error: unable to close the file";
 }
 avformat_free_context(inAudioFormatContext);
 if(inAudioFormatContext == nullptr){
 cout << "AudioFormat freed successfully" << endl;
 }
 else{
 cerr << "Error: unable to free AudioFormatContext" << endl;
 exit(-1);
 }
 
 avformat_close_input(&pAVFormatContext);
 if (pAVFormatContext == nullptr) {
 cout << "File close successfully" << endl;
 }
 else {
 cerr << "Error: unable to close the file" << endl;
 exit(-1);
 //throw "Error: unable to close the file";
 }

 avformat_free_context(pAVFormatContext);
 if (pAVFormatContext == nullptr) {
 cout << "VideoFormat freed successfully" << endl;
 }
 else {
 cerr << "Error: unable to free VideoFormatContext" << endl;
 exit(-1);
 }
 }
}

/*==================================== VIDEO ==============================*/

int ScreenRecorder::openVideoDevice() throw() {
 value = 0;
 options = nullptr;
 pAVFormatContext = nullptr;

 pAVFormatContext = avformat_alloc_context();

 string dimension = to_string(width) + "x" + to_string(height);
 av_dict_set(&options, "video_size", dimension.c_str(), 0); //option to set the dimension of the screen section to record

#ifdef _WIN32
 pAVInputFormat = av_find_input_format("gdigrab");
 if (avformat_open_input(&pAVFormatContext, "desktop", pAVInputFormat, &options) != 0) {
 cerr << "Couldn't open input stream" << endl;
 exit(-1);
 }

#elif defined linux
 
 int offset_x = 0, offset_y = 0;
 string url = ":0.0+" + to_string(offset_x) + "," + to_string(offset_y); //custom string to set the start point of the screen section
 pAVInputFormat = av_find_input_format("x11grab");
 value = avformat_open_input(&pAVFormatContext, url.c_str(), pAVInputFormat, &options);

 if (value != 0) {
 cerr << "Error in opening input device (video)" << endl;
 exit(-1);
 }
#else

 value = av_dict_set(&options, "pixel_format", "0rgb", 0);
 if (value < 0) {
 cerr << "Error in setting pixel format" << endl;
 exit(-1);
 }

 value = av_dict_set(&options, "video_device_index", "1", 0);

 if (value < 0) {
 cerr << "Error in setting video device index" << endl;
 exit(-1);
 }

 pAVInputFormat = av_find_input_format("avfoundation");

 if (avformat_open_input(&pAVFormatContext, "Capture screen 0:none", pAVInputFormat, &options) != 0) { //TODO trovare un modo per selezionare sempre lo schermo (forse "Capture screen 0")
 cerr << "Error in opening input device" << endl;
 exit(-1);
 }



#endif
 //set frame per second

 value = av_dict_set(&options, "framerate", "30", 0);
 if (value < 0) {
 cerr << "Error in setting dictionary value (setting framerate)" << endl;
 exit(-1);
 }

 value = av_dict_set(&options, "preset", "medium", 0);
 if (value < 0) {
 cerr << "Error in setting dictionary value (setting preset value)" << endl;
 exit(-1);
 }
 /*
 value = av_dict_set(&options, "vsync", "1", 0);
 if(value < 0){
 cerr << "Error in setting dictionary value (setting vsync value)" << endl;
 exit(-1);
 }
 */

 value = av_dict_set(&options, "probesize", "60M", 0);
 if (value < 0) {
 cerr << "Error in setting probesize value" << endl;
 exit(-1);
 }

 //get video stream infos from context
 value = avformat_find_stream_info(pAVFormatContext, nullptr);
 if (value < 0) {
 cerr << "Error in retrieving the stream info" << endl;
 exit(-1);
 }

 VideoStreamIndx = -1;
 for (int i = 0; i < pAVFormatContext->nb_streams; i++) {
 if (pAVFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
 VideoStreamIndx = i;
 break;
 }
 }
 if (VideoStreamIndx == -1) {
 cerr << "Error: unable to find video stream index" << endl;
 exit(-2);
 }

 pAVCodecContext = pAVFormatContext->streams[VideoStreamIndx]->codec;
 pAVCodec = avcodec_find_decoder(pAVCodecContext->codec_id/*params->codec_id*/);
 if (pAVCodec == nullptr) {
 cerr << "Error: unable to find decoder video" << endl;
 exit(-1);
 }

 cout << "Insert height and width [h w]: "; //custom screen dimension to record
 cin >> h >> w;*/


 return 0;
}

/*========================================== AUDIO ============================*/

int ScreenRecorder::openAudioDevice() {
 audioOptions = nullptr;
 inAudioFormatContext = nullptr;

 inAudioFormatContext = avformat_alloc_context();
 value = av_dict_set(&audioOptions, "sample_rate", "44100", 0);
 if (value < 0) {
 cerr << "Error: cannot set audio sample rate" << endl;
 exit(-1);
 }
 value = av_dict_set(&audioOptions, "async", "1", 0);
 if (value < 0) {
 cerr << "Error: cannot set audio sample rate" << endl;
 exit(-1);
 }

#if defined linux
 audioInputFormat = av_find_input_format("alsa");
 value = avformat_open_input(&inAudioFormatContext, "hw:0", audioInputFormat, &audioOptions);
 if (value != 0) {
 cerr << "Error in opening input device (audio)" << endl;
 exit(-1);
 }
#endif

#if defined _WIN32
 audioInputFormat = av_find_input_format("dshow");
 value = avformat_open_input(&inAudioFormatContext, "audio=Microfono (Realtek(R) Audio)", audioInputFormat, &audioOptions);
 if (value != 0) {
 cerr << "Error in opening input device (audio)" << endl;
 exit(-1);
 }
#endif

 value = avformat_find_stream_info(inAudioFormatContext, nullptr);
 if (value != 0) {
 cerr << "Error: cannot find the audio stream information" << endl;
 exit(-1);
 }

 audioStreamIndx = -1;
 for (int i = 0; i < inAudioFormatContext->nb_streams; i++) {
 if (inAudioFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
 audioStreamIndx = i;
 break;
 }
 }
 if (audioStreamIndx == -1) {
 cerr << "Error: unable to find audio stream index" << endl;
 exit(-2);
 }
 return 0;
}

int ScreenRecorder::initOutputFile() {
 value = 0;

 outAVFormatContext = nullptr;
 outputAVFormat = av_guess_format(nullptr, "output.mp4", nullptr);
 if (outputAVFormat == nullptr) {
 cerr << "Error in guessing the video format, try with correct format" << endl;
 exit(-5);
 }
 avformat_alloc_output_context2(&outAVFormatContext, outputAVFormat, outputAVFormat->name, "..\\media\\output.mp4");
 if (outAVFormatContext == nullptr) {
 cerr << "Error in allocating outAVFormatContext" << endl;
 exit(-4);
 }

 /*===========================================================================*/
 this->generateVideoStream();
 this->generateAudioStream();

 //create an empty video file
 if (!(outAVFormatContext->flags & AVFMT_NOFILE)) {
 if (avio_open2(&outAVFormatContext->pb, "..\\media\\output.mp4", AVIO_FLAG_WRITE, nullptr, nullptr) < 0) {
 cerr << "Error in creating the video file" << endl;
 exit(-10);
 }
 }

 if (outAVFormatContext->nb_streams == 0) {
 cerr << "Output file does not contain any stream" << endl;
 exit(-11);
 }
 value = avformat_write_header(outAVFormatContext, &options);
 if (value < 0) {
 cerr << "Error in writing the header context" << endl;
 exit(-12);
 }
 return 0;
}

/*=================================== VIDEO ==================================*/

void ScreenRecorder::generateVideoStream() {
 //Generate video stream
 videoSt = avformat_new_stream(outAVFormatContext, nullptr);
 if (videoSt == nullptr) {
 cerr << "Error in creating AVFormatStream" << endl;
 exit(-6);
 }

 outVideoCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4); //AV_CODEC_ID_MPEG4
 if (outVideoCodec == nullptr) {
 cerr << "Error in finding the AVCodec, try again with the correct codec" << endl;
 exit(-8);
 }
 outVideoCodecContext = avcodec_alloc_context3(outVideoCodec);
 if (outVideoCodecContext == nullptr) {
 cerr << "Error in allocating the codec context" << endl;
 exit(-7);
 }

 //set properties of the video file (stream)
 outVideoCodecContext = videoSt->codec;
 outVideoCodecContext->codec_id = AV_CODEC_ID_MPEG4;
 outVideoCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
 outVideoCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
 outVideoCodecContext->bit_rate = 10000000;
 outVideoCodecContext->width = width;
 outVideoCodecContext->height = height;
 outVideoCodecContext->gop_size = 10;
 outVideoCodecContext->global_quality = 500;
 outVideoCodecContext->max_b_frames = 2;
 outVideoCodecContext->time_base.num = 1;
 outVideoCodecContext->time_base.den = 30;
 outVideoCodecContext->bit_rate_tolerance = 400000;

 if (outVideoCodecContext->codec_id == AV_CODEC_ID_H264) {
 av_opt_set(outVideoCodecContext->priv_data, "preset", "slow", 0);
 }

 if (outAVFormatContext->oformat->flags & AVFMT_GLOBALHEADER) {
 outVideoCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
 }

 value = avcodec_open2(outVideoCodecContext, outVideoCodec, nullptr);
 if (value < 0) {
 cerr << "Error in opening the AVCodec" << endl;
 exit(-9);
 }

 outVideoStreamIndex = -1;
 for (int i = 0; i < outAVFormatContext->nb_streams; i++) {
 if (outAVFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_UNKNOWN) {
 outVideoStreamIndex = i;
 }
 }
 if (outVideoStreamIndex < 0) {
 cerr << "Error: cannot find a free stream index for video output" << endl;
 exit(-1);
 }
 avcodec_parameters_from_context(outAVFormatContext->streams[outVideoStreamIndex]->codecpar, outVideoCodecContext);
}

/*=============================== AUDIO ==================================*/

void ScreenRecorder::generateAudioStream() {
 AVCodecParameters* params = inAudioFormatContext->streams[audioStreamIndx]->codecpar;
 inAudioCodec = avcodec_find_decoder(params->codec_id);
 if (inAudioCodec == nullptr) {
 cerr << "Error: cannot find the audio decoder" << endl;
 exit(-1);
 }

 inAudioCodecContext = avcodec_alloc_context3(inAudioCodec);
 if (avcodec_parameters_to_context(inAudioCodecContext, params) < 0) {
 cout << "Cannot create codec context for audio input" << endl;
 }

 value = avcodec_open2(inAudioCodecContext, inAudioCodec, nullptr);
 if (value < 0) {
 cerr << "Error: cannot open the input audio codec" << endl;
 exit(-1);
 }

 //Generate audio stream
 outAudioCodecContext = nullptr;
 outAudioCodec = nullptr;
 int i;

 AVStream* audio_st = avformat_new_stream(outAVFormatContext, nullptr);
 if (audio_st == nullptr) {
 cerr << "Error: cannot create audio stream" << endl;
 exit(1);
 }

 outAudioCodec = avcodec_find_encoder(AV_CODEC_ID_AAC);
 if (outAudioCodec == nullptr) {
 cerr << "Error: cannot find requested encoder" << endl;
 exit(1);
 }

 outAudioCodecContext = avcodec_alloc_context3(outAudioCodec);
 if (outAudioCodecContext == nullptr) {
 cerr << "Error: cannot create related VideoCodecContext" << endl;
 exit(1);
 }

 if ((outAudioCodec)->supported_samplerates) {
 outAudioCodecContext->sample_rate = (outAudioCodec)->supported_samplerates[0];
 for (i = 0; (outAudioCodec)->supported_samplerates[i]; i++) {
 if ((outAudioCodec)->supported_samplerates[i] == inAudioCodecContext->sample_rate)
 outAudioCodecContext->sample_rate = inAudioCodecContext->sample_rate;
 }
 }
 outAudioCodecContext->codec_id = AV_CODEC_ID_AAC;
 outAudioCodecContext->sample_fmt = (outAudioCodec)->sample_fmts ? (outAudioCodec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
 outAudioCodecContext->channels = inAudioCodecContext->channels;
 outAudioCodecContext->channel_layout = av_get_default_channel_layout(outAudioCodecContext->channels);
 outAudioCodecContext->bit_rate = 96000;
 outAudioCodecContext->time_base = { 1, inAudioCodecContext->sample_rate };

 outAudioCodecContext->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

 if ((outAVFormatContext)->oformat->flags & AVFMT_GLOBALHEADER) {
 outAudioCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
 }

 if (avcodec_open2(outAudioCodecContext, outAudioCodec, nullptr) < 0) {
 cerr << "error in opening the avcodec" << endl;
 exit(1);
 }

 //find a free stream index
 outAudioStreamIndex = -1;
 for (i = 0; i < outAVFormatContext->nb_streams; i++) {
 if (outAVFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_UNKNOWN) {
 outAudioStreamIndex = i;
 }
 }
 if (outAudioStreamIndex < 0) {
 cerr << "Error: cannot find a free stream for audio on the output" << endl;
 exit(1);
 }

 avcodec_parameters_from_context(outAVFormatContext->streams[outAudioStreamIndex]->codecpar, outAudioCodecContext);
}

int ScreenRecorder::init_fifo()
{
 /* Create the FIFO buffer based on the specified output sample format. */
 if (!(fifo = av_audio_fifo_alloc(outAudioCodecContext->sample_fmt,
 outAudioCodecContext->channels, 1))) {
 fprintf(stderr, "Could not allocate FIFO\n");
 return AVERROR(ENOMEM);
 }
 return 0;
}

int ScreenRecorder::add_samples_to_fifo(uint8_t** converted_input_samples, const int frame_size) {
 int error;
 /* Make the FIFO as large as it needs to be to hold both,
 * the old and the new samples. */
 if ((error = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + frame_size)) < 0) {
 fprintf(stderr, "Could not reallocate FIFO\n");
 return error;
 }
 /* Store the new samples in the FIFO buffer. */
 if (av_audio_fifo_write(fifo, (void**)converted_input_samples, frame_size) < frame_size) {
 fprintf(stderr, "Could not write data to FIFO\n");
 return AVERROR_EXIT;
 }
 return 0;
}

int ScreenRecorder::initConvertedSamples(uint8_t*** converted_input_samples,
 AVCodecContext* output_codec_context,
 int frame_size) {
 int error;
 /* Allocate as many pointers as there are audio channels.
 * Each pointer will later point to the audio samples of the corresponding
 * channels (although it may be NULL for interleaved formats).
 */
 if (!(*converted_input_samples = (uint8_t**)calloc(output_codec_context->channels,
 sizeof(**converted_input_samples)))) {
 fprintf(stderr, "Could not allocate converted input sample pointers\n");
 return AVERROR(ENOMEM);
 }
 /* Allocate memory for the samples of all channels in one consecutive
 * block for convenience. */
 if (av_samples_alloc(*converted_input_samples, nullptr,
 output_codec_context->channels,
 frame_size,
 output_codec_context->sample_fmt, 0) < 0) {

 exit(1);
 }
 return 0;
}

static int64_t pts = 0;
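// pts for audio advances by nb_samples per encoded frame (see the FIFO
// loop in captureAudio below), so timestamps are counted in samples and
// match the {1, sample_rate} time base set on the audio encoder.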
void ScreenRecorder::captureAudio() {
 int ret;
 AVPacket* inPacket, * outPacket;
 AVFrame* rawFrame, * scaledFrame;
 uint8_t** resampledData;

 init_fifo();

 //allocate space for a packet
 inPacket = (AVPacket*)av_malloc(sizeof(AVPacket));
 if (!inPacket) {
 cerr << "Cannot allocate an AVPacket for encoded video" << endl;
 exit(1);
 }
 av_init_packet(inPacket);

 //allocate space for a packet
 rawFrame = av_frame_alloc();
 if (!rawFrame) {
 cerr << "Cannot allocate an AVPacket for encoded video" << endl;
 exit(1);
 }

 scaledFrame = av_frame_alloc();
 if (!scaledFrame) {
 cerr << "Cannot allocate an AVPacket for encoded video" << endl;
 exit(1);
 }

 outPacket = (AVPacket*)av_malloc(sizeof(AVPacket));
 if (!outPacket) {
 cerr << "Cannot allocate an AVPacket for encoded video" << endl;
 exit(1);
 }

 //init the resampler
 SwrContext* resampleContext = nullptr;
 resampleContext = swr_alloc_set_opts(resampleContext,
 av_get_default_channel_layout(outAudioCodecContext->channels),
 outAudioCodecContext->sample_fmt,
 outAudioCodecContext->sample_rate,
 av_get_default_channel_layout(inAudioCodecContext->channels),
 inAudioCodecContext->sample_fmt,
 inAudioCodecContext->sample_rate,
 0,
 nullptr);
 if (!resampleContext) {
 cerr << "Cannot allocate the resample context" << endl;
 exit(1);
 }
 if ((swr_init(resampleContext)) < 0) {
 fprintf(stderr, "Could not open resample context\n");
 swr_free(&resampleContext);
 exit(1);
 }

 while (true) {
 if (pauseCapture) {
 cout << "Pause audio" << endl;
 }
 cv.wait(ul, [this]() { return !pauseCapture; });
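 // Note: std::condition_variable::wait() requires the lock to be held, so
 // ul must be re-acquired (ul.lock()) at the top of each iteration after
 // the ul.unlock() below.
 // On Windows the dshow input keeps capturing into its own buffer while
 // this thread is blocked here, so audio accumulated during the pause is
 // read and muxed after resume. A possible workaround (an assumption, not
 // verified here): discard what the demuxer buffered on wake-up, e.g. with
 // avformat_flush(inAudioFormatContext), or keep reading packets during
 // the pause and drop them with av_packet_unref().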

 if (stopCapture) {
 break;
 }

 ul.unlock();

 if (av_read_frame(inAudioFormatContext, inPacket) >= 0 && inPacket->stream_index == audioStreamIndx) {
 //decode audio routine
 av_packet_rescale_ts(inPacket, inAudioFormatContext->streams[audioStreamIndx]->time_base, inAudioCodecContext->time_base);
 if ((ret = avcodec_send_packet(inAudioCodecContext, inPacket)) < 0) {
 cout << "Cannot decode current audio packet " << ret << endl;
 continue;
 }
 
 while (ret >= 0) {
 ret = avcodec_receive_frame(inAudioCodecContext, rawFrame);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 break;
 else if (ret < 0) {
 cerr << "Error during decoding" << endl;
 exit(1);
 }
 if (outAVFormatContext->streams[outAudioStreamIndex]->start_time <= 0) {
 outAVFormatContext->streams[outAudioStreamIndex]->start_time = rawFrame->pts;
 }
 initConvertedSamples(&resampledData, outAudioCodecContext, rawFrame->nb_samples);

 swr_convert(resampleContext,
 resampledData, rawFrame->nb_samples,
 (const uint8_t**)rawFrame->extended_data, rawFrame->nb_samples);

 add_samples_to_fifo(resampledData, rawFrame->nb_samples);

 //raw frame ready
 av_init_packet(outPacket);
 outPacket->data = nullptr;
 outPacket->size = 0;

 const int frame_size = FFMAX(av_audio_fifo_size(fifo), outAudioCodecContext->frame_size);

 scaledFrame = av_frame_alloc();
 if (!scaledFrame) {
 cerr << "Cannot allocate an AVPacket for encoded video" << endl;
 exit(1);
 }

 scaledFrame->nb_samples = outAudioCodecContext->frame_size;
 scaledFrame->channel_layout = outAudioCodecContext->channel_layout;
 scaledFrame->format = outAudioCodecContext->sample_fmt;
 scaledFrame->sample_rate = outAudioCodecContext->sample_rate;
 av_frame_get_buffer(scaledFrame, 0);

 while (av_audio_fifo_size(fifo) >= outAudioCodecContext->frame_size) {

 ret = av_audio_fifo_read(fifo, (void**)(scaledFrame->data), outAudioCodecContext->frame_size);
 scaledFrame->pts = pts;
 pts += scaledFrame->nb_samples;
 if (avcodec_send_frame(outAudioCodecContext, scaledFrame) < 0) {
 cout << "Cannot encode current audio packet " << endl;
 exit(1);
 }
 while (ret >= 0) {
 ret = avcodec_receive_packet(outAudioCodecContext, outPacket);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 break;
 else if (ret < 0) {
 cerr << "Error during encoding" << endl;
 exit(1);
 }
 av_packet_rescale_ts(outPacket, outAudioCodecContext->time_base, outAVFormatContext->streams[outAudioStreamIndex]->time_base);

 outPacket->stream_index = outAudioStreamIndex;

 write_lock.lock();
 
 if (av_write_frame(outAVFormatContext, outPacket) != 0)
 {
 cerr << "Error in writing audio frame" << endl;
 }
 write_lock.unlock();
 av_packet_unref(outPacket);
 }
 ret = 0;
 }
 av_frame_free(&scaledFrame);
 av_packet_unref(outPacket);
 }
 }
 }
}

int ScreenRecorder::captureVideoFrames() {
 int64_t pts = 0;
 int flag;
 int frameFinished = 0;
 bool endPause = false;
 int numPause = 0;

 ofstream outFile{ "..\\media\\log.txt", ios::out };

 int frameIndex = 0;
 value = 0;

 pAVPacket = (AVPacket*)av_malloc(sizeof(AVPacket));
 if (pAVPacket == nullptr) {
 cerr << "Error in allocating AVPacket" << endl;
 exit(-1);
 }

 pAVFrame = av_frame_alloc();
 if (pAVFrame == nullptr) {
 cerr << "Error: unable to alloc the AVFrame resources" << endl;
 exit(-1);
 }

 outFrame = av_frame_alloc();
 if (outFrame == nullptr) {
 cerr << "Error: unable to alloc the AVFrame resources for out frame" << endl;
 exit(-1);
 }

 int videoOutBuffSize;
 int nBytes = av_image_get_buffer_size(outVideoCodecContext->pix_fmt, outVideoCodecContext->width, outVideoCodecContext->height, 32);
 uint8_t* videoOutBuff = (uint8_t*)av_malloc(nBytes);

 if (videoOutBuff == nullptr) {
 cerr << "Error: unable to allocate memory" << endl;
 exit(-1);
 }

 value = av_image_fill_arrays(outFrame->data, outFrame->linesize, videoOutBuff, AV_PIX_FMT_YUV420P, outVideoCodecContext->width, outVideoCodecContext->height, 1);
 if (value < 0) {
 cerr << "Error in filling image array" << endl;
 }

 SwsContext* swsCtx_;
 if (avcodec_open2(pAVCodecContext, pAVCodec, nullptr) < 0) {
 cerr << "Could not open codec" << endl;
 exit(-1);
 }
 swsCtx_ = sws_getContext(pAVCodecContext->width, pAVCodecContext->height, pAVCodecContext->pix_fmt, outVideoCodecContext->width, outVideoCodecContext->height, outVideoCodecContext->pix_fmt, SWS_BICUBIC,
 nullptr, nullptr, nullptr);

 AVPacket outPacket;
 int gotPicture;

 time_t startTime;
 time(&startTime);

 while (true) {

 if (pauseCapture) {
 cout << "Pause" << endl;
 outFile << "/////////////////// Pause ///////////////////" << endl;
 cout << "outVideoCodecContext->time_base: " << outVideoCodecContext->time_base.num << ", " << outVideoCodecContext->time_base.den << endl;
 }
 cv.wait(ul, [this]() { return !pauseCapture; }); //pause capture (not busy waiting)
 if (endPause) {
 endPause = false;
 }

 if (stopCapture) //check if the capture has to stop
 break;
 ul.unlock();

 if (av_read_frame(pAVFormatContext, pAVPacket) >= 0 && pAVPacket->stream_index == VideoStreamIndx) {
 av_packet_rescale_ts(pAVPacket, pAVFormatContext->streams[VideoStreamIndx]->time_base, pAVCodecContext->time_base);
 value = avcodec_decode_video2(pAVCodecContext, pAVFrame, &frameFinished, pAVPacket);
 if (value < 0) {
 cout << "Unable to decode video" << endl;
 }

 if (frameFinished) { //frame successfully decoded
 //sws_scale(swsCtx_, pAVFrame->data, pAVFrame->linesize, 0, pAVCodecContext->height, outFrame->data, outFrame->linesize);
 av_init_packet(&outPacket);
 outPacket.data = nullptr;
 outPacket.size = 0;

 if (outAVFormatContext->streams[outVideoStreamIndex]->start_time <= 0) {
 outAVFormatContext->streams[outVideoStreamIndex]->start_time = pAVFrame->pts;
 }

 //disable warning on the console
 outFrame->width = outVideoCodecContext->width;
 outFrame->height = outVideoCodecContext->height;
 outFrame->format = outVideoCodecContext->pix_fmt;

 sws_scale(swsCtx_, pAVFrame->data, pAVFrame->linesize, 0, pAVCodecContext->height, outFrame->data, outFrame->linesize);

 avcodec_encode_video2(outVideoCodecContext, &outPacket, outFrame, &gotPicture);

 if (gotPicture) {
 if (outPacket.pts != AV_NOPTS_VALUE) {
 outPacket.pts = av_rescale_q(outPacket.pts, videoSt->codec->time_base, videoSt->time_base);
 }
 if (outPacket.dts != AV_NOPTS_VALUE) {
 outPacket.dts = av_rescale_q(outPacket.dts, videoSt->codec->time_base, videoSt->time_base);
 }

 //cout << "Write frame " << j++ << " (size = " << outPacket.size / 1000 << ")" << endl;
 //cout << "(size = " << outPacket.size << ")" << endl;

 //av_packet_rescale_ts(&outPacket, outVideoCodecContext->time_base, outAVFormatContext->streams[outVideoStreamIndex]->time_base);
 //outPacket.stream_index = outVideoStreamIndex;

 outFile << "outPacket->duration: " << outPacket.duration << ", " << "pAVPacket->duration: " << pAVPacket->duration << endl;
 outFile << "outPacket->pts: " << outPacket.pts << ", " << "pAVPacket->pts: " << pAVPacket->pts << endl;
 outFile << "outPacket.dts: " << outPacket.dts << ", " << "pAVPacket->dts: " << pAVPacket->dts << endl;

 time_t timer;
 double seconds;

 mu.lock();
 if (!activeMenu) {
 time(&timer);
 seconds = difftime(timer, startTime);
 int h = (int)(seconds / 3600);
 int m = (int)(seconds / 60) % 60;
 int s = (int)(seconds) % 60;

 std::cout << std::flush << "\r" << std::setw(2) << std::setfill('0') << h << ':'
 << std::setw(2) << std::setfill('0') << m << ':'
 << std::setw(2) << std::setfill('0') << s << std::flush;
 }
 mu.unlock();

 write_lock.lock();
 if (av_write_frame(outAVFormatContext, &outPacket) != 0) {
 cerr << "Error in writing video frame" << endl;
 }
 write_lock.unlock();
 av_packet_unref(&outPacket);
 }

 av_packet_unref(&outPacket);
 av_free_packet(pAVPacket); //avoid memory saturation
 }
 }
 }

 outFile.close();

 av_free(videoOutBuff);

 return 0;
}