
Other articles (61)
-
Videos
21 April 2011
As with "audio" documents, MediaSPIP displays videos wherever possible using the HTML5 <video> tag.
One drawback of this tag is that it is not recognized correctly by some browsers (Internet Explorer, to name one), and each browser natively handles only certain video formats.
Its main advantage is that video plays natively in the browser, removing the need for Flash and (...)
-
MediaSPIP v0.2
21 June 2013
MediaSPIP 0.2 is the first stable version of MediaSPIP.
Its official release date is 21 June 2013, as announced here.
The zip file provided here contains only the MediaSPIP sources, as a standalone version.
As with the previous version, all software dependencies must be installed manually on the server.
If you wish to use this archive for a farm-mode installation, further changes are also required (...)
-
Making files available
14 April 2011
By default, when first set up, MediaSPIP does not allow visitors to download files, whether originals or the results of their transformation or encoding; it only allows them to be viewed.
However, it is both possible and easy to give visitors access to these documents, in several different forms.
All of this is handled in the template configuration page: go to the channel's administration area and choose, in the navigation (...)
On other sites (9478)
-
Android FFMPEG - Low FPS & File Size is massive
2 May 2018, by Alexzander Flores
I am new to Android app development and I have been asked to make a video splitter app. I am trying to use FFmpeg, but the library size is massive and makes the .APK file 140 MB. How can I solve this? Similar apps are around 15 MB in size.
Also, the frame rate starts at 30 FPS and drops to around 2.2 FPS over time when trying to split a 30-second video into two parts. How can I solve this? This is my code currently:
package splicer.com.splicer;
import android.Manifest;
import android.content.Context;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.database.Cursor;
import android.media.MediaMetadataRetriever;
import android.media.MediaScannerConnection;
import android.net.Uri;
import android.provider.MediaStore;
import android.support.v4.app.ActivityCompat;
import android.support.v4.content.ContextCompat;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.text.method.ScrollingMovementMethod;
import android.view.View;
import android.widget.Button;
import android.widget.TextView;
import com.github.hiteshsondhi88.libffmpeg.ExecuteBinaryResponseHandler;
import com.github.hiteshsondhi88.libffmpeg.FFmpeg;
import com.github.hiteshsondhi88.libffmpeg.LoadBinaryResponseHandler;
import com.github.hiteshsondhi88.libffmpeg.exceptions.FFmpegNotSupportedException;
public class MainActivity extends AppCompatActivity {
private Button button;
private TextView textView;
private FFmpeg ffmpeg;
static {
System.loadLibrary("native-lib");
}
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
ffmpeg = FFmpeg.getInstance(getApplicationContext());
try {
ffmpeg.loadBinary(new LoadBinaryResponseHandler() {
@Override
public void onStart() {}
@Override
public void onFailure() {}
@Override
public void onSuccess() {}
@Override
public void onFinish() {}
});
} catch(FFmpegNotSupportedException e) {
e.printStackTrace();
}
textView = (TextView) findViewById(R.id.textView);
textView.setY(200);
textView.setHeight(700);
textView.setMovementMethod(new ScrollingMovementMethod());
button = (Button) findViewById(R.id.button);
button.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
openGallery();
}
});
}
/**
* A native method that is implemented by the 'native-lib' native library,
* which is packaged with this application.
*/
public native String stringFromJNI();
public void openGallery() {
if(ContextCompat.checkSelfPermission(this, Manifest.permission.READ_EXTERNAL_STORAGE) != PackageManager.PERMISSION_GRANTED) {
ActivityCompat.requestPermissions(this, new String [] {Manifest.permission.READ_EXTERNAL_STORAGE}, 0);
}
if(ContextCompat.checkSelfPermission(this, Manifest.permission.WRITE_EXTERNAL_STORAGE) != PackageManager.PERMISSION_GRANTED) {
ActivityCompat.requestPermissions(this, new String [] {Manifest.permission.WRITE_EXTERNAL_STORAGE}, 0);
}
Intent gallery = new Intent(Intent.ACTION_PICK, MediaStore.Video.Media.EXTERNAL_CONTENT_URI);
startActivityForResult(gallery, 100);
}
public String getRealPathFromURI(Context context, Uri contentUri) {
Cursor cursor = null;
try {
String[] proj = { MediaStore.Images.Media.DATA };
cursor = context.getContentResolver().query(contentUri, proj, null, null, null);
int column_index = cursor.getColumnIndexOrThrow(MediaStore.Images.Media.DATA);
cursor.moveToFirst();
return cursor.getString(column_index);
} finally {
if (cursor != null) {
cursor.close();
}
}
}
@Override
protected void onActivityResult(int requestCode, int resultCode, final Intent intent) {
super.onActivityResult(requestCode, resultCode, intent);
if(resultCode == RESULT_OK && requestCode == 100) {
MediaMetadataRetriever retriever = new MediaMetadataRetriever();
try {
retriever.setDataSource(getBaseContext(), intent.getData());
String time = retriever.extractMetadata(MediaMetadataRetriever.METADATA_KEY_DURATION);
long splitCount = Long.valueOf(time) / 1000 / 15;
if(splitCount > 1) {
final String path = getRealPathFromURI(getBaseContext(), Uri.parse(intent.getData().toString()));
for(int a = 0, start = 0; a < splitCount; ++a, start += 15) {
// I am only testing with .mp4s atm, this will change before production
final String targetPath = path.replace(".mp4", "_" + (a + 1) + ".mp4");
ffmpeg.execute(new String [] {
"-r",
"1",
"-i",
path,
"-ss",
String.valueOf(start),
"-t",
String.valueOf(start + 15),
"-r",
"24",
targetPath
}, new ExecuteBinaryResponseHandler() {
@Override
public void onStart() {}
@Override
public void onProgress(String message) {
textView.setText("onProcess: " + message);
}
@Override
public void onFailure(String message) {
textView.setText("onFailure: " + message + " --- " + path);
}
@Override
public void onSuccess(String message) {
textView.setText("onSuccess:" + message);
MediaScannerConnection.scanFile(getBaseContext(),
new String [] { targetPath }, null,
new MediaScannerConnection.OnScanCompletedListener() {
public void onScanCompleted(String path, Uri uri) {}
});
}
@Override
public void onFinish() {}
});
}
}
} catch(Exception e) {
e.printStackTrace();
} finally {
retriever.release();
}
}
}
}

I don't believe everything here is as optimal as it could be, but I'm just trying to prove the concept at the moment. Any help in the right direction would be amazing, thank you!
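A plausible explanation for the frame-rate collapse, offered as a suggestion rather than a verified fix: "-r 1" placed before "-i" forces the input to be interpreted at 1 FPS, and "-t" takes a duration rather than an end timestamp, so every segment after the first re-encodes far more material than intended. Below is a minimal stream-copy sketch using the same wrapper call as above; handler stands in for an ExecuteBinaryResponseHandler like the one already shown:

ffmpeg.execute(new String[] {
    "-ss", String.valueOf(start), // seek before -i: fast, keyframe-based seeking
    "-i", path,
    "-t", "15",                   // -t is a segment duration, not an end time
    "-c", "copy",                 // copy the streams instead of re-encoding them
    targetPath
}, handler);

Because "-c copy" skips decoding and re-encoding entirely, the split runs at roughly I/O speed, at the cost of cut points landing on keyframes. As for the APK size, the usual remedies are shipping only the ABIs the app actually targets (for example with Gradle abiFilters or per-ABI APK splits) or building FFmpeg with only the demuxers and codecs you need.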
-
Encode video using ffmpeg from javacv on Android causes native code crash
13 December 2012, by gtcompscientist
NOTE: I have updated this since originally asking the question to reflect some of what I have learned about loading live camera images into the ffmpeg libraries.
I am using ffmpeg from javacv compiled for Android to encode/decode video for my application. (Note that originally I was trying to use ffmpeg-java, but it has some incompatible libraries.)

Original problem: The problem that I've run into is that I am currently getting each frame as a Bitmap (just a plain android.graphics.Bitmap) and I can't figure out how to stuff that into the encoder.

Solution in javacv's ffmpeg: Use avpicture_fill(); the format from Android is supposedly YUV420P, though I can't verify this until my encoder issues (below) are fixed.

avcodec.avpicture_fill((AVPicture)mFrame, picPointer, avutil.PIX_FMT_YUV420P, VIDEO_WIDTH, VIDEO_HEIGHT)
Problem now: The line that is supposed to actually encode the data crashes the thread. I get a big native-code stack trace that I'm unable to understand. Does anybody have a suggestion?
Here is the code that I am using to instantiate all the ffmpeg libraries:

avcodec.avcodec_register_all();
avcodec.avcodec_init();
avformat.av_register_all();
mCodec = avcodec.avcodec_find_encoder(avcodec.CODEC_ID_H263);
if (mCodec == null)
{
Logging.Log("Unable to find encoder.");
return;
}
Logging.Log("Found encoder.");
mCodecCtx = avcodec.avcodec_alloc_context();
mCodecCtx.bit_rate(300000);
mCodecCtx.codec(mCodec);
mCodecCtx.width(VIDEO_WIDTH);
mCodecCtx.height(VIDEO_HEIGHT);
mCodecCtx.pix_fmt(avutil.PIX_FMT_YUV420P);
mCodecCtx.codec_id(avcodec.CODEC_ID_H263);
mCodecCtx.codec_type(avutil.AVMEDIA_TYPE_VIDEO);
AVRational ratio = new AVRational();
ratio.num(1);
ratio.den(30);
mCodecCtx.time_base(ratio);
mCodecCtx.coder_type(1);
mCodecCtx.flags(mCodecCtx.flags() | avcodec.CODEC_FLAG_LOOP_FILTER);
mCodecCtx.me_cmp(avcodec.FF_LOSS_CHROMA);
mCodecCtx.me_method(avcodec.ME_HEX);
mCodecCtx.me_subpel_quality(6);
mCodecCtx.me_range(16);
mCodecCtx.gop_size(30);
mCodecCtx.keyint_min(10);
mCodecCtx.scenechange_threshold(40);
mCodecCtx.i_quant_factor((float) 0.71);
mCodecCtx.b_frame_strategy(1);
mCodecCtx.qcompress((float) 0.6);
mCodecCtx.qmin(10);
mCodecCtx.qmax(51);
mCodecCtx.max_qdiff(4);
mCodecCtx.max_b_frames(1);
mCodecCtx.refs(2);
mCodecCtx.directpred(3);
mCodecCtx.trellis(1);
mCodecCtx.flags2(mCodecCtx.flags2() | avcodec.CODEC_FLAG2_BPYRAMID | avcodec.CODEC_FLAG2_WPRED | avcodec.CODEC_FLAG2_8X8DCT | avcodec.CODEC_FLAG2_FASTPSKIP);
if (avcodec.avcodec_open(mCodecCtx, mCodec) < 0) // avcodec_open returns 0 on success, a negative value on error
{
Logging.Log("Unable to open encoder.");
return;
}
Logging.Log("Encoder opened.");
mFrameSize = avcodec.avpicture_get_size(avutil.PIX_FMT_YUV420P, VIDEO_WIDTH, VIDEO_HEIGHT);
Logging.Log("Frame size - '" + mFrameSize + "'.");
//mPic = new AVPicture(mPicSize);
mFrame = avcodec.avcodec_alloc_frame();
if (mFrame == null)
{
Logging.Log("Unable to alloc frame.");
}

This is what I want to be able to execute next:
BytePointer picPointer = new BytePointer(data);
int bBuffSize = mFrameSize;
BytePointer bBuffer = new BytePointer(bBuffSize);
int picSize = 0;
if ((picSize = avcodec.avpicture_fill((AVPicture)mFrame, picPointer, avutil.PIX_FMT_YUV420P, VIDEO_WIDTH, VIDEO_HEIGHT)) <= 0)
{
Logging.Log("Couldn't convert preview to AVPicture (" + picSize + ")");
return;
}
Logging.Log("Converted preview to AVPicture (" + picSize + ")");
VCAP_Package vPackage = new VCAP_Package();
if (mCodecCtx.isNull())
{
Logging.Log("Codec Context is null!");
}
//encode the image
int size = avcodec.avcodec_encode_video(mCodecCtx, bBuffer, bBuffSize, mFrame);
int totalSize = 0;
while (size >= 0)
{
totalSize += size;
Logging.Log("Encoded '" + size + "' bytes.");
//Get any delayed frames
size = avcodec.avcodec_encode_video(mCodecCtx, bBuffer, bBuffSize, null);
}
Logging.Log("Finished encoding. (" + totalSize + ")");But, as of now, I don't know how to put the Bitmap into the right piece or if I have that setup correctly.
A few notes about the code:
- VIDEO_WIDTH = 352
- VIDEO_HEIGHT = 288
- VIDEO_FPS = 30
-
How to read a text file in C++?
16 May 2017, by Sanduni Wickramasinghe
I want to detect predefined specific content in a video. For this I use a separately identified piece of video and another input video. I have read the data from the identified video and saved its byte array in a text file. Now I want to compare the contents of the saved text file with the content of the input video (a minimal file-reading sketch follows the code below).
This is my code:
void CFfmpegmethods::VideoRead(){
av_register_all();
avformat_network_init();
ifstream inFile;
inFile.open("H:\\Sanduni_projects\\sample_ad.txt");
const char *url = "H:\\Sanduni_projects\\ad_1.mp4";
AVDictionary *options = NULL;
AVFormatContext *s = avformat_alloc_context();
AVPacket *pkt = new AVPacket(); //this structure stores compressed data
//open an input stream and read the header
int ret = avformat_open_input(&s, url, NULL,NULL);
//avformat_find_stream_info(s, &options); //finding the missing information
if (ret < 0)
abort();
if (!inFile) {
cerr << "Unable to open file datafile.txt";
exit(1); // call system to stop
}
av_dict_set(&options, "video_size", "640x480", 0);
av_dict_set(&options, "pixel_format", "rgb24", 0);
if (avformat_open_input(&s, url, NULL, &options) < 0){
abort();
}
av_dict_free(&options);
AVDictionaryEntry *e;
if (e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX)) {
fprintf(stderr, "Option %s not recognized by the demuxer.\n", e->key);
abort();
}
int i = 1;
int64_t duration = 0;
int size = 0;
uint8_t *data; //Unsigned integer type with a width of exactly 8 bits.
int sum = 0;
int total_size = 0;
int64_t total_duration = 0;
int packet_size = 0;
int64_t stream_index = 0;
int64_t bit_rate = 0;
AVBufferRef *buf;
//writing data to a file
outdata.open("H:\\Sanduni_projects\\sample_ad.txt");
//outdata.open("H:\\Sanduni_projects\\log.txt");
if (!outdata){
cerr << "Error: file could not be opened" << endl;
exit(1);
}
//Split what is stored in the file into frames and return one for each call
//returns the next frame of the stream
while(1){
int frame = av_read_frame(s, pkt); //one frame is read
if (frame < 0) break;
size = pkt->size;
data = pkt->data;
for (int j = 0; j < size; j++){
for (int k = 0; k < 12; k++){
while (!inFile.eof()){ //eof() is a member function; note: as written this loop has an empty body and can never terminate
}
//if (data[j] != ) break;
}
}
//int decode = avcodec_send_packet(avctx, pkt);
}
//make the packet free
av_packet_unref(pkt);
delete pkt;
cout << "total size: " << total_size << endl;
cout << "total duration:" << total_duration << endl;
outdata.close();
//Close the file after reading
avformat_close_input(&s);
}
void CFfmpegmethods::SampleAdCreation(){
av_register_all();
const char *url_ref = "H:\\Sanduni_projects\\sample_ad.mp4";
AVDictionary *options = NULL;
AVDictionary *options_ref = NULL;
AVFormatContext *s = avformat_alloc_context();
AVPacket *pkt = new AVPacket(); //this structure stores compressed data
//open an input stream and read the header
int ret = avformat_open_input(&s, url_ref, NULL, NULL);
if (ret < 0)
abort();
av_dict_set(&options, "video_size", "640x480", 0);
av_dict_set(&options, "pixel_format", "rgb24", 0);
if (avformat_open_input(&s, url_ref, NULL, &options) < 0){
abort();
}
av_dict_free(&options);
AVDictionaryEntry *e;
if (e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX)) {
fprintf(stderr, "Option %s not recognized by the demuxer.\n", e->key);
abort();
}
uint8_t *data;
//writing data to a file
outdata.open("H:\\Sanduni_projects\\sample_ad.txt");
if (!outdata){
cerr << "Error: file could not be opened" << endl;
exit(1);
}
for (int m = 0; m < 12; m++){
int size = pkt->size;
int frame = av_read_frame(s, pkt);
if (frame < 0) break;
data = pkt->data;
for (int j = 0; j < size; j++){
outdata << data[j];
} outdata</make the packet free
av_packet_unref(pkt);
delete pkt;
outdata.close();
//Close the file after reading
avformat_close_input(&s);
}
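Since the question is ultimately about reading the saved reference bytes back, here is a minimal sketch (not part of the original post) that loads the whole file in binary mode; ReadReferenceFile is a hypothetical helper:

#include <fstream>
#include <iostream>
#include <iterator>
#include <vector>

std::vector<char> ReadReferenceFile(const char *path) {
    // Binary mode prevents newline translation from corrupting raw packet bytes
    std::ifstream in(path, std::ios::binary);
    if (!in) {
        std::cerr << "Unable to open " << path << '\n';
        return {};
    }
    return std::vector<char>(std::istreambuf_iterator<char>(in),
                             std::istreambuf_iterator<char>());
}

A packet could then be checked with memcmp(pkt->data, reference.data() + offset, pkt->size) in place of the empty eof() loop above. Note that comparing compressed packet bytes will only ever match if both files came from the identical encode; comparing decoded frames (or hashes of them) is more robust.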