
Other articles (39)
-
Permissions overridden by plugins
27 April 2010, by Mediaspip core
autoriser_auteur_modifier() so that visitors are able to edit their information on the authors page -
Sites built with MediaSPIP
2 May 2011 — This page presents some of the sites running MediaSPIP.
You can of course add your own via the form at the bottom of the page. -
Libraries and binaries specific to video and audio processing
31 January 2010 — The following software and libraries are used by SPIPmotion in one way or another.
Required binaries — FFmpeg: the main encoder, which can transcode almost every type of video and audio file into formats playable on the web (see this tutorial for how to install it); Oggz-tools: inspection tools for ogg files; Mediainfo: extracts metadata from most video and audio formats;
Complementary, optional binaries — flvtool2: (...)
On other sites (8550)
-
Why is my DSharpPlus slash command not playing my desired sound using FFmpeg in C#?
19 May 2023, by IngeniousThoughts — I'm having a problem with my FFmpeg setup: the commands themselves work fine, but my play command doesn't play the desired sound.


//The command.
 [SlashCommand("play", "plays a sound in a voice channel.")]
 public async Task HowlCommand(InteractionContext ctx, [Choice("ChoiceName", "C:\\My\\Program\\Directory\\Name\\MySound.mp3")]
 [Option("Sound", "Please select a Sound")] string filepath)
 {
 //Acknowledges the interaction with an initial response.
 //This also prevents the "interaction failed" error.
 await ctx.CreateResponseAsync(InteractionResponseType.ChannelMessageWithSource, new DiscordInteractionResponseBuilder()
 .WithContent("Playing sound in voice channel. Please wait just a moment!"));
 
 //Ignore the interaction if it was invoked by a bot.
 if (ctx.Member.IsBot)
 {
 return;
 }
 else
 {
 if(filepath != "C:\\My\\Program\\Directory\\Name\\MySound.mp3")
 {
 var embedmessage = new DiscordMessageBuilder()
 .AddEmbed(new DiscordEmbedBuilder()
 
 .WithAuthor("BotName", null, ctx.Client.CurrentApplication.Icon)
 .WithTitle("Please select the following sound to play:")
 .WithImageUrl(ctx.Client.CurrentApplication.Icon)
 .WithFooter("VoiceChannel Error.", "ImageURL.png")
 .WithTimestamp(DateTime.Now)
 .Build()
 
 );
 
 //Makes the command wait 5 seconds before sending the rest of the command data.
 await Task.Delay(TimeSpan.FromSeconds(5));
 
 //Sends the embed in a message.
 await ctx.Channel.SendMessageAsync(embedmessage);
 }
 else
 {
 //Makes the command wait 5 seconds before sending the rest of the command data.
 await Task.Delay(TimeSpan.FromSeconds(5));
 
 
 var vnext = ctx.Client.GetVoiceNext();
 var vnc = vnext.GetConnection(ctx.Guild);
 
 //if null throws exception.
 if (vnc == null)
 throw new System.InvalidOperationException("Not connected in this guild.");
 
 
 //Start ffmpeg to decode the mp3 and pipe raw PCM to stdout.
 var ffmpeg = Process.Start(new ProcessStartInfo
 {
 FileName = "ffmpeg",
 Arguments = $@"-i ""{filepath}"" -ac 2 -f s16le -ar 48000 pipe:1",
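 // -f s16le -ar 48000 -ac 2 yields the raw PCM layout (48 kHz, 16-bit signed LE, stereo) that VoiceNext consumes.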
 RedirectStandardOutput = true,
 UseShellExecute = false
 });
 Stream pcm = ffmpeg.StandardOutput.BaseStream;
 
 VoiceTransmitSink transmit = vnc.GetTransmitSink();
 await pcm.CopyToAsync(transmit);
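 // Note (not in the original post): CopyToAsync only fills VoiceNext's internal buffer.
 // The pattern shown in the DSharpPlus docs also flushes the sink and waits for
 // playback before disconnecting, e.g.:
 // await transmit.FlushAsync();
 // await vnc.WaitForPlaybackFinishAsync();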
 vnc.GetTransmitSink().VolumeModifier = 5;
 
 //Makes the command wait 10 seconds before sending the rest of the command data.
 await Task.Delay(TimeSpan.FromSeconds(10));
 
 //Disconnects the bot from the voice channel.
 vnc.Disconnect();
 }
 }
 }



//The command.
 [SlashCommand("join", "Joins a voice channel.")]
 public async Task JoinChannel(InteractionContext ctx, [Choice("MyVoiceChannel", "VoiceChannelName")]
 [Option("VoiceChannel", "Please choose a Voice Channel.")] DiscordChannel channel)
 {
 //Acknowledges the interaction with an initial response.
 //This also prevents the "interaction failed" error.
 await ctx.CreateResponseAsync(InteractionResponseType.ChannelMessageWithSource, new DiscordInteractionResponseBuilder()
 .WithContent("Joining voice channel. Please wait just a moment!"));
 
 //Ignore the interaction if it was invoked by a bot.
 if (ctx.Member.IsBot)
 {
 return;
 }
 else
 {
 if (channel.Name != "MyVoiceChannelName")
 {
 var embedmessage = new DiscordMessageBuilder()
 .AddEmbed(new DiscordEmbedBuilder()
 
 .WithAuthor("BotName", null, ctx.Client.CurrentApplication.Icon)
 .WithTitle("Please Create The Following Voice Channel:")
 .WithImageUrl(ctx.Client.CurrentApplication.Icon)
 .AddField("VoiceChannel:", "**BotName**" + Environment.NewLine + "Is Case Sensitive: **Yes**")
 .WithFooter("VoiceChannel Error.", "ImageURL.png")
 .WithTimestamp(DateTime.Now)
 .Build()
 
 );
 
 //Makes the command wait 5 seconds before sending the rest of the command data.
 await Task.Delay(TimeSpan.FromSeconds(5));
 
 //Sends the embed in a message.
 await ctx.Channel.SendMessageAsync(embedmessage);
 }
 else
 {
 //Makes the command wait 5 seconds before sending the rest of the command data.
 await Task.Delay(TimeSpan.FromSeconds(5));
 
 
 channel = ctx.Member.VoiceState?.Channel;
 await channel.ConnectAsync();
 
 }
 }
 }
 
 }
}



public sealed class Program
 {
 public static DiscordClient Client { get; private set; }
 public static InteractivityExtension Interactivity { get; private set; }
 public static CommandsNextExtension Commands { get; private set; }
 public static VoiceNextExtension VoiceNext { get; private set; }
 
 
 static async Task Main(string[] args)
 {
 
 //Main Window configs specifying the title name and color.
 Console.BackgroundColor = ConsoleColor.Black;
 Console.ForegroundColor = ConsoleColor.Magenta;
 Console.Title = "BotName";
 
 //1. Get the details of your config.json file by deserialising it
 var configJsonFile = new JSONReader();
 await configJsonFile.ReadJSON();
 
 //2. Setting up the Bot Configuration
 var discordConfig = new DiscordConfiguration()
 {
 Intents = DiscordIntents.All,
 Token = configJsonFile.token,
 TokenType = TokenType.Bot,
 AutoReconnect = true
 };
 
 //3. Apply this config to our DiscordClient
 Client = new DiscordClient(discordConfig);
 
 //4. Set the default timeout for Commands that use interactivity
 Client.UseInteractivity(new InteractivityConfiguration()
 {
 Timeout = TimeSpan.FromMinutes(2)
 });
 
 //5. Set up the Task Handler Ready event
 Client.Ready += OnClientReady;
 
 //6. Set up the Commands Configuration
 var commandsConfig = new CommandsNextConfiguration()
 {
 StringPrefixes = new string[] { configJsonFile.prefix },
 EnableMentionPrefix = true,
 EnableDms = true,
 EnableDefaultHelp = false,
 };
 
 Commands = Client.UseCommandsNext(commandsConfig);
 
 //7. Register your commands
 var slashCommandsConfig = Client.UseSlashCommands();
 slashCommandsConfig.RegisterCommands<MySoundsCommand>(MyGuildID);
 
 //8. Allows usage of voice channels.
 var VoiceNext = Client.UseVoiceNext();
 
 //9. Connect to get the Bot online
 await Client.ConnectAsync();
 await Task.Delay(-1);
 }
 
 private static Task OnClientReady(DiscordClient sender, ReadyEventArgs e)
 {
 return Task.CompletedTask;
 }
 }


Source code link:




I tried the play slash command, expecting it to play the mp3 file, but it doesn't.


Everything else works fine, except that when the bot transmits the sound, nothing is actually played.


-
Memory leak in a C++/CLI application
10 December 2013, by Ankush — I am passing a bitmap from my C# app to a C++/CLI DLL that adds it to a video.
The problem is that the program slowly leaks memory. _CrtDumpMemoryLeaks() shows a leak of the bitmap and another 40-byte leak, even though I am disposing of the bitmap.
Can anyone spot the memory leak? Here is the code. Flow:
1) capture a screenshot with takescreeshot()
2) pass it to the C++/CLI function
3) dispose of the bitmap
Lines from my C# app:
Bitmap snap = takescreeshot();
vencoder.AddBitmap(snap);
snap.Dispose();
vencoder.printleak();
private static Bitmap takescreeshot()
{
System.Drawing.Bitmap bitmap = null;
System.Drawing.Graphics graphics = null;
bitmap = new Bitmap
(
System.Windows.Forms.Screen.PrimaryScreen.Bounds.Width,
System.Windows.Forms.Screen.PrimaryScreen.Bounds.Height,
System.Drawing.Imaging.PixelFormat.Format24bppRgb
);
graphics = System.Drawing.Graphics.FromImage(bitmap);
graphics.CopyFromScreen(Screen.PrimaryScreen.Bounds.X, Screen.PrimaryScreen.Bounds.Y, 0, 0, Screen.PrimaryScreen.Bounds.Size);
//Write TimeSpamp
Rectangle rect = new Rectangle(1166, 738, 200, 20);
String datetime= System.String.Format("{0:dd:MM:yy hh:mm:ss}",DateTime.Now);
System.Drawing.Font sysfont = new System.Drawing.Font("Times New Roman", 14, FontStyle.Bold);
graphics.DrawString(datetime, sysfont, Brushes.Red,rect);
//
Grayscale filter = new Grayscale(0.2125, 0.7154, 0.0721);
Bitmap grayImage = filter.Apply(bitmap);
//Dispose
bitmap.Dispose();
graphics.Dispose();
return grayImage;
}

Now in the C++/CLI DLL:
bool VideoEncoder::AddBitmap(Bitmap^ bitmap)
{
BitmapData^ bitmapData = bitmap->LockBits( System::Drawing::Rectangle( 0, 0,bitmap->Width, bitmap->Height ),ImageLockMode::ReadOnly,PixelFormat::Format8bppIndexed);
uint8_t* ptr = reinterpret_cast<uint8_t*>( static_cast<void*>( bitmapData->Scan0 ) );
uint8_t* srcData[4] = { ptr, NULL, NULL, NULL };
int srcLinesize[4] = { bitmapData->Stride, 0, 0, 0 };
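// Note: a fresh AVFrame (plus an av_malloc'ed buffer) is allocated for every bitmap passed in.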
pCurrentPicture = CreateFFmpegPicture(pVideoStream->codec->pix_fmt, pVideoStream->codec->width, pVideoStream->codec->height);
sws_scale(pImgConvertCtx, srcData, srcLinesize, 0, bitmap->Height, pCurrentPicture->data, pCurrentPicture->linesize );
bitmap->UnlockBits( bitmapData );
write_video_frame();
bitmapData=nullptr;
ptr=NULL;
return true;
}
AVFrame * VideoEncoder::CreateFFmpegPicture(int pix_fmt, int nWidth, int nHeight)
{
AVFrame *picture = NULL;
uint8_t *picture_buf = NULL;
int size;
picture = avcodec_alloc_frame();
if ( !picture)
{
printf("Cannot create frame\n");
return NULL;
}
size = avpicture_get_size((AVPixelFormat)pix_fmt, nWidth, nHeight);
picture_buf = (uint8_t *) av_malloc(size);
if (!picture_buf)
{
av_free(picture);
printf("Cannot allocate buffer\n");
return NULL;
}
avpicture_fill((AVPicture *)picture, picture_buf,
(AVPixelFormat)pix_fmt, nWidth, nHeight);
return picture;
}
void VideoEncoder::write_video_frame()
{
AVCodecContext* codecContext = pVideoStream->codec;
int out_size, ret = 0;
if ( pFormatContext->oformat->flags & AVFMT_RAWPICTURE )
{
printf( "raw picture must be written" );
}
else
{
out_size = avcodec_encode_video( codecContext, pVideoEncodeBuffer,nSizeVideoEncodeBuffer, pCurrentPicture );
if ( out_size > 0 )
{
AVPacket packet;
av_init_packet( &packet );
if ( codecContext->coded_frame->pts != AV_NOPTS_VALUE )
{
packet.pts = av_rescale_q( packet.pts, codecContext->time_base, pVideoStream->time_base );
}
if ( codecContext->coded_frame->pkt_dts != AV_NOPTS_VALUE )
{
packet.dts = av_rescale_q( packet.dts, codecContext->time_base, pVideoStream->time_base );
}
if ( codecContext->coded_frame->key_frame )
{
packet.flags |= AV_PKT_FLAG_KEY;
}
packet.stream_index = pVideoStream->index;
packet.data = pVideoEncodeBuffer;
packet.size = out_size;
ret = av_interleaved_write_frame( pFormatContext, &packet );
av_free_packet(&packet);
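// Note (added comment): av_freep() takes the address of the pointer to free;
// here it is handed the AVFrame pointer itself.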
av_freep(pCurrentPicture);
}
else
{
// image was buffered
}
}
if ( ret != 0 )
{
throw gcnew Exception( "Error while writing video frame." );
}
}
void VideoEncoder::printleak()
{
printf("No of leaks: %d",_CrtDumpMemoryLeaks());
printf("\n");
}
-
Is it feasible to create FFmpegFrameGrabbers one by one for a single FFmpegFrameRecorder and keep the video stream alive?
12 July 2023, by zhoutian — The reason I ask this question is that I receive byte[] chunks of container data (format name: dhav) one at a time, and I need to push that data continuously to RTMP for playback.


What progress have I made so far?


For now, I can push data to RTMP and play the RTMP stream in VLC for just a few seconds; then the stream ends.


This is because the grabber created from the InputStream only contains the few chunks taken from the ByteBuffer; when that InputStream ends, the RTMP stream is closed (see the sketch after the following snippets).


synchronized (buffer) {
 buffer.flip();
 byte[] bytes = new byte[buffer.remaining()];
 buffer.get(bytes);
 buffer.clear();
 isByteBufferFull[0] = false;
 try {
 grabAndPush(bytes, SRS_PUSH_ADDRESS);
 } catch (Exception e) {
 //throw new RuntimeException(e);
 }

 }



private static synchronized void grabAndPush(byte[] bytes, String pushAddress) throws Exception {
 avutil.av_log_set_level(avutil.AV_LOG_INFO);
 FFmpegLogCallback.set();

 FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(new ByteArrayInputStream(bytes));
...
}
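
A sketch (not from the original post) of one way to keep a single grabber alive: write every incoming byte[] into one long-lived PipedOutputStream and hand the matching PipedInputStream to a single FFmpegFrameGrabber, so the grabber never reaches end-of-stream while chunks keep arriving. receiveNextChunk() is a hypothetical stand-in for the bytes delivered by RealPlayCallback:

import java.io.PipedInputStream;
import java.io.PipedOutputStream;

import org.bytedeco.javacv.FFmpegFrameGrabber;

public class PipedGrabberSketch {

 public static void main(String[] args) throws Exception {
 // One pipe for the whole session: the callback thread writes chunks,
 // the grabber reads them and only sees EOF once the pipe is closed.
 PipedOutputStream pipedOut = new PipedOutputStream();
 PipedInputStream pipedIn = new PipedInputStream(pipedOut, 1024 * 1024);

 Thread writer = new Thread(() -> {
 try {
 byte[] chunk;
 while ((chunk = receiveNextChunk()) != null) {
 pipedOut.write(chunk); // in the real code this happens inside RealPlayCallback.apply()
 }
 pipedOut.close();
 } catch (Exception e) {
 e.printStackTrace();
 }
 });
 writer.start();

 // A single grabber over the never-ending stream.
 FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(pipedIn);
 grabber.setFormat("dhav");
 grabber.start();
 // ...create one FFmpegFrameRecorder here and grab()/record() in a loop,
 // as in the full code below...
 }

 // Hypothetical helper: returns the next dhav chunk, or null when the feed stops.
 private static byte[] receiveNextChunk() {
 return null;
 }
}

With this shape the recorder is started once and stays open; only the pipe is fed with new data, so the RTMP stream does not end between chunks.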



So, can anyone tell me how to keep the RTMP stream always alive with FFmpegFrameGrabber and FFmpegFrameRecorder when the source data arrives piece by piece?
Much appreciated 😃


This is my code:


import lombok.extern.slf4j.Slf4j;
import org.bytedeco.ffmpeg.avcodec.AVCodecParameters;
import org.bytedeco.ffmpeg.avformat.AVFormatContext;
import org.bytedeco.ffmpeg.avformat.AVStream;
import org.bytedeco.ffmpeg.global.avcodec;
import org.bytedeco.ffmpeg.global.avutil;
import org.bytedeco.javacv.FFmpegFrameGrabber;
import org.bytedeco.javacv.FFmpegFrameRecorder;
import org.bytedeco.javacv.FFmpegLogCallback;
import org.bytedeco.javacv.Frame;
import org.jfjy.ch2ji.ecctv.dh.api.ApiService;
import org.jfjy.ch2ji.ecctv.dh.callback.RealPlayCallback;

import java.io.ByteArrayInputStream;
import java.nio.ByteBuffer;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

@Slf4j
public class GetBytes2PushRTMPNew2 {

 private static final String SRS_PUSH_ADDRESS = "rtmp://127.0.0.1:1935/live/livestream";

 static int BUFFER_CAPACITY = 1 * 1024 * 1024;

 public static void main(String[] args) throws Exception {
 FFmpegLogCallback.set();
 ApiService apiService = new ApiService();
 Long login = apiService.login("10.3.0.54", 8801, "admin", "xxxx");
 ByteBuffer buffer = ByteBuffer.allocate(BUFFER_CAPACITY);
 final boolean[] isByteBufferFull = {false};
 apiService.startRealPlay(new RealPlayCallback() {
 @Override
 public void apply(Long aLong, Integer integer, byte[] bytes) {
 try {
 //push data to bytebuffer
 synchronized (buffer) {
 if (buffer.remaining() > bytes.length) {
 buffer.put(bytes);
 } else {
 isByteBufferFull[0] = true;
 }
 }
 } catch (Exception e) {
 throw new RuntimeException(e);
 }
 }
 }, 0, 0);

 ExecutorService executorService = Executors.newFixedThreadPool(1);
 executorService.execute(new Runnable() {
 @Override
 public void run() {
 while (true) {
 //get data from bytebuffer when buffer is full
 synchronized (isByteBufferFull) {
 if (isByteBufferFull[0]) {
 synchronized (buffer) {
 buffer.flip();
 byte[] bytes = new byte[buffer.remaining()];
 buffer.get(bytes);
 buffer.clear();
 isByteBufferFull[0] = false;
 try {
 //using grabber and recorder to push RTMP
 grabAndPush(bytes, SRS_PUSH_ADDRESS);
 } catch (Exception e) {
 //throw new RuntimeException(e);
 }

 }
 }
 }
 try {
 Thread.sleep(500);
 } catch (InterruptedException e) {
 throw new RuntimeException(e);
 }
 }

 }
 });
 while (true) {

 }
 }

 private static synchronized void grabAndPush(byte[] bytes, String pushAddress) throws Exception {
 avutil.av_log_set_level(avutil.AV_LOG_INFO);
 FFmpegLogCallback.set();

 FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(new ByteArrayInputStream(bytes));


 grabber.setFormat("dhav");
 grabber.start();

 AVFormatContext avFormatContext = grabber.getFormatContext();

 int streamNum = avFormatContext.nb_streams();

 if (streamNum < 1) {
 log.error("no media!");
 return;
 }

 int frameRate = (int) grabber.getVideoFrameRate();
 if (0 == frameRate) {
 frameRate = 15;
 }
 log.info("frameRate[{}],duration[{}]秒,nb_streams[{}]",
 frameRate,
 avFormatContext.duration() / 1000000,
 avFormatContext.nb_streams());

 for (int i = 0; i < streamNum; i++) {
 AVStream avStream = avFormatContext.streams(i);
 AVCodecParameters avCodecParameters = avStream.codecpar();
 log.info("stream index[{}],codec type[{}],codec ID[{}]", i, avCodecParameters.codec_type(), avCodecParameters.codec_id());
 }

 int frameWidth = grabber.getImageWidth();
 int frameHeight = grabber.getImageHeight();
 int audioChannels = grabber.getAudioChannels();

 log.info("frameWidth[{}],frameHeight[{}],audioChannels[{}]",
 frameWidth,
 frameHeight,
 audioChannels);

 FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(pushAddress,
 frameWidth,
 frameHeight,
 audioChannels);

 recorder.setVideoCodec(avcodec.AV_CODEC_ID_H264);
 recorder.setInterleaved(true);

 recorder.setFormat("flv");

 recorder.setFrameRate(frameRate);

 recorder.setGopSize(frameRate);
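 // GOP size equals the frame rate, i.e. roughly one keyframe per second.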

 recorder.setAudioChannels(grabber.getAudioChannels());


 recorder.start();


 Frame frame;


 log.info("start push");

 int videoFrameNum = 0;
 int audioFrameNum = 0;
 int dataFrameNum = 0;

 int interVal = 1000 / frameRate;
 interVal /= 8;
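 // Sleeping 1/8 of a frame interval per frame feeds frames faster than real time.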

 while (null != (frame = grabber.grab())) {

 if (null != frame.image) {
 videoFrameNum++;
 }

 if (null != frame.samples) {
 audioFrameNum++;
 }

 if (null != frame.data) {
 dataFrameNum++;
 }

 recorder.record(frame);

 Thread.sleep(interVal);
 }

 log.info("push complete,videoFrameNum[{}],audioFrameNum[{}],dataFrameNum[{}]",
 videoFrameNum,
 audioFrameNum,
 dataFrameNum);

 recorder.close();
 grabber.close();
 }


}