
Other articles (61)
-
Websites made with MediaSPIP
2 May 2011
This page lists some websites based on MediaSPIP.
-
Supporting all media types
13 April 2011
Unlike most software and media-sharing platforms, MediaSPIP aims to manage as many different media types as possible. The following are just a few examples from an ever-expanding list of supported formats: images: png, gif, jpg, bmp and more; audio: MP3, Ogg, Wav and more; video: AVI, MP4, OGV, mpg, mov, wmv and more; text, code and other data: OpenOffice, Microsoft Office (Word, PowerPoint, Excel), web (html, CSS), LaTeX, Google Earth and (...)
-
Creating farms of unique websites
13 April 2011
MediaSPIP platforms can be installed as a farm, with a single "core" hosted on a dedicated server and used by multiple websites.
This allows (among other things): implementation costs to be shared between several different projects / individuals; rapid deployment of multiple unique sites; creation of groups of like-minded sites, making it possible to browse media in a more controlled and selective environment than the major "open" (...)
On other sites (7185)
-
I want to take any audio from a file and encode it as PCM_ALAW. My example converts a .m4a file to a .wav file
22 November 2023, by Clockman
I have been working on this for a while now. I am generally new to the ffmpeg library, though I have studied it a bit. The challenge is that at the point of writing to the file I get the following exception.


"Exception thrown at 0x00007FFACA8305B3 (avformat-60.dll) in FfmpegPractice.exe : 0xC0000005 : Access violation writing location 0x0000000000000000.". I understand this means am writing to an uninitialized buffer am unable to discover why this is happening. The exception call stack shows the following


avformat-60.dll!avformat_write_header() C
avformat-60.dll!ff_write_chained() C
avformat-60.dll!ff_write_chained() C
avformat-60.dll!av_write_frame() C
FfmpegPractice.exe!main() Line 215 C++



Some things I have tried


This code is part of a larger project built with CMake, but for some reason I could not step into the ffmpeg library while debugging. So I recompiled ffmpeg with debugging enabled so I could drill down to the root cause, but I still could not step into the ffmpeg library.


I then created a minimal project using a Visual Studio C++ console project, and I still could not step into the code.


I have read through many ffmpeg docs and whatever else I could find on the internet, and I still could not solve it.


This is the code


#include <iostream>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswresample/swresample.h>
#include <libavutil/opt.h>
#include <libavutil/audio_fifo.h>
}

using namespace std;

//in audio file
string filename{ "rapid_caller_test.m4a" };
AVFormatContext* pFormatCtx{};
AVCodecContext* pCodecCtx{};
AVStream* pStream{};

//out audio file
string outFilename{ "output.wav" };
AVFormatContext* pOutFormatCtx{ nullptr };
AVCodecContext* pOutCodecCtx{ nullptr };
AVIOContext* pOutIoContext{ nullptr };
const AVCodec* pOutCodec{ nullptr };
AVStream* pOutStream{ nullptr };
const int OUTPUT_CHANNELS = 1;
const int SAMPLE_RATE = 8000;
const int OUT_BIT_RATE = 64000;
uint8_t** convertedSamplesBuffer{ nullptr };
int64_t dstNmbrSamples{ 0 };
int dstLineSize{ 0 };
static int64_t pts{ 0 };

//conversion context;
SwrContext* swr{};

uint32_t i{ 0 };
int audiostream{ -1 };


void cleanUp() 
{
 avcodec_free_context(&pOutCodecCtx);
 avio_closep(&(pOutFormatCtx)->pb);
 avformat_free_context(pOutFormatCtx);
 pOutFormatCtx = nullptr;
}

int main()
{

/*
* section to setup input file
*/
if (avformat_open_input(&pFormatCtx, filename.data(), nullptr, nullptr) != 0) {
 cout << "could not open file " << filename << endl;
 return -1;
}
if (avformat_find_stream_info(pFormatCtx, nullptr) < 0) {
 cout << "Could not retrieve stream information from file " << filename << endl;
 return -1;
}
av_dump_format(pFormatCtx, 0, filename.c_str(), 0);

for (i = 0; i < pFormatCtx->nb_streams; i++) {
 if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
 audiostream = i;
 break;
 }
}
if (audiostream == -1) {
 cout << "did not find audio stream" << endl;
 return -1;
}

pStream = pFormatCtx->streams[audiostream];
const AVCodec* pCodec{ avcodec_find_decoder(pStream->codecpar->codec_id) };
pCodecCtx = avcodec_alloc_context3(pCodec);
avcodec_parameters_to_context(pCodecCtx, pStream->codecpar);
if (avcodec_open2(pCodecCtx, pCodec, nullptr)) {
 cout << "could not open codec" << endl;
 return -1;
}

/*
* section to set up output file which is a G711 audio
*/
if (avio_open(&pOutIoContext, outFilename.data(), AVIO_FLAG_WRITE)) {
 cout << "could not open out put file" << endl;
 return -1;
}
if (!(pOutFormatCtx = avformat_alloc_context())) {
 cout << "could not create format conext" << endl;
 cleanUp();
 return -1;
}
pOutFormatCtx->pb = pOutIoContext;
if (!(pOutFormatCtx->oformat = av_guess_format(nullptr, outFilename.data(), nullptr))) {
 cout << "could not find output file format" << endl;
 cleanUp();
 return -1;
}
if (!(pOutFormatCtx->url = av_strdup(outFilename.data()))) {
 cout << "could not allocate file name" << endl;
 cleanUp();
 return -1;
}
if (!(pOutCodec = avcodec_find_encoder(AV_CODEC_ID_PCM_ALAW))) {
 cout << "codec not found" << endl;
 cleanUp();
 return -1;
}
if (!(pOutStream = avformat_new_stream(pOutFormatCtx, nullptr))) {
 cout << "could not create new stream" << endl;
 cleanUp();
 return -1;
}
if (!(pOutCodecCtx = avcodec_alloc_context3(pOutCodec))) {
 cout << "could not allocate codec context" << endl;
 return -1;
}
av_channel_layout_default(&pOutCodecCtx->ch_layout, OUTPUT_CHANNELS);
pOutCodecCtx->sample_rate = SAMPLE_RATE;
pOutCodecCtx->sample_fmt = pOutCodec->sample_fmts[0];
pOutCodecCtx->bit_rate = OUT_BIT_RATE;

//setting sample rate for the container
pOutStream->time_base.den = SAMPLE_RATE;
pOutStream->time_base.num = 1;
if (pOutFormatCtx->oformat->flags & AVFMT_GLOBALHEADER)
 pOutCodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

if (avcodec_open2(pOutCodecCtx, pOutCodec, nullptr)) {
 cout << "could not open output codec" << endl;
 cleanUp();
 return -1;
}
if ((avcodec_parameters_from_context(pOutStream->codecpar, pOutCodecCtx)) < 0) {
 cout << "could not initialize stream parameters" << endl;
} 

AVPacket* packet = av_packet_alloc();

swr = swr_alloc();
swr_alloc_set_opts2(&swr, &pOutCodecCtx->ch_layout, pOutCodecCtx->sample_fmt, pOutCodecCtx->sample_rate,&pCodecCtx->ch_layout, pCodecCtx->sample_fmt, pCodecCtx->sample_rate, 0, nullptr);
swr_init(swr);

int ret{};
int bSize{};
while (av_read_frame(pFormatCtx, packet) >= 0) {
 AVFrame* pFrame = av_frame_alloc();
 AVFrame* pOutFrame = av_frame_alloc();
 if (packet->stream_index == audiostream) {
 ret = avcodec_send_packet(pCodecCtx, packet);
 while (ret >= 0) {
 ret = avcodec_receive_frame(pCodecCtx, pFrame);
 if (ret == AVERROR(EAGAIN))
 continue;
 else if (ret == AVERROR_EOF)
 break;
 dstNmbrSamples = av_rescale_rnd(swr_get_delay(swr, pCodecCtx->sample_rate) + pFrame->nb_samples, pOutCodecCtx->sample_rate, pCodecCtx->sample_rate, AV_ROUND_UP);
 if ((av_samples_alloc_array_and_samples(&convertedSamplesBuffer, &dstLineSize, pOutCodecCtx->ch_layout.nb_channels,dstNmbrSamples, pOutCodecCtx->sample_fmt, 0)) < 0) {
 cout << "coult not allocate samples array and buffer" << endl;
 }
 int channel_samples_count{ 0 };
 channel_samples_count = swr_convert(swr, convertedSamplesBuffer, dstNmbrSamples, (const uint8_t**)pFrame->data, pFrame->nb_samples);
 bSize = av_samples_get_buffer_size(&dstLineSize, pOutCodecCtx->ch_layout.nb_channels, channel_samples_count, pOutCodecCtx->sample_fmt, 0);
 cout << "no of samples is " << channel_samples_count << " the buffer size " << bSize << endl;
 pOutFrame->nb_samples = channel_samples_count;
 av_channel_layout_copy(&pOutFrame->ch_layout, &pOutCodecCtx->ch_layout);
 pOutFrame->format = pOutCodecCtx->sample_fmt;
 pOutFrame->sample_rate = pOutCodecCtx->sample_rate;
 if ((av_frame_get_buffer(pOutFrame, 0)) < 0) {
 cout << "could not allocate output frame samples " << endl;
 av_frame_free(&pOutFrame);
 }
 
 //populate out frame buffer
 av_frame_make_writable(pOutFrame);
 for (int i{ 0 }; i < bSize; i++) {
 pOutFrame->data[0][i] = convertedSamplesBuffer[0][i];
 cout << pOutFrame->data[0][i];
 }
 if (pOutFrame) {
 pOutFrame->pts = pts;
 pts += pOutFrame->nb_samples;
 }
 int res = avcodec_send_frame(pOutCodecCtx, pOutFrame);
 if (res < 0) {
 cout << "error sending frame to encoder" << endl;
 cleanUp();
 return -1;
 }
 //int er = avformat_write_header(pOutFormatCtx,nullptr);
 AVPacket* pOutPacket = av_packet_alloc();
 pOutPacket->time_base.num = 1;
 pOutPacket->time_base.den = 8000;
 if (pOutPacket == nullptr) {
 cout << "unable to allocate packet" << endl;
 }
 while (res >= 0) {
 res = avcodec_receive_packet(pOutCodecCtx, pOutPacket);
 if (res == AVERROR(EAGAIN))
 continue;
 else if (ret == AVERROR_EOF)
 break;
 av_packet_rescale_ts(pOutPacket, pOutCodecCtx->time_base, pOutFormatCtx->streams[0]->time_base);
 //av_dump_format(pOutFormatCtx, 0, outFilename.c_str(), 1);
 if (av_write_frame(pOutFormatCtx, pOutPacket) < 0) {
 cout << "could not write frame" << endl;
 }
 }
 }
}
 av_frame_free(&pFrame);
 av_frame_free(&pOutFrame);
}
if (av_write_trailer(pOutFormatCtx) < 0) {
 cout << "could not write file trailer" << endl;
}
swr_free(&swr);
avcodec_free_context(&pOutCodecCtx);
av_packet_free(&packet);
}


Error/Exception


The exception is thrown when I call


if (av_write_frame(pOutFormatCtx, pOutPacket) < 0) { cout << "could not write frame" << endl; }

I also called this line

//int er = avformat_write_header(pOutFormatCtx,nullptr);


to see if it would throw an exception, but it did not.
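For reference, the muxing call order I understand libavformat to expect (a minimal sketch of my understanding, with illustrative names, not my actual code) is: write the header once before any packets, then the packets, then the trailer.


extern "C" {
#include <libavformat/avformat.h>
}

// Minimal sketch (illustrative only): header once, then packets, then trailer.
static int muxSketch(AVFormatContext* outCtx, AVPacket* encodedPkt)
{
    // The container header must be written once, before any packet is written.
    int err = avformat_write_header(outCtx, nullptr);
    if (err < 0)
        return err;

    // Packets are then written (normally inside the encode/receive loop).
    err = av_interleaved_write_frame(outCtx, encodedPkt);
    if (err < 0)
        return err;

    // The trailer finalizes the file once all packets have been written.
    return av_write_trailer(outCtx);
}


In my code above the avformat_write_header call is commented out, so I am not sure whether this ordering is related to the crash.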


I have spent weeks on this issue with no success.
My goal is to take any audio from a file and be able to resample it if need be, and transcode it to PCM_ALAW.
I will appreciate any help I can get.


-
How to Stream RTP (IP camera) Into React App setup
10 November 2024, by sharon2469
I am trying to transfer a live broadcast from an IP camera, or any other broadcast coming from an RTP/RTSP source, to my React application, and it MUST be live.


My setup at the moment is:


IP Camera -> (RTP) -> FFmpeg -> (udp) -> Server(nodeJs) -> (WebRTC) -> React app


In the current situation there is almost no delay, but there are some things here that I can't avoid and I don't understand why, so here are my questions:


1) First, is the setup even correct, and is this the only way to stream RTP video into a web app?


2) Is it possible to avoid re-encoding the stream? The RTP transmission already comes in H.264, so I don't really need to execute the following command:


return spawn('ffmpeg', [
 '-re', // Read input at its native frame rate Important for live-streaming
 '-probesize', '32', // Set probing size to 32 bytes (32 is minimum)
 '-analyzeduration', '1000000', // An input duration of 1 second
 '-c:v', 'h264', // Video codec of input video
 '-i', 'rtp://238.0.0.2:48888', // Input stream URL
 '-map', '0:v?', // Select video from input stream
 '-c:v', 'libx264', // Video codec of output stream
 '-preset', 'ultrafast', // Faster encoding for lower latency
 '-tune', 'zerolatency', // Optimize for zero latency
 // '-s', '768x480', // Adjust the resolution (experiment with values)
 '-f', 'rtp', `rtp://127.0.0.1:${udpPort}` // Output stream URL
]);



As you can see in this command, I re-encode to libx264. But if I pass FFmpeg the parameter '-c:v', 'copy' instead of '-c:v', 'libx264', then FFmpeg throws an error saying that it doesn't know how to encode h264 and only knows libx264. Basically, I want to stop the re-encoding because there is really no need for it, since the stream is already encoded as H.264. Are there any recommendations here?


3) I thought about giving up FFmpeg completely, but the RTP packets arrive at a size of 1200+ bytes while WebRTC is limited to 1280 bytes. Is there a way to manage this without damaging the video, and is it worth entering that world? I guess the whole jitter buffer story comes into play here.


This is my server-side code (this is just test code):


import {
 MediaStreamTrack,
 randomPort,
 RTCPeerConnection,
 RTCRtpCodecParameters,
 RtpPacket,
} from 'werift'
import {Server} from "ws";
import {createSocket} from "dgram";
import {spawn} from "child_process";
import LoggerFactory from "./logger/loggerFactory";

//

const log = LoggerFactory.getLogger('ServerMedia')

// Websocket server -> WebRTC
const serverPort = 8888
const server = new Server({port: serverPort});
log.info(`Server Media start on port: ${serverPort}`);

// UDP server -> ffmpeg
const udpPort = 48888
const udp = createSocket("udp4");
// udp.bind(udpPort, () => {
// udp.addMembership("238.0.0.2");
// })
udp.bind(udpPort)
log.info(`UDP port: ${udpPort}`)


const createFFmpegProcess = () => {
 log.info(`Start ffmpeg process`)
 return spawn('ffmpeg', [
 '-re', // Read input at its native frame rate Important for live-streaming
 '-probesize', '32', // Set probing size to 32 bytes (32 is minimum)
 '-analyzeduration', '1000000', // An input duration of 1 second
 '-c:v', 'h264', // Video codec of input video
 '-i', 'rtp://238.0.0.2:48888', // Input stream URL
 '-map', '0:v?', // Select video from input stream
 '-c:v', 'libx264', // Video codec of output stream
 '-preset', 'ultrafast', // Faster encoding for lower latency
 '-tune', 'zerolatency', // Optimize for zero latency
 // '-s', '768x480', // Adjust the resolution (experiment with values)
 '-f', 'rtp', `rtp://127.0.0.1:${udpPort}` // Output stream URL
 ]);

}

let ffmpegProcess = createFFmpegProcess();


const attachFFmpegListeners = () => {
 // Capture standard output and print it
 ffmpegProcess.stdout.on('data', (data) => {
 log.info(`FFMPEG process stdout: ${data}`);
 });

 // Capture standard error and print it
 ffmpegProcess.stderr.on('data', (data) => {
 console.error(`ffmpeg stderr: ${data}`);
 });

 // Listen for the exit event
 ffmpegProcess.on('exit', (code, signal) => {
 if (code !== null) {
 log.info(`ffmpeg process exited with code ${code}`);
 } else if (signal !== null) {
 log.info(`ffmpeg process killed with signal ${signal}`);
 }
 });
};


attachFFmpegListeners();


server.on("connection", async (socket) => {
 const payloadType = 96; // It is a numerical value that is assigned to each codec in the SDP offer/answer exchange -> for H264
 // Create a peer connection with the codec parameters set in advance.
 const pc = new RTCPeerConnection({
 codecs: {
 audio: [],
 video: [
 new RTCRtpCodecParameters({
 mimeType: "video/H264",
 clockRate: 90000, // 90000 is the default value for H264
 payloadType: payloadType,
 }),
 ],
 },
 });

 const track = new MediaStreamTrack({kind: "video"});


 udp.on("message", (data) => {
 console.log(data)
 const rtp = RtpPacket.deSerialize(data);
 rtp.header.payloadType = payloadType;
 track.writeRtp(rtp);
 });

 udp.on("error", (err) => {
 console.log(err)

 });

 udp.on("close", () => {
 console.log("close")
 });

 pc.addTransceiver(track, {direction: "sendonly"});

 await pc.setLocalDescription(await pc.createOffer());
 const sdp = JSON.stringify(pc.localDescription);
 socket.send(sdp);

 socket.on("message", (data: any) => {
 if (data.toString() === 'resetFFMPEG') {
 ffmpegProcess.kill('SIGINT');
 log.info(`FFMPEG process killed`)
 setTimeout(() => {
 ffmpegProcess = createFFmpegProcess();
 attachFFmpegListeners();
 }, 5000)
 } else {
 pc.setRemoteDescription(JSON.parse(data));
 }
 });
});



And this is the frontend:





 
 
 <code class="echappe-js"><script&#xA; crossorigin&#xA; src="https://unpkg.com/react@16/umd/react.development.js"&#xA; ></script>

<script&#xA; crossorigin&#xA; src="https://unpkg.com/react-dom@16/umd/react-dom.development.js"&#xA; ></script>

<script&#xA; crossorigin&#xA; src="https://cdnjs.cloudflare.com/ajax/libs/babel-core/5.8.34/browser.min.js"&#xA; ></script>

<script src="https://cdn.jsdelivr.net/npm/babel-regenerator-runtime@6.5.0/runtime.min.js"></script>








<script type="text/babel">&#xA; let rtc;&#xA;&#xA; const App = () => {&#xA; const [log, setLog] = React.useState([]);&#xA; const videoRef = React.useRef();&#xA; const socket = new WebSocket("ws://localhost:8888");&#xA; const [peer, setPeer] = React.useState(null); // Add state to keep track of the peer connection&#xA;&#xA; React.useEffect(() => {&#xA; (async () => {&#xA; await new Promise((r) => (socket.onopen = r));&#xA; console.log("open websocket");&#xA;&#xA; const handleOffer = async (offer) => {&#xA; console.log("new offer", offer.sdp);&#xA;&#xA; const updatedPeer = new RTCPeerConnection({&#xA; iceServers: [],&#xA; sdpSemantics: "unified-plan",&#xA; });&#xA;&#xA; updatedPeer.onicecandidate = ({ candidate }) => {&#xA; if (!candidate) {&#xA; const sdp = JSON.stringify(updatedPeer.localDescription);&#xA; console.log(sdp);&#xA; socket.send(sdp);&#xA; }&#xA; };&#xA;&#xA; updatedPeer.oniceconnectionstatechange = () => {&#xA; console.log(&#xA; "oniceconnectionstatechange",&#xA; updatedPeer.iceConnectionState&#xA; );&#xA; };&#xA;&#xA; updatedPeer.ontrack = (e) => {&#xA; console.log("ontrack", e);&#xA; videoRef.current.srcObject = e.streams[0];&#xA; };&#xA;&#xA; await updatedPeer.setRemoteDescription(offer);&#xA; const answer = await updatedPeer.createAnswer();&#xA; await updatedPeer.setLocalDescription(answer);&#xA;&#xA; setPeer(updatedPeer);&#xA; };&#xA;&#xA; socket.onmessage = (ev) => {&#xA; const data = JSON.parse(ev.data);&#xA; if (data.type === "offer") {&#xA; handleOffer(data);&#xA; } else if (data.type === "resetFFMPEG") {&#xA; // Handle the resetFFMPEG message&#xA; console.log("FFmpeg reset requested");&#xA; }&#xA; };&#xA; })();&#xA; }, []); // Added socket as a dependency to the useEffect hook&#xA;&#xA; const sendRequestToResetFFmpeg = () => {&#xA; socket.send("resetFFMPEG");&#xA; };&#xA;&#xA; return (&#xA; <div>&#xA; Video: &#xA; <video ref={videoRef} autoPlay muted />&#xA; <button onClick={() => sendRequestToResetFFmpeg()}>Reset FFMPEG</button>&#xA; </div>&#xA; );&#xA; };&#xA;&#xA; ReactDOM.render(<App />, document.getElementById("app1"));&#xA;</script>





-
Render YUV frame using OpenTK [closed]
20 May 2024, by dima2012 terminator
my window
I am trying to render a YUV AVFrame that I get from the camera using OpenTK. I am creating a rectangle and trying to apply a texture to it, but it doesn't work.


Here is my window class


using OpenTK.Graphics.Egl;
using OpenTK.Graphics.OpenGL4;
using OpenTK.Windowing.Common;
using OpenTK.Windowing.Desktop;
using OpenTK.Windowing.GraphicsLibraryFramework;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;

namespace myFFmpeg
{
 public class CameraWindow : GameWindow
 {
 private int vertexBufferHandle;
 private int elementBufferHandle;
 private int vertexArrayHandle;
 private int frameNumber = 0;
 private int yTex, uTex, vTex;

 Shader shader;
 Texture texture;

 float[] vertices =
 {
 //Position | Texture coordinates
 0.5f, 0.5f, 0.0f, 1.0f, 0.0f, // top right
 0.5f, -0.5f, 0.0f, 1.0f, 1.0f, // bottom right
 -0.5f, -0.5f, 0.0f, 0.0f, 1.0f, // bottom left
 -0.5f, 0.5f, 0.0f, 0.0f, 0.0f // top left
 };


 private uint[] indices = 
 {
 0, 1, 3, // first triangle
 1, 2, 3 // second triangle
 };

 public CameraWindow(string title) : base(GameWindowSettings.Default, new NativeWindowSettings() { ClientSize = (1280, 720), Title = title }) { UpdateFrequency = 25; }

 protected override void OnUpdateFrame(FrameEventArgs e)
 {
 base.OnUpdateFrame(e);
 }

 protected override void OnLoad()
 {
 GL.ClearColor(0.5f, 0.3f, 0.3f, 1.0f);

 shader = new Shader(@"..\..\shader.vert", @"..\..\shader.frag");
 texture = new Texture();

 elementBufferHandle = GL.GenBuffer();
 GL.BindBuffer(BufferTarget.ElementArrayBuffer, elementBufferHandle);
 GL.BufferData(BufferTarget.ElementArrayBuffer, indices.Length * sizeof(uint), indices, BufferUsageHint.StaticDraw);

 vertexBufferHandle = GL.GenBuffer();
 GL.BindBuffer(BufferTarget.ArrayBuffer, vertexBufferHandle);
 GL.BufferData(BufferTarget.ArrayBuffer, vertices.Length * sizeof(float), vertices, BufferUsageHint.StaticDraw);

 GL.BindBuffer(BufferTarget.ArrayBuffer, 0);

 vertexArrayHandle = GL.GenVertexArray();
 GL.BindVertexArray(vertexArrayHandle);

 GL.BindBuffer(BufferTarget.ArrayBuffer, vertexBufferHandle);
 GL.VertexAttribPointer(0, 3, VertexAttribPointerType.Float, false, 5 * sizeof(float), 0);
 GL.EnableVertexAttribArray(0);

 int vertexShader = GL.CreateShader(ShaderType.VertexShader);
 GL.ShaderSource(vertexShader, @"..\..\shader.vert");
 GL.CompileShader(vertexShader);

 int fragmentShader = GL.CreateShader(ShaderType.FragmentShader);
 GL.ShaderSource(fragmentShader, @"..\..\shader.frag");
 GL.CompileShader(fragmentShader);

 int shaderProgram = GL.CreateProgram();
 GL.AttachShader(shaderProgram, vertexShader);
 GL.AttachShader(shaderProgram, fragmentShader);
 GL.LinkProgram(shaderProgram);


 int vertexPosLocation = GL.GetAttribLocation(shaderProgram, "vertexPos");
 GL.EnableVertexAttribArray(vertexPosLocation);
 GL.VertexAttribPointer(vertexPosLocation, 2, VertexAttribPointerType.Float, false, 4 * sizeof(float), 0);

 int texCoordLocation = GL.GetAttribLocation(shaderProgram, "texCoord");
 GL.EnableVertexAttribArray(texCoordLocation);
 GL.VertexAttribPointer(texCoordLocation, 2, VertexAttribPointerType.Float, false, 4 * sizeof(float), 2 * sizeof(float));

 GL.UseProgram(shaderProgram);

 GL.ActiveTexture(TextureUnit.Texture0);
 GL.BindTexture(TextureTarget.Texture2D, yTex);
 GL.Uniform1(GL.GetUniformLocation(shaderProgram, "yTex"), 0);

 GL.ActiveTexture(TextureUnit.Texture1);
 GL.BindTexture(TextureTarget.Texture2D, uTex);
 GL.Uniform1(GL.GetUniformLocation(shaderProgram, "uTex"), 1);

 GL.ActiveTexture(TextureUnit.Texture2);
 GL.BindTexture(TextureTarget.Texture2D, vTex);
 GL.Uniform1(GL.GetUniformLocation(shaderProgram, "vTex"), 2);

 GL.BindVertexArray(0);
 //code

 base.OnLoad();
 }

 protected override void OnUnload()
 {
 GL.BindBuffer(BufferTarget.ArrayBuffer, 0);
 GL.DeleteBuffer(vertexBufferHandle);
 GL.UseProgram(0);
 shader.Dispose();

 //code

 base.OnUnload();
 }

 protected override void OnRenderFrame(FrameEventArgs e)
 {

 GL.Clear(ClearBufferMask.ColorBufferBit);

 shader.Use();
 texture.Use(frameNumber++);

 GL.BindVertexArray(vertexArrayHandle);

 GL.DrawElements(PrimitiveType.Triangles, indices.Length, DrawElementsType.UnsignedInt, indices);

 Context.SwapBuffers();

 base.OnRenderFrame(e);
 }

 protected override void OnFramebufferResize(FramebufferResizeEventArgs e)
 {
 base.OnFramebufferResize(e);

 GL.Viewport(0, 0, e.Width, e.Height);
 }
 }
}



And my texture class:


using System;
using OpenTK;
using OpenTK.Graphics.OpenGL4;
using SkiaSharp;
using FFmpeg;
using SkiaSharp.Internals;
using StbImageSharp;
using FFmpeg.AutoGen;
using System.Threading;

namespace myFFmpeg
{
 public class Texture
 {
 int Handle, yTex, uTex, vTex;

 Program program = new Program();

 public Texture()
 {
 Handle = GL.GenTexture();
 }


 public unsafe void Use(int frameNumber)
 {
 GL.BindTexture(TextureTarget.Texture2D, Handle);

 // Generate textures only once (outside the loop)
 if (yTex == 0)
 {
 GL.GenTextures(1, out yTex);
 }
 if (uTex == 0)
 {
 GL.GenTextures(1, out uTex);
 }
 if (vTex == 0)
 {
 GL.GenTextures(1, out vTex);
 }

 // Bind textures to specific units before rendering each frame
 GL.ActiveTexture(TextureUnit.Texture0);
 GL.BindTexture(TextureTarget.Texture2D, yTex);
 GL.ActiveTexture(TextureUnit.Texture1);
 GL.BindTexture(TextureTarget.Texture2D, uTex);
 GL.ActiveTexture(TextureUnit.Texture2);

 // Update textures with new frame data from FFmpeg
 AVFrame frame = program.getFrame();
 int width = frame.width;
 int height = frame.height;

 Console.BackgroundColor = ConsoleColor.White;
 Console.ForegroundColor = ConsoleColor.Black;
 Console.WriteLine((AVPixelFormat)frame.format);
 Console.BackgroundColor = ConsoleColor.Black;


 // Assuming YUV data is stored in separate planes (Y, U, V)
 GL.BindTexture(TextureTarget.Texture2D, yTex);
 GL.TexImage2D(TextureTarget.Texture2D, 0, PixelInternalFormat.Luminance, width, height, 0, PixelFormat.Luminance, PixelType.UnsignedByte, (IntPtr)frame.data[0]);
 GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMinFilter, (int)TextureMinFilter.Linear);
 GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMagFilter, (int)TextureMagFilter.Linear);

 GL.BindTexture(TextureTarget.Texture2D, uTex);
 GL.TexImage2D(TextureTarget.Texture2D, 0, PixelInternalFormat.Luminance, width / 2, height / 2, 0, PixelFormat.Luminance, PixelType.UnsignedByte, (IntPtr)frame.data[1]);
 GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMinFilter, (int)TextureMinFilter.Linear);
 GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMagFilter, (int)TextureMagFilter.Linear);

 GL.BindTexture(TextureTarget.Texture2D, vTex);
 GL.TexImage2D(TextureTarget.Texture2D, 0, PixelInternalFormat.Luminance, width / 2, height / 2, 0, PixelFormat.Luminance, PixelType.UnsignedByte, (IntPtr)frame.data[2]);
 GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMinFilter, (int)TextureMinFilter.Linear);
 GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMinFilter, (int)TextureMinFilter.Linear);

 }
 }
}




And my shader class :


using OpenTK.Graphics.OpenGL4;
using System;
using System.IO;

namespace myFFmpeg
{
 public class Shader : IDisposable
 {
 public int Handle { get; private set; }

 public Shader(string vertexPath, string fragmentPath)
 {
 string vertexShaderSource = File.ReadAllText(vertexPath);
 string fragmentShaderSource = File.ReadAllText(fragmentPath);

 int vertexShader = GL.CreateShader(ShaderType.VertexShader);
 GL.ShaderSource(vertexShader, vertexShaderSource);
 GL.CompileShader(vertexShader);
 CheckShaderCompilation(vertexShader);

 int fragmentShader = GL.CreateShader(ShaderType.FragmentShader);
 GL.ShaderSource(fragmentShader, fragmentShaderSource);
 GL.CompileShader(fragmentShader);
 CheckShaderCompilation(fragmentShader);

 Handle = GL.CreateProgram();
 GL.AttachShader(Handle, vertexShader);
 GL.AttachShader(Handle, fragmentShader);
 GL.LinkProgram(Handle);
 CheckProgramLinking(Handle);

 GL.DetachShader(Handle, vertexShader);
 GL.DetachShader(Handle, fragmentShader);
 GL.DeleteShader(vertexShader);
 GL.DeleteShader(fragmentShader);
 }

 public void Use()
 {
 GL.UseProgram(Handle);
 }

 public int GetAttribLocation(string attribName)
 {
 return GL.GetAttribLocation(Handle, attribName);
 }

 public int GetUniformLocation(string uniformName)
 {
 return GL.GetUniformLocation(Handle, uniformName);
 }

 private void CheckShaderCompilation(int shader)
 {
 GL.GetShader(shader, ShaderParameter.CompileStatus, out int success);
 if (success == 0)
 {
 string infoLog = GL.GetShaderInfoLog(shader);
 throw new InvalidOperationException($"Shader compilation failed: {infoLog}");
 }
 }

 private void CheckProgramLinking(int program)
 {
 GL.GetProgram(program, GetProgramParameterName.LinkStatus, out int success);
 if (success == 0)
 {
 string infoLog = GL.GetProgramInfoLog(program);
 throw new InvalidOperationException($"Program linking failed: {infoLog}");
 }
 }

 public void Dispose()
 {
 GL.DeleteProgram(Handle);
 }
 }
}



Vert shader


#version 330 core
layout(location = 0) in vec3 vertexPos;
layout(location = 1) in vec2 texCoord;

out vec2 TexCoord; 

void main()
{
 gl_Position = vec4(vertexPos,1.0);
 TexCoord = texCoord;
}



Frag shader


#version 330 core
in vec2 TexCoord;
out vec4 color;

uniform sampler2D yTex;
uniform sampler2D uTex;
uniform sampler2D vTex;

void main()
{
 float y = texture(yTex, TexCoord).r;
 float u = texture(uTex, TexCoord).r - 0.5;
 float v = texture(vTex, TexCoord).r - 0.5;

 // YUV to RGB conversion (BT.709)
 float r = y + 1.5714 * v;
 float g = y - 0.6486 * u - 0.3918 * v;
 float b = y + 1.8556 * u;

 color = vec4(r, g, b, 1.0);
}
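For reference, the full-range BT.709 YUV-to-RGB conversion I am aiming for (standard coefficients as I understand them, with U and V already centred at zero) is:


R = Y + 1.5748 * V
G = Y - 0.1873 * U - 0.4681 * V
B = Y + 1.8556 * U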



I can provide more code if needed.


I tried changing shaders, changing textures, and getting the frame using:
ffmpeg.av_hwframe_transfer_data(_receivedFrame, _pFrame, 0);