
Media (1)
-
Map of Schillerkiez
13 May 2011
Updated: September 2011
Language: English
Type: Text
Other articles (68)
-
Personalize it by adding your logo, banner or background image
5 September 2013
Some themes support three personalization elements: adding a logo; adding a banner; adding a background image.
-
Writing a news item
21 June 2013
Present changes to your MediaSPIP, or news about your projects, on your MediaSPIP through the news section.
In MediaSPIP's default theme, spipeo, news items are displayed at the bottom of the main page, below the editorials.
You can personalize the form used to create a news item.
News item creation form: for a document of type "news item", the default fields are: publication date (personalize the publication date) (...)
-
Publishing on MediaSPIP
13 June 2013
Can I post content from an iPad tablet?
Yes, if your MediaSPIP installation is at version 0.2 or later. If in doubt, contact your MediaSPIP administrator to find out.
On other sites (12501)
-
How to fill an AVFrame structure in order to encode a YUY2 video (or UYVY) into H265
22 April, by Rich Deng
I want to compress a video stream in YUY2 or UYVY format to, say, H265. If I understand the answers given in this thread correctly, I should be able to use the function av_image_fill_arrays() to fill the data and linesize arrays of an AVFrame object, call avcodec_send_frame(), and then avcodec_receive_packet() to get the encoded data:

bool VideoEncoder::Init(const AM_MEDIA_TYPE* pMediaType)
{
 // we should have a valid pointer
 if (pMediaType)
 {
 m_mtInput.Empty();
 m_mtInput.Set(*pMediaType);
 }
 else
 return false;

 // find encoder
 m_pCodec = m_spAVCodecDlls->avcodec_find_encoder(AV_CODEC_ID_HEVC);
 m_pCodecCtx = m_spAVCodecDlls->avcodec_alloc_context3(m_pCodec);
 if (!m_pCodec || !m_pCodecCtx)
 {
 Log.Log(_T("Failed to find or allocate codec context!"));
 return false;
 }

 AVPixelFormat ePixFmtInput = GetInputPixelFormat();
 if (CanConvertInputFormat(ePixFmtInput) == false)
 {
 return false;
 }

 // we are able to convert
 // so continue with setting it up
 int nWidth = m_mtInput.GetWidth();
 int nHeight = m_mtInput.GetHeight();

 // Set encoding parameters

 // Set bitrate (4 Mbps for 1920x1080)
 m_pCodecCtx->bit_rate = (((int64)4000000 * nWidth / 1920) * nHeight / 1080); 

 m_pCodecCtx->width = nWidth; 
 m_pCodecCtx->height = nHeight;


 // use reference time as time_base
 m_pCodecCtx->time_base.den = 10000000; 
 m_pCodecCtx->time_base.num = 1;

 SetAVRational(m_pCodecCtx->framerate, m_mtInput.GetFrameRate());
 //m_pCodecCtx->framerate = (AVRational){ 30, 1 };
 m_pCodecCtx->gop_size = 10; // GOP size
 m_pCodecCtx->max_b_frames = 1;

 // set pixel format
 m_pCodecCtx->pix_fmt = ePixFmtInput; // YUV 4:2:0 format or YUV 4:2:2

 // Open the codec
 if (m_spAVCodecDlls->avcodec_open2(m_pCodecCtx, m_pCodec, NULL) < 0)
 {
 return false;
 }

 return true;
}

bool VideoEncoder::AllocateFrame()
{

 m_pFrame = m_spAVCodecDlls->av_frame_alloc();
 if (m_pFrame == NULL)
 {
 Log.Log(_T("Failed to allocate frame object!"));
 return false;
 }

 m_pFrame->format = m_pCodecCtx->pix_fmt;
 m_pFrame->width = m_pCodecCtx->width;
 m_pFrame->height = m_pCodecCtx->height;

 m_pFrame->time_base.den = m_pCodecCtx->time_base.den;
 m_pFrame->time_base.num = m_pCodecCtx->time_base.num;


 return true;
}

bool VideoEncoder::Encode(IMediaSample* pSample)
{
 if (m_pFrame == NULL)
 {
 return false;
 }

 // get the time stamps
 REFERENCE_TIME rtStart, rtEnd;
 HRESULT hr = pSample->GetTime(&rtStart, &rtEnd);
 m_rtInputFrameStart = rtStart;
 m_rtInputFrameEnd = rtEnd;


 // get length
 int nLength = pSample->GetActualDataLength();

 // get pointer to actual sample data
 uint8_t* pData = NULL;
 hr = pSample->GetPointer(&pData);

 if (FAILED(hr) || NULL == pData)
 return false;

 m_pFrame->flags = (S_OK == pSample->IsSyncPoint()) ? (m_pFrame->flags | AV_FRAME_FLAG_KEY) : (m_pFrame->flags & ~AV_FRAME_FLAG_KEY);

 // clear old data
 for (int n = 0; n < AV_NUM_DATA_POINTERS; n++)
 {
 m_pFrame->data[n] = NULL;// (uint8_t*)aryData[n];
 m_pFrame->linesize[n] = 0;// = aryStride[n];
 }


 int nRet = 0;
 int nStride = m_mtInput.GetStride();
 nRet = m_spAVCodecDlls->av_image_fill_arrays(m_pFrame->data, m_pFrame->linesize, pData, ePixFmt, m_pFrame->width, m_pFrame->height, 32);
 if (nRet < 0)
 {
 return false;
 }

 m_pFrame->pts = (int64_t) rtStart;
 m_pFrame->duration = rtEnd - rtStart;
 nRet = m_spAVCodecDlls->avcodec_send_frame(m_pCodecCtx, m_pFrame);
 if (nRet == AVERROR(EAGAIN))
 {
 ReceivePacket();
 nRet = m_spAVCodecDlls->avcodec_send_frame(m_pCodecCtx, m_pFrame);
 }

 if (nRet < 0)
 {
 return false;
 }

 // Receive the encoded packets
 ReceivePacket();

 return true;
}

bool VideoEncoder::ReceivePacket()
{
 bool bRet = true;
 AVPacket* pkt = m_spAVCodecDlls->av_packet_alloc();
 while (m_spAVCodecDlls->avcodec_receive_packet(m_pCodecCtx, pkt) == 0)
 {
 // Write pkt->data to output file or stream
 m_pCallback->VideoEncoderWriteEncodedSample(pkt);
 if (m_OutFile.IsOpen())
 m_OutFile.Write(pkt->data, pkt->size);
 m_spAVCodecDlls->av_packet_unref(pkt);
 }
 m_spAVCodecDlls->av_packet_free(&pkt);

 return bRet;
}



I must have done something wrong. The result is not correct. For example, rather than a video with a person's face showing in the middle of the screen, I get a mostly green screen with parts of the face showing up at the lower left and lower right corners.


Can someone help me?
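
The symptom described (a mostly green frame with pieces of the image pushed into the corners) is typically what appears when packed 4:2:2 data such as YUY2 is handed to the encoder as if it were planar, and as far as I know the common H.265 encoders (libx265, for example) only accept planar input anyway. Not an authoritative fix, but here is a minimal sketch of converting the packed buffer with libswscale into a planar frame before avcodec_send_frame(), assuming the codec context is opened with a planar format such as AV_PIX_FMT_YUV420P; pData, nStride, nWidth, nHeight, codecCtx and frame stand in for the question's own variables:

#include <libswscale/swscale.h>

// Source: packed YUY2 (AV_PIX_FMT_YUYV422) in one contiguous buffer.
const uint8_t* srcData[4] = { pData, NULL, NULL, NULL };
int srcLinesize[4] = { nStride, 0, 0, 0 };   // stride of the packed buffer in bytes

// Destination: a writable planar frame matching the encoder context.
frame->format = codecCtx->pix_fmt;           // e.g. AV_PIX_FMT_YUV420P
frame->width = codecCtx->width;
frame->height = codecCtx->height;
av_frame_get_buffer(frame, 0);               // let FFmpeg allocate data[] and linesize[]
av_frame_make_writable(frame);

// Convert packed 4:2:2 into the encoder's planar format.
SwsContext* sws = sws_getContext(nWidth, nHeight, AV_PIX_FMT_YUYV422,
                                 nWidth, nHeight, codecCtx->pix_fmt,
                                 SWS_BILINEAR, NULL, NULL, NULL);
sws_scale(sws, srcData, srcLinesize, 0, nHeight, frame->data, frame->linesize);
sws_freeContext(sws);

For UYVY input the source format would be AV_PIX_FMT_UYVY422 instead, and the SwsContext can be created once and reused for every frame rather than rebuilt per call.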


-
forgejo/autolabeler: clean up logic a bit and prevent self-looping
8 August, by Timo Rothenpieler
-
Bash script having custom functions not running under systemd service
13 August, by nightcrawler
I have this script to get images from a webcam & process them via the RKNN NPU:


#!/bin/bash

# Define the temporary directory for images
TEMP_DIR="/media/32GB/pics"
# Define the resize/letterbox option
RESIZE_OPTION="letterbox" # or "letterbox" depending on your requirement
# Define the output image path pattern
OUTPUT_IMAGE_PATH="/media/32GB/processed_pics/%Y-%m-%d_%H-%M-%S_processed.jpg"
# Define the path to the rknn_yolov5_demo_Linux binary
BINARY_PATH="$HOME/ezrknn-toolkit2/rknpu2/examples/rknn_yolov5_demo/install/rknn_yolov5_demo_Linux"
# Define ntfy variables
NTFY_URL="https://ntfy.org/ho"
NTFY_USER="xxx"
NTFY_PASS="xxxx"

# Empty existing content
rm "$TEMP_DIR"/*
# Function to run ffmpeg and write images to temporary files
run_ffmpeg() {
 v380 -u xxxx -p xxxx -addr 192.168.1.xxx | ffmpeg -i - -f image2 -vf fps=3 -strftime 1 "$TEMP_DIR/%Y-%m-%d_%H-%M-%S_cap.jpg" -y
}

# Function to run rknn_yolov5_demo_Linux and process images from temporary files
run_rknn_yolov5_demo() {
 while true; do
 # Find the most recent image file in the temporary directory
 IMAGE_PATH=$(ls -t "$TEMP_DIR"/*.jpg | head -n 1)

 # Check if the image path is not empty
 if [ -n "$IMAGE_PATH" ]; then
 # Define the output image path
 OUTPUT_IMAGE=$(date +"$OUTPUT_IMAGE_PATH")

 # Change to the binary directory and set LD_LIBRARY_PATH
 DETECTION_OUTPUT=$(cd "$BINARY_PATH" && LD_LIBRARY_PATH=./lib ./rknn_yolov5_demo ./model/RK3566_RK3568/yolov5s-640-640.rknn "$IMAGE_PATH" "$RESIZE_OPTION" "$OUTPUT_IMAGE")

 # Check if the detection output contains the word "person"
 if echo "$DETECTION_OUTPUT" | grep -q "person"; then
 echo "Human detected. Saving processed image to $OUTPUT_IMAGE"
 rm "$IMAGE_PATH"
 # Upload the image using the imgur binary and capture the link
 UPLOAD_OUTPUT=$(imgur "$OUTPUT_IMAGE")
 UPLOAD_LINK=$(echo "$UPLOAD_OUTPUT" | grep -m 1 '^http')

 if [ -n "$UPLOAD_LINK" ]; then
 echo "Image uploaded successfully. Link: $UPLOAD_LINK"
 # Send ntfy notification with the image link
 curl -u $NTFY_USER:$NTFY_PASS -H "tags:rotating_light" -H "Attach:$UPLOAD_LINK" -d "Human detected" $NTFY_URL
 else
 echo "Failed to upload image."
 fi
 else
 rm "$OUTPUT_IMAGE"
 rm "$IMAGE_PATH"
 fi
 fi

 # Sleep for a short period to avoid high CPU usage
 sleep 1
 done
}


# Run ffmpeg and rknn_yolov5_demo_Linux in the background
run_ffmpeg &
run_rknn_yolov5_demo &



And the corresponding .service file:


[Unit]
Description=Process Images with rknn_yolov5_demo
After=network.target
#StartLimitIntervalSec=60
#StartLimitBurst=5

[Service]
Type=simple
ExecStartPre=/bin/sleep 30
ExecStart=/home/xxx/process_images_rknn.sh
Restart=always
RestartSec=3
TimeoutStartSec=60

[Install]
WantedBy=default.target



Now the last two lines of the script are causing problems.


Case 1] If I keep it like this, htop shows no sign of the ffmpeg or rknn binaries starting.

Case 2] If I remove the & from both lines, then only ffmpeg runs and rknn is nowhere in htop.


Case 3] Only this case works:


run_ffmpeg &
run_rknn_yolov5_demo



In each case I reload the systemd daemon and restart the service after modifying the script.
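
A hedged explanation for why only case 3 behaves: with Type=simple, systemd treats the shell running the script as the service's main process. When both functions are sent to the background, the script reaches its end and exits immediately, and systemd then cleans up the remaining processes in the unit's control group (the default KillMode), so neither ffmpeg nor rknn stays alive; removing the & from both lines instead makes run_ffmpeg block forever, so run_rknn_yolov5_demo never starts. One way to keep both functions in the background under this service is to have the script wait on them, as in this sketch:

# Run both workers in the background, then keep the main shell alive so that
# systemd (Type=simple) does not consider the service as having exited.
run_ffmpeg &
run_rknn_yolov5_demo &

# Block until the background jobs end; the service's main PID stays running.
wait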