
Other articles (69)

  • Publishing on MédiaSpip

    13 June 2013

    Can I post content from an iPad tablet?
    Yes, provided your MédiaSpip installation is at version 0.2 or higher. If in doubt, ask the administrator of your MédiaSpip to find out.

  • Creating farms of unique websites

    13 April 2011

    MediaSPIP platforms can be installed as a farm, with a single "core" hosted on a dedicated server and used by multiple websites.
    This allows (among other things): implementation costs to be shared between several different projects/individuals; rapid deployment of multiple unique sites; creation of groups of like-minded sites, making it possible to browse media in a more controlled and selective environment than the major "open" (...)

  • Permissions overridden by plugins

    27 April 2010

    Mediaspip core
    autoriser_auteur_modifier(), so that visitors are able to edit their information on the authors page

On other sites (11481)

  • Decoding and resampling audio with FFmpeg for output with libao

    13 May 2020, by DoctorSelar

    I'm trying to write a program to read and play an audio file using FFmpeg and libao. I've been following the procedure outlined in the FFmpeg documentation for decoding audio with the new avcodec_send_packet and avcodec_receive_frame functions, but the examples I've been able to find are few and far between (the ones in the FFmpeg documentation either don't use libavformat or use the deprecated avcodec_decode_audio4). I've based a lot of my program on the transcode_aac.c example (up to init_resampler) in the FFmpeg documentation, but that also uses the deprecated decoding function.
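
    For anyone skimming: the overall shape of the new decode loop, as I understand it from the avcodec documentation, is roughly the following (a sketch only; fmt_ctx, dec_ctx, pkt and frame are placeholder names, and error handling is elided):

    AVPacket pkt;
    AVFrame *frame = av_frame_alloc();
    // Feed packets to the decoder and pull out as many frames as it will give.
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        avcodec_send_packet(dec_ctx, &pkt);   // one packet in...
        av_packet_unref(&pkt);
        while (avcodec_receive_frame(dec_ctx, frame) == 0) {
            // ...zero or more frames out; samples live in frame->extended_data
            av_frame_unref(frame);
        }
    }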

    I believe I have the decoding part of the program working, but I need to resample the audio in order to convert it into an interleaved format to send to libao, for which I'm attempting to use libswresample. Whenever the program is run in its current state, it prints "Error resampling: Output changed" many times. The test file I've been using is just a YouTube rip that I had on hand. ffprobe reports the only stream as:

    Stream #0:0(und): Audio: aac (LC) (mp4a / 0x6134706D), 44100 Hz, stereo, fltp, 125 kb/s (default)
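
    What I'm aiming for, as far as I understand libswresample, is something like this per decoded frame (a sketch under the assumption that resample_context was initialized as in init_resampler below; out_buf and out_linesize are placeholders):

    // Convert one planar-float frame to interleaved S16 and hand it to libao.
    uint8_t *out_buf = NULL;
    int out_linesize;
    av_samples_alloc(&out_buf, &out_linesize, OUTPUT_CHANNELS,
                     frame->nb_samples, AV_SAMPLE_FMT_S16, 0);
    int converted = swr_convert(resample_context,
                                &out_buf, frame->nb_samples,
                                (const uint8_t **)frame->extended_data,
                                frame->nb_samples);
    if (converted > 0)
        ao_play(device, (char *)out_buf,
                converted * OUTPUT_CHANNELS * sizeof(int16_t));
    av_freep(&out_buf);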

    This is my first program with FFmpeg (and I'm still relatively new to C), so any advice on how to improve/fix other parts of the program would be welcome.

    #include <stdio.h>
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libavutil/avutil.h>
    #include <libswresample/swresample.h>
    #include <ao/ao.h>

    #define OUTPUT_CHANNELS 2
    #define OUTPUT_RATE 44100
    #define BUFFER_SIZE 192000
    #define OUTPUT_BITS 16
    #define OUTPUT_FMT AV_SAMPLE_FMT_S16

    static char *errtext (int err) {
        static char errbuff[256];
        av_strerror(err,errbuff,sizeof(errbuff));
        return errbuff;
    }

    static int open_audio_file (const char *filename, AVFormatContext **context, AVCodecContext **codec_context) {
        AVCodecContext *avctx;
        AVCodec *codec;
        int ret;
        int stream_id;
        int i;

        // Open input file
        if ((ret = avformat_open_input(context,filename,NULL,NULL)) < 0) {
            fprintf(stderr,"Error opening input file '%s': %s\n",filename,errtext(ret));
            *context = NULL;
            return ret;
        }

        // Get stream info
        if ((ret = avformat_find_stream_info(*context,NULL)) < 0) {
            fprintf(stderr,"Unable to find stream info: %s\n",errtext(ret));
            avformat_close_input(context);
            return ret;
        }

        // Find the best stream
        if ((stream_id = av_find_best_stream(*context,AVMEDIA_TYPE_AUDIO,-1,-1,&codec,0)) < 0) {
            fprintf(stderr,"Unable to find valid audio stream: %s\n",errtext(stream_id));
            avformat_close_input(context);
            return stream_id;
        }

        // Allocate a decoding context
        if (!(avctx = avcodec_alloc_context3(codec))) {
            fprintf(stderr,"Unable to allocate decoder context\n");
            avformat_close_input(context);
            return AVERROR(ENOMEM);
        }

        // Initialize stream parameters
        if ((ret = avcodec_parameters_to_context(avctx,(*context)->streams[stream_id]->codecpar)) < 0) {
            fprintf(stderr,"Unable to get stream parameters: %s\n",errtext(ret));
            avformat_close_input(context);
            avcodec_free_context(&avctx);
            return ret;
        }

        // Open the decoder
        if ((ret = avcodec_open2(avctx,codec,NULL)) < 0) {
            fprintf(stderr,"Could not open codec: %s\n",errtext(ret));
            avformat_close_input(context);
            avcodec_free_context(&avctx);
            return ret;
        }

        *codec_context = avctx;
        return 0;
    }

    static void init_packet (AVPacket *packet) {
        av_init_packet(packet);
        packet->data = NULL;
        packet->size = 0;
    }

    static int init_resampler (AVCodecContext *codec_context, SwrContext **resample_context) {
        int ret;

        // Set resampler options
        *resample_context = swr_alloc_set_opts(NULL,
                                               av_get_default_channel_layout(OUTPUT_CHANNELS),
                                               OUTPUT_FMT,
                                               codec_context->sample_rate,
                                               av_get_default_channel_layout(codec_context->channels),
                                               codec_context->sample_fmt,
                                               codec_context->sample_rate,
                                               0,NULL);
        if (!(*resample_context)) {
            fprintf(stderr,"Unable to allocate resampler context\n");
            return AVERROR(ENOMEM);
        }

        // Open the resampler
        if ((ret = swr_init(*resample_context)) < 0) {
            fprintf(stderr,"Unable to open resampler context: %s\n",errtext(ret));
            swr_free(resample_context);
            return ret;
        }

        return 0;
    }

    static int init_frame (AVFrame **frame) {
        if (!(*frame = av_frame_alloc())) {
            fprintf(stderr,"Could not allocate frame\n");
            return AVERROR(ENOMEM);
        }
        return 0;
    }

    int main (int argc, char *argv[]) {
        AVFormatContext *context = 0;
        AVCodecContext *codec_context;
        SwrContext *resample_context = NULL;
        AVPacket packet;
        AVFrame *frame = 0;
        AVFrame *resampled = 0;
        int16_t *buffer;
        int ret, packet_ret, finished;

        ao_device *device;
        ao_sample_format format;
        int default_driver;

        if (argc != 2) {
            fprintf(stderr,"Usage: %s <filename>\n",argv[0]);
            return 1;
        }

        av_register_all();
        printf("Opening file...\n");
        if (open_audio_file(argv[1],&context,&codec_context) < 0)
            return 1;

        printf("Initializing resampler...\n");
        if (init_resampler(codec_context,&resample_context) < 0) {
            avformat_close_input(&context);
            avcodec_free_context(&codec_context);
            return 1;
        }

        // Setup libao
        printf("Starting audio device...\n");
        ao_initialize();
        default_driver = ao_default_driver_id();
        format.bits = OUTPUT_BITS;
        format.channels = OUTPUT_CHANNELS;
        format.rate = codec_context->sample_rate;
        format.byte_format = AO_FMT_NATIVE;
        format.matrix = 0;
        if ((device = ao_open_live(default_driver,&format,NULL)) == NULL) {
            fprintf(stderr,"Error opening audio device\n");
            avformat_close_input(&context);
            avcodec_free_context(&codec_context);
            swr_free(&resample_context);
            return 1;
        }

        // Mainloop
        printf("Beginning mainloop...\n");
        init_packet(&packet);
        // Read packets until done
        while (1) {
            packet_ret = av_read_frame(context,&packet);
            // Send a packet
            if ((ret = avcodec_send_packet(codec_context,&packet)) < 0)
                fprintf(stderr,"Error sending packet to decoder: %s\n",errtext(ret));

            av_packet_unref(&packet);

            while (1) {
                if (!frame)
                    frame = av_frame_alloc();

                ret = avcodec_receive_frame(codec_context,frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) // Need more input
                    break;
                else if (ret < 0) {
                    fprintf(stderr,"Error receiving frame: %s\n",errtext(ret));
                    break;
                }
                // We have a valid frame, need to resample it
                if (!resampled)
                    resampled = av_frame_alloc();

                resampled->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
                resampled->sample_rate = codec_context->sample_rate;
                resampled->format = OUTPUT_FMT;

                if ((ret = swr_convert_frame(resample_context,resampled,frame)) < 0) {
                    fprintf(stderr,"Error resampling: %s\n",errtext(ret));
                } else {
                    ao_play(device,(char*)resampled->extended_data[0],resampled->linesize[0]);
                }
                av_frame_unref(resampled);
                av_frame_unref(frame);
            }

            if (packet_ret == AVERROR_EOF)
                break;
        }

        printf("Closing file and freeing contexts...\n");
        avformat_close_input(&context);
        avcodec_free_context(&codec_context);
        swr_free(&resample_context);

        printf("Closing audio device...\n");
        ao_close(device);
        ao_shutdown();

        return 0;
    }


    UPDATE: I've got it playing sound now, but it sounds like samples are missing (and with MP3 files it warns "Could not update timestamps for skipped samples"). The issue was that the resampled frame needed certain attributes set before being passed to swr_convert_frame. I've also added av_packet_unref and av_frame_unref, but I'm still unsure where best to place them.
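
    One thing I now suspect (unconfirmed) is that the missing samples are the decoder's tail: the loop breaks at EOF without draining. The documented way to flush the send/receive API is to send a NULL packet once and keep receiving until the decoder is empty, roughly:

    // After av_read_frame() returns AVERROR_EOF: enter draining mode...
    avcodec_send_packet(codec_context, NULL);
    // ...and pull out the remaining buffered frames until AVERROR_EOF.
    while (avcodec_receive_frame(codec_context, frame) == 0) {
        /* resample and play the frame exactly as in the main loop */
        av_frame_unref(frame);
    }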


  • What's the most desirable way to capture system display and audio in the form of individual encoded audio and video packets in Go (the language)? [closed]

    11 January 2023, by Tiger Yang

    Question (read the context below first):


    For those of you familiar with the capabilities of Go: is there a better way to go about all this? Since ffmpeg is so ubiquitous, I'm sure it's been optimized to perfection, but what's the best way to capture the system display and audio as individual encoded audio and video packets in Go, so that they can then be sent via webtransport-go? I want to prioritize efficiency and low latency, and ideally capture and encode the framebuffer directly, the way ffmpeg does.


    Thanks! I have many other questions about this, but I think it's best to ask them as I go.


    Context and what I've done so far:


    I'm writing remote desktop software for my personal use because of grievances with the current solutions out there. At the moment, it consists of a web app that uses the WebTransport API to send input datagrams and receive AV packets on two dedicated unidirectional streams, and the WebCodecs API to decode those packets. On the server side, I originally planned to use Python with the aioquic library as a WebTransport server. Upon connection and authentication, the server would start ffmpeg as a subprocess with this command:


    ffmpeg -init_hw_device d3d11va -filter_complex ddagrab=video_size=1920x1080:framerate=60 -vcodec hevc_nvenc -tune ll -preset p7 -spatial_aq 1 -temporal_aq 1 -forced-idr 1 -rc cbr -b:v 400K -no-scenecut 1 -g 216000 -f hevc -


    What I really appreciate about this is that it uses Windows' Desktop Duplication API to copy the framebuffer of my GPU and hand it directly to the on-die hardware encoder, with zero round trips to the CPU. I think it's about as efficient and elegant a solution as I can manage. ffmpeg then writes the encoded stream to stdout, which Python can read and send to the client.


    As for the audio, there is another ffmpeg instance:


    ffmpeg -f dshow -channels 2 -sample_rate 48000 -sample_size 16 -audio_buffer_size 15 -i audio="RD Audio (High Definition Audio Device)" -acodec libopus -vbr on -application audio -mapping_family 0 -apply_phase_inv true -b:a 25K -fec false -packet_loss 0 -map 0 -f data -


    which listens on a physical loopback interface, which is literally just a short wire bridging the front-panel headphone and microphone jacks (I'm aware of the quality loss from converting to analog and back, but the audio is then crushed down to 25 kbps anyway, so it's fine).


    Unfortunately, aioquic was not easy to work with, IMO, and I found webtransport-go (https://github.com/adriancable/webtransport-go), which was a hell of a lot better in both simplicity and documentation. However, now I'm dealing with a whole new language, which brings me to the question above.


    EDIT: Here's the code for my server so far:

    package main

    import (
        "bytes"
        "context"
        "fmt"
        "log"
        "net/http"
        "os/exec"
        "time"

        "github.com/adriancable/webtransport-go"
    )

    func warn(str string) {
        fmt.Printf("\n===== WARNING ===================================================================================================\n   %s\n=================================================================================================================\n", str)
    }

    func main() {

        password := []byte("abc")

        videoString := []string{
            "ffmpeg",
            "-init_hw_device", "d3d11va",
            "-filter_complex", "ddagrab=video_size=1920x1080:framerate=60",
            "-vcodec", "hevc_nvenc",
            "-tune", "ll",
            "-preset", "p7",
            "-spatial_aq", "1",
            "-temporal_aq", "1",
            "-forced-idr", "1",
            "-rc", "cbr",
            "-b:v", "500K",
            "-no-scenecut", "1",
            "-g", "216000",
            "-f", "hevc", "-",
        }

        audioString := []string{
            "ffmpeg",
            "-f", "dshow",
            "-channels", "2",
            "-sample_rate", "48000",
            "-sample_size", "16",
            "-audio_buffer_size", "15",
            "-i", "audio=RD Audio (High Definition Audio Device)",
            "-acodec", "libopus",
            "-mapping_family", "0",
            "-b:a", "25K",
            "-map", "0",
            "-f", "data", "-",
        }

        connected := false

        http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) {
            session := request.Body.(*webtransport.Session)

            session.AcceptSession()
            fmt.Println("\nAccepted incoming WebTransport connection.")
            fmt.Println("Awaiting authentication...")

            authData, err := session.ReceiveMessage(session.Context()) // Waits here till first datagram
            if err != nil {                                            // if client closes connection before sending anything
                fmt.Println("\nConnection closed:", err)
                return
            }

            if len(authData) >= 2 && bytes.Equal(authData[2:], password) {
                if connected {
                    session.CloseSession()
                    warn("Client has authenticated, but a session is already taking place! Connection closed.")
                    return
                } else {
                    connected = true
                    fmt.Println("Client has authenticated!\n")
                }
            } else {
                session.CloseSession()
                warn("Client has failed authentication! Connection closed. (" + string(authData[2:]) + ")")
                return
            }

            videoStream, _ := session.OpenUniStreamSync(session.Context())

            videoCmd := exec.Command(videoString[0], videoString[1:]...)
            go func() {
                videoOut, _ := videoCmd.StdoutPipe()
                videoCmd.Start()

                buffer := make([]byte, 15000)
                for {
                    len, err := videoOut.Read(buffer)
                    if err != nil {
                        break
                    }
                    if len > 0 {
                        videoStream.Write(buffer[:len])
                    }
                }
            }()

            time.Sleep(50 * time.Millisecond)

            audioStream, err := session.OpenUniStreamSync(session.Context())

            audioCmd := exec.Command(audioString[0], audioString[1:]...)
            go func() {
                audioOut, _ := audioCmd.StdoutPipe()
                audioCmd.Start()

                buffer := make([]byte, 15000)
                for {
                    len, err := audioOut.Read(buffer)
                    if err != nil {
                        break
                    }
                    if len > 0 {
                        audioStream.Write(buffer[:len])
                    }
                }
            }()

            for {
                data, err := session.ReceiveMessage(session.Context())
                if err != nil {
                    videoCmd.Process.Kill()
                    audioCmd.Process.Kill()

                    connected = false

                    fmt.Println("\nConnection closed:", err)
                    break
                }

                if len(data) == 0 {

                } else if data[0] == byte(0) {
                    fmt.Printf("Received mouse datagram: %s\n", data)
                }
            }

        })

        server := &webtransport.Server{
            ListenAddr: ":1024",
            TLSCert:    webtransport.CertFile{Path: "SSL/fullchain.pem"},
            TLSKey:     webtransport.CertFile{Path: "SSL/privkey.pem"},
            QuicConfig: &webtransport.QuicConfig{
                KeepAlive:      false,
                MaxIdleTimeout: 3 * time.Second,
            },
        }

        fmt.Println("Launching WebTransport server at", server.ListenAddr)
        ctx, cancel := context.WithCancel(context.Background())
        if err := server.Run(ctx); err != nil {
            log.Fatal(err)
            cancel()
        }

    }


  • Motion: Raspbian webcam "Unable to find a compatible palette format"

    25 June 2018, by mrtumnus

    I am trying to get a Creative Live! Motion webcam working on a Raspberry Pi 3 B+. fswebcam and motion both give the error "Unable to find a compatible palette format". ffmpeg, on the other hand, is able to capture video from the webcam without trouble.

    Device info:

    $ lsusb
    Bus 001 Device 004: ID 041e:4041 Creative Technology, Ltd Webcam Live! Motion

    $ v4l2-ctl --all
    Driver Info (not using libv4l2):
       Driver name   : sq930x
       Card type     : Creative WebCam Live! Motion
       Bus info      : usb-3f980000.usb-1.2
       Driver version: 4.14.34
       Capabilities  : 0x85200001
               Video Capture
               Read/Write
               Streaming
               Extended Pix Format
               Device Capabilities
       Device Caps   : 0x05200001
               Video Capture
               Read/Write
               Streaming
               Extended Pix Format
    Priority: 2
    Video input : 0 (sq930x: ok)
    Format Video Capture:
       Width/Height      : 640/480
       Pixel Format      : 'RGGB'
       Field             : None
       Bytes per Line    : 640
       Size Image        : 307200
       Colorspace        : sRGB
       Transfer Function : Default
       YCbCr/HSV Encoding: Default
       Quantization      : Default
       Flags             :
    Streaming Parameters Video Capture:
       Frames per second: invalid (0/0)
       Read buffers     : 2

    User Controls

                      exposure (int)    : min=1 max=4095 step=1 default=854 value=854
                          gain (int)    : min=1 max=255 step=1 default=141 value=141
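
    To confirm what the driver exposes independently of v4l2-ctl, the capture formats can be enumerated directly with the standard VIDIOC_ENUM_FMT ioctl (a minimal sketch; /dev/video0 assumed):

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void) {
        int fd = open("/dev/video0", O_RDONLY);
        if (fd < 0) { perror("open"); return 1; }

        struct v4l2_fmtdesc fmt;
        memset(&fmt, 0, sizeof(fmt));
        fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        // Walk the driver's format list; sq930x should report only Bayer RGGB.
        while (ioctl(fd, VIDIOC_ENUM_FMT, &fmt) == 0) {
            printf("%u: %.4s (%s)\n", fmt.index,
                   (char *)&fmt.pixelformat, fmt.description);
            fmt.index++;
        }
        return 0;
    }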

    Log of motion failing to open the device:

    $ cat /var/log/motion/motion.log
    [0:motion] [NTC] [ALL] [Jun 24 16:16:42] motion_startup: Using log type (ALL) log level (NTC)
    [0:motion] [NTC] [ENC] [Jun 24 16:16:42] ffmpeg_init: ffmpeg libavcodec version 57.48.101 libavformat version 57.41.100
    [0:motion] [NTC] [ALL] [Jun 24 16:16:42] main: Motion running in setup mode.
    [0:motion] [NTC] [ALL] [Jun 24 16:16:42] main: Camera 0 is from /etc/motion/motion.conf
    [0:motion] [NTC] [ALL] [Jun 24 16:16:42] main: Camera 0 is device: /dev/video0 input -1
    [0:motion] [NTC] [ALL] [Jun 24 16:16:42] main: Stream port 8081
    [0:motion] [NTC] [ALL] [Jun 24 16:16:42] main: Waiting for threads to finish, pid: 12067
    [0:web_control] [NTC] [STR] [Jun 24 16:16:42] http_bindsock: listening on 127.0.0.1 port 8080
    [1:ml1] [NTC] [ALL] [Jun 24 16:16:42] motion_init: Camera 0 started: motion detection Enabled
    [0:web_control] [NTC] [STR] [Jun 24 16:16:42] httpd_run: Started motion-httpd server on port 8080 (auth Disabled)
    [1:ml1] [NTC] [VID] [Jun 24 16:16:42] vid_v4lx_start: Using videodevice /dev/video0 and input -1
    [1:ml1] [NTC] [VID] [Jun 24 16:16:42] v4l2_get_capability:
    ------------------------
    cap.driver: "sq930x"
    cap.card: "Creative WebCam Live! Motion"
    cap.bus_info: "usb-3f980000.usb-1.2"
    cap.capabilities=0x85200001
    ------------------------
    [1:ml1] [NTC] [VID] [Jun 24 16:16:42] v4l2_get_capability: - VIDEO_CAPTURE
    [1:ml1] [NTC] [VID] [Jun 24 16:16:42] v4l2_get_capability: - READWRITE
    [1:ml1] [NTC] [VID] [Jun 24 16:16:42] v4l2_get_capability: - STREAMING
    [1:ml1] [NTC] [VID] [Jun 24 16:16:42] v4l2_select_input: name = "sq930x", type 0x00000002, status 00000000
    [1:ml1] [NTC] [VID] [Jun 24 16:16:42] v4l2_select_input: - CAMERA
    [1:ml1] [WRN] [VID] [Jun 24 16:16:42] v4l2_select_input: Device doesn't support VIDIOC_G_STD
    [1:ml1] [NTC] [VID] [Jun 24 16:16:42] v4l2_set_pix_format: Config palette index 17 (YU12) doesn't work.
    [1:ml1] [NTC] [VID] [Jun 24 16:16:42] v4l2_set_pix_format: Supported palettes:
    [1:ml1] [NTC] [VID] [Jun 24 16:16:42] v4l2_set_pix_format: (0) RGGB (8-bit Bayer RGRG/GBGB)
    [1:ml1] [NTC] [VID] [Jun 24 16:16:42] v4l2_set_pix_format: 0 - 8-bit Bayer RGRG/GBGB (compressed : 0) (0x42474752)
    [1:ml1] [ERR] [VID] [Jun 24 16:16:42] v4l2_set_pix_format: Unable to find a compatible palette format.
    [1:ml1] [NTC] [VID] [Jun 24 16:16:42] vid_v4lx_start: Using V4L1
    [1:ml1] [NTC] [ALL] [Jun 24 16:16:42] image_ring_resize: Resizing pre_capture buffer to 1 items
    [1:ml1] [ERR] [ALL] [Jun 24 16:16:52] motion_init: Error capturing first image
    [1:ml1] [NTC] [STR] [Jun 24 16:16:52] http_bindsock: listening on 127.0.0.1 port 8081
    [1:ml1] [NTC] [ALL] [Jun 24 16:16:52] motion_init: Started motion-stream server on port 8081 (auth Disabled)
    [1:ml1] [ERR] [ALL] [Jun 24 16:16:52] motion_loop: Video device fatal error - Closing video device
    [1:ml1] [NTC] [VID] [Jun 24 16:16:52] vid_close: Closing video device /dev/video0

    Successful ffmpeg recording:

    $ ffmpeg -i /dev/video0 test.mp4
    ffmpeg version 3.2.10-1~deb9u1+rpt1 Copyright (c) 2000-2018 the FFmpeg developers
     built with gcc 6.3.0 (Raspbian 6.3.0-18+rpi1) 20170516
     configuration: --prefix=/usr --extra-version='1~deb9u1+rpt1' --toolchain=hardened --libdir=/usr/lib/arm-linux-gnueabihf --incdir=/usr/include/arm-linux-gnueabihf --enable-gpl --disable-stripping --enable-avresample --enable-avisynth --enable-gnutls --enable-ladspa --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libebur128 --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libmp3lame --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librubberband --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libssh --enable-libtheora --enable-libtwolame --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx265 --enable-libxvid --enable-libzmq --enable-libzvbi --enable-omx-rpi --enable-mmal --enable-openal --enable-opengl --enable-sdl2 --enable-libdc1394 --enable-libiec61883 --enable-chromaprint --enable-frei0r --enable-libopencv --enable-libx264 --enable-shared
     libavutil      55. 34.101 / 55. 34.101
     libavcodec     57. 64.101 / 57. 64.101
     libavformat    57. 56.101 / 57. 56.101
     libavdevice    57.  1.100 / 57.  1.100
     libavfilter     6. 65.100 /  6. 65.100
     libavresample   3.  1.  0 /  3.  1.  0
     libswscale      4.  2.100 /  4.  2.100
     libswresample   2.  3.100 /  2.  3.100
     libpostproc    54.  1.100 / 54.  1.100
    [video4linux2,v4l2 @ 0x16035c0] Time per frame unknown
    Input #0, video4linux2,v4l2, from '/dev/video0':
     Duration: N/A, start: 2523.746120, bitrate: N/A
       Stream #0:0: Video: rawvideo ([186]RG[8] / 0x84752BA), bayer_rggb8, 640x480, 25.42 tbr, 1000k tbn, 1000k tbc
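
    Since ffmpeg clearly can debayer the RGGB stream, one workaround worth trying (untested; assumes the v4l2loopback module is installed and provides /dev/video1) is to let ffmpeg convert into a palette motion does support and point motion at the loopback device:

    $ sudo modprobe v4l2loopback
    $ ffmpeg -f v4l2 -i /dev/video0 -pix_fmt yuv420p -f v4l2 /dev/video1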

    Related questions:

    Raspberry Pi webcam unable to open video device

    webcam Unable to find a compatible palette format