Advanced search

Media (91)

Other articles (58)

  • Websites made with MediaSPIP

    2 May 2011

    This page lists some websites based on MediaSPIP.

  • Creating farms of unique websites

    13 April 2011

    MediaSPIP platforms can be installed as a farm, with a single "core" hosted on a dedicated server and used by multiple websites.
    This allows (among other things):
    • implementation costs to be shared between several different projects / individuals
    • rapid deployment of multiple unique sites
    • creation of groups of like-minded sites, making it possible to browse media in a more controlled and selective environment than the major "open" (...)

  • Authorisations overridden by plugins

    27 April 2010

    MediaSPIP core
    autoriser_auteur_modifier(), so that visitors are able to edit their own information on the authors page

On other sites (8680)

  • How to use the ffmpeg libraries to transform MP4 (H.264 & AAC) to M3U8 (HLS) in C code?

    1 July 2020, by itning

    I used the official transcoding.c example, but the console prints pkt->duration = 0, so the HLS segment duration will probably not be precise.

    I used this code to set the segment duration, but it has no effect:

    av_opt_set_int(ofmt_ctx->priv_data, "hls_time", 5, AV_OPT_SEARCH_CHILDREN);

    On the command line, this works:

    ffmpeg -i a.mp4 -codec copy -vbsf h264_mp4toannexb -map 0 -f segment -segment_list a.m3u8 -segment_time 10 a-%03d.ts

    How can I achieve the same thing with C code?
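
    For reference, muxer-private options such as hls_time can also be passed through an AVDictionary handed to avformat_write_header(), rather than set on ofmt_ctx->priv_data with av_opt_set_int(). The sketch below is only a minimal illustration of that approach; it assumes an ofmt_ctx already allocated for an .m3u8 output (so the hls muxer is selected), and the segment filename pattern is a placeholder. Options the muxer does not recognise are left behind in opts, which is a convenient way to spot misspelled keys.

/* Minimal sketch (not the original poster's code): pass HLS muxer options
 * through an AVDictionary when writing the header. */
#include <libavformat/avformat.h>
#include <libavutil/dict.h>

static int write_hls_header(AVFormatContext *ofmt_ctx)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "hls_time", "5", 0);                     /* target segment duration in seconds */
    av_dict_set(&opts, "hls_list_size", "0", 0);                /* keep every segment in the playlist */
    av_dict_set(&opts, "hls_segment_filename", "a-%03d.ts", 0); /* placeholder pattern */

    ret = avformat_write_header(ofmt_ctx, &opts);               /* unrecognised options remain in opts */
    av_dict_free(&opts);
    return ret;
}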

    


    This is my code:

/**
 * @file
 * API example for demuxing, decoding, filtering, encoding and muxing
 * @example transcoding.c
 */

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>

static AVFormatContext *ifmt_ctx;
static AVFormatContext *ofmt_ctx;
typedef struct FilteringContext {
    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;
    AVFilterGraph *filter_graph;
} FilteringContext;
static FilteringContext *filter_ctx;

typedef struct StreamContext {
    AVCodecContext *dec_ctx;
    AVCodecContext *enc_ctx;
} StreamContext;
static StreamContext *stream_ctx;

static int open_input_file(const char *filename) {
    int ret;
    unsigned int i;

    ifmt_ctx = NULL;
    if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return ret;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }

    stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
    if (!stream_ctx)
        return AVERROR(ENOMEM);

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *stream = ifmt_ctx->streams[i];
        AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
        AVCodecContext *codec_ctx;
        if (!dec) {
            av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
            return AVERROR_DECODER_NOT_FOUND;
        }
        codec_ctx = avcodec_alloc_context3(dec);
        if (!codec_ctx) {
            av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
            return AVERROR(ENOMEM);
        }
        ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
                                       "for stream #%u\n", i);
            return ret;
        }
        /* Reencode video & audio and remux subtitles etc. */
        if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
            || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
                codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
            /* Open decoder */
            ret = avcodec_open2(codec_ctx, dec, NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
                return ret;
            }
        }
        stream_ctx[i].dec_ctx = codec_ctx;
    }

    av_dump_format(ifmt_ctx, 0, filename, 0);
    return 0;
}

static int open_output_file(const char *filename, enum AVCodecID videoCodecId, enum AVCodecID audioCodecId) {
    AVStream *out_stream;
    AVStream *in_stream;
    AVCodecContext *dec_ctx, *enc_ctx;
    AVCodec *encoder;
    int ret;
    unsigned int i;

    ofmt_ctx = NULL;
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
    if (!ofmt_ctx) {
        av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
        return AVERROR_UNKNOWN;
    }

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
            return AVERROR_UNKNOWN;
        }

        in_stream = ifmt_ctx->streams[i];
        dec_ctx = stream_ctx[i].dec_ctx;

        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
            || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {

            if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
                encoder = avcodec_find_encoder(videoCodecId);
            } else {
                encoder = avcodec_find_encoder(audioCodecId);
            }
            //encoder = avcodec_find_encoder(dec_ctx->codec_id);
            if (!encoder) {
                av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
                return AVERROR_INVALIDDATA;
            }
            enc_ctx = avcodec_alloc_context3(encoder);
            if (!enc_ctx) {
                av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
                return AVERROR(ENOMEM);
            }

            /* In this example, we transcode to same properties (picture size,
             * sample rate etc.). These properties can be changed for output
             * streams easily using filters */
            if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
                enc_ctx->height = dec_ctx->height;
                enc_ctx->width = dec_ctx->width;
                enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
                /* take first format from list of supported formats */
                if (encoder->pix_fmts)
                    enc_ctx->pix_fmt = encoder->pix_fmts[0];
                else
                    enc_ctx->pix_fmt = dec_ctx->pix_fmt;
                /* video time_base can be set to whatever is handy and supported by encoder */
                enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
            } else {
                enc_ctx->sample_rate = dec_ctx->sample_rate;
                enc_ctx->channel_layout = dec_ctx->channel_layout;
                enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
                /* take first format from list of supported formats */
                enc_ctx->sample_fmt = encoder->sample_fmts[0];
                enc_ctx->time_base = (AVRational) {1, enc_ctx->sample_rate};
            }

            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

            /* Third parameter can be used to pass settings to encoder */
            ret = avcodec_open2(enc_ctx, encoder, NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
                return ret;
            }
            ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
                return ret;
            }

            out_stream->time_base = enc_ctx->time_base;
            stream_ctx[i].enc_ctx = enc_ctx;
        } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
            av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
            return AVERROR_INVALIDDATA;
        } else {
            /* if this stream must be remuxed */
            ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
                return ret;
            }
            out_stream->time_base = in_stream->time_base;
        }

    }
    av_dump_format(ofmt_ctx, 0, filename, 1);

    av_opt_set_int(ofmt_ctx->priv_data, "hls_time", 5, AV_OPT_SEARCH_CHILDREN);

    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
            return ret;
        }
    }

    /* init muxer, write output file header */
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
        return ret;
    }

    return 0;
}

static int init_filter(FilteringContext *fctx, AVCodecContext *dec_ctx,
                       AVCodecContext *enc_ctx, const char *filter_spec) {
    char args[512];
    int ret = 0;
    const AVFilter *buffersrc = NULL;
    const AVFilter *buffersink = NULL;
    AVFilterContext *buffersrc_ctx = NULL;
    AVFilterContext *buffersink_ctx = NULL;
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    AVFilterGraph *filter_graph = avfilter_graph_alloc();

    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        buffersrc = avfilter_get_by_name("buffer");
        buffersink = avfilter_get_by_name("buffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        snprintf(args, sizeof(args),
                 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                 dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
                 dec_ctx->time_base.num, dec_ctx->time_base.den,
                 dec_ctx->sample_aspect_ratio.num,
                 dec_ctx->sample_aspect_ratio.den);

        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                           args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                           NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
                             (uint8_t *) &enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
                             AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
            goto end;
        }
    } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        buffersrc = avfilter_get_by_name("abuffer");
        buffersink = avfilter_get_by_name("abuffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        if (!dec_ctx->channel_layout)
            dec_ctx->channel_layout =
                    av_get_default_channel_layout(dec_ctx->channels);
        snprintf(args, sizeof(args),
                 "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
                 dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
                 av_get_sample_fmt_name(dec_ctx->sample_fmt),
                 dec_ctx->channel_layout);
        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                           args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                           NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
                             (uint8_t *) &enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
                             AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
                             (uint8_t *) &enc_ctx->channel_layout,
                             sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
                             (uint8_t *) &enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
                             AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
            goto end;
        }
    } else {
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    /* Endpoints for the filter graph. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;

    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;

    if (!outputs->name || !inputs->name) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
                                        &inputs, &outputs, NULL)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;

    /* Fill FilteringContext */
    fctx->buffersrc_ctx = buffersrc_ctx;
    fctx->buffersink_ctx = buffersink_ctx;
    fctx->filter_graph = filter_graph;

    end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}

static int init_filters(void) {
    const char *filter_spec;
    unsigned int i;
    int ret;
    filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
    if (!filter_ctx)
        return AVERROR(ENOMEM);

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        filter_ctx[i].buffersrc_ctx = NULL;
        filter_ctx[i].buffersink_ctx = NULL;
        filter_ctx[i].filter_graph = NULL;
        if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO
              || ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
            continue;

        if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
            filter_spec = "null"; /* passthrough (dummy) filter for video */
        else
            filter_spec = "anull"; /* passthrough (dummy) filter for audio */
        ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx,
                          stream_ctx[i].enc_ctx, filter_spec);
        if (ret)
            return ret;
    }
    return 0;
}

static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
    int ret;
    int got_frame_local;
    AVPacket enc_pkt;
    int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
    (ifmt_ctx->streams[stream_index]->codecpar->codec_type ==
     AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;

    if (!got_frame)
        got_frame = &got_frame_local;

    av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
    /* encode filtered frame */
    enc_pkt.data = NULL;
    enc_pkt.size = 0;
    av_init_packet(&enc_pkt);
    ret = enc_func(stream_ctx[stream_index].enc_ctx, &enc_pkt,
                   filt_frame, got_frame);
    av_frame_free(&filt_frame);
    if (ret < 0)
        return ret;
    if (!(*got_frame))
        return 0;

    /* prepare packet for muxing */
    enc_pkt.stream_index = stream_index;
    av_packet_rescale_ts(&enc_pkt,
                         stream_ctx[stream_index].enc_ctx->time_base,
                         ofmt_ctx->streams[stream_index]->time_base);

    av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
    /* mux encoded frame */
    ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
    return ret;
}

static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index) {
    int ret;
    AVFrame *filt_frame;

    av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
    /* push the decoded frame into the filtergraph */
    ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
                                       frame, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
        return ret;
    }

    /* pull filtered frames from the filtergraph */
    while (1) {
        filt_frame = av_frame_alloc();
        if (!filt_frame) {
            ret = AVERROR(ENOMEM);
            break;
        }
        av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
        ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
                                      filt_frame);
        if (ret < 0) {
            /* if no more frames for output - returns AVERROR(EAGAIN)
             * if flushed and no more frames for output - returns AVERROR_EOF
             * rewrite retcode to 0 to show it as normal procedure completion
             */
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            av_frame_free(&filt_frame);
            break;
        }

        filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
        ret = encode_write_frame(filt_frame, stream_index, NULL);
        if (ret < 0)
            break;
    }

    return ret;
}

static int flush_encoder(unsigned int stream_index) {
    int ret;
    int got_frame;

    if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
          AV_CODEC_CAP_DELAY))
        return 0;

    while (1) {
        av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
        ret = encode_write_frame(NULL, stream_index, &got_frame);
        if (ret < 0)
            break;
        if (!got_frame)
            return 0;
    }
    return ret;
}

int main() {
    char *inputFile = "D:/20200623_094923.mp4";
    char *outputFile = "D:/test/te.m3u8";
    enum AVCodecID videoCodec = AV_CODEC_ID_H264;
    enum AVCodecID audioCodec = AV_CODEC_ID_AAC;

    int ret;
    AVPacket packet = {.data = NULL, .size = 0};
    AVFrame *frame = NULL;
    enum AVMediaType type;
    unsigned int stream_index;
    unsigned int i;
    int got_frame;
    int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);

    if ((ret = open_input_file(inputFile)) < 0)
        goto end;
    if ((ret = open_output_file(outputFile, videoCodec, audioCodec)) < 0)
        goto end;
    if ((ret = init_filters()) < 0)
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
            break;
        stream_index = packet.stream_index;
        type = ifmt_ctx->streams[packet.stream_index]->codecpar->codec_type;
        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
               stream_index);

        if (filter_ctx[stream_index].filter_graph) {
            av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
            frame = av_frame_alloc();
            if (!frame) {
                ret = AVERROR(ENOMEM);
                break;
            }
            av_packet_rescale_ts(&packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 stream_ctx[stream_index].dec_ctx->time_base);
            dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
                       avcodec_decode_audio4;
            ret = dec_func(stream_ctx[stream_index].dec_ctx, frame,
                           &got_frame, &packet);
            if (ret < 0) {
                av_frame_free(&frame);
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }

            if (got_frame) {
                frame->pts = frame->best_effort_timestamp;
                ret = filter_encode_write_frame(frame, stream_index);
                av_frame_free(&frame);
                if (ret < 0)
                    goto end;
            } else {
                av_frame_free(&frame);
            }
        } else {
            /* remux this frame without reencoding */
            av_packet_rescale_ts(&packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 ofmt_ctx->streams[stream_index]->time_base);

            ret = av_interleaved_write_frame(ofmt_ctx, &packet);
            if (ret < 0)
                goto end;
        }
        av_packet_unref(&packet);
    }

    /* flush filters and encoders */
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        /* flush filter */
        if (!filter_ctx[i].filter_graph)
            continue;
        ret = filter_encode_write_frame(NULL, i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
            goto end;
        }

        /* flush encoder */
        ret = flush_encoder(i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
            goto end;
        }
    }

    av_write_trailer(ofmt_ctx);
    end:
    av_packet_unref(&packet);
    av_frame_free(&frame);
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        avcodec_free_context(&stream_ctx[i].dec_ctx);
        if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
            avcodec_free_context(&stream_ctx[i].enc_ctx);
        if (filter_ctx && filter_ctx[i].filter_graph)
            avfilter_graph_free(&filter_ctx[i].filter_graph);
    }
    av_free(filter_ctx);
    av_free(stream_ctx);
    avformat_close_input(&ifmt_ctx);
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));

    return ret ? 1 : 0;
}


  • Piwik 1.12, New Features, API Improvements, Stability — The Last Piwik 1.X Release

    30 May 2013, by Piwik team — Development

    We are very excited to announce the immediate availability of Piwik v1.12 !

    Piwik v1.12 is a major new release with four big new features, seven smaller new features, several API improvements and all together 82 tickets fixed. This is also the last major 1.X release, which means after this release we will be working on releasing Piwik 2.0. This also means that you should upgrade to PHP 5.3 or higher if you haven’t already, since Piwik 2.0 will only support PHP 5.3 and above.

    Finally, this release contains two breaking changes to the API. If you use the Piwik API click here or scroll down to see if you’re affected.

    New Big Feature – Beta Release Channel

    Beta release channel settings

    For those of you who want to help test Piwik 2.0-beta releases as soon as they come up, we’ve made it easier to use our beta releases. Navigate to the Settings > General Settings page and click the The latest beta release radio button. You will then be able to upgrade to beta releases.

    This isn’t truly a major feature, but we think it’s just as important because it will allow us to create more beta releases and thus catch more bugs before we make a final release. This means more releases and more stability for you.

    New Big Feature – Segment Editor

    Custom Segment Editor with Custom Variable segmentation

    The Segment Editor is a long-awaited new feature that allows you to view, save and edit your segments.

    Piwik has supported segmentation (filtering visits and reports by arbitrary criteria, like browser family) for quite some time now, but it has never been possible to visually create and modify them. Nor could they be saved for later recall.

    Thanks to the eighty individuals and companies who funded this feature, it is now possible to:

    • visually segment your visitors, instead of creating URLs.
    • save segments and easily switch between them, instead of remembering URLs.
    • get suggestions for segments that might be helpful to view.
    • learn more in the Segmenting Analytics reports user documentation.

    New Big Feature – Page Speed Reports

    You can now see how long it took your webserver to generate and send pages over HTTP through the new Avg. Generation Time metric.

    This metric can be viewed on both the Pages and Page Titles reports :

    Avg. generation time on the Page URLs report

    And the average page generation time for all the pages in your website/webapp is displayed on the visitors overview :

    Avg. generation time on the Visitors Overview

    You can use this new information to benchmark your webapp and web server.

    New Big Feature – Device Detection Reports

    Piwik 1.12 also includes a new plugin that provides reports on the device types (tablet, desktop, smartphone, etc.), device brands (Apple, Google, Samsung, etc.) and device models (iPad, Nexus 7, etc.) your visitors use to access your website :

    Device type, brand and model reports

    The new plugin also enhances Operating system detections (detecting sub versions of Linux, Windows, and more).

    Note : This plugin is not enabled by default, but will be in Piwik 2.0. If you want to view these reports now, you can activate the plugin in the Installed Plugins admin page. Navigate to Visitors > Devices to see the new reports. You may also use the new (beta) ‘Device type’.

    The new plugin was developed with the support of Clearcode.cc, our technology partner.

    Other improvements

    Majestic SEO Metrics

    SEO widget with Majestic metrics

    We’ve added two new SEO metrics to the SEO widget, both of which are calculated by MajesticSEO.com. These metrics will tell you the number of external backlinks (the number of links to your site from other sites) and the number of referrer domains (the number of domains that link to your site).

    We thank the team at Majestic for their support and hard work in bringing these metrics to your Piwik dashboards!

    Real-time Visitor Count Dashboard Widget

    Real time visitor counter

    There is now a simple new widget you can use to see the number of visitors, visits and actions that occurred in the last couple of minutes. We call it the Real Time Visitor Counter!

    New segment parameter : siteSearchKeyword.

    There is now a new segment parameter you can use to segment your visits : siteSearchKeyword. This parameter will let you select visits that had site searches with a specific keyword.

    Ignore URL letter case when importing log files.

    We’ve added a new option to the log import script, --force-lowercase-path. When used, the importer will change URL paths to lowercase before tracking them. This way http://domain.com/MY/BLOG will be treated the same as http://domain.com/my/blog.
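
    For example, an invocation might look like this (a hypothetical command; the importer ships in Piwik's misc/log-analytics directory, and the URL, site ID and log path are placeholders):

    python misc/log-analytics/import_logs.py --url=http://your-piwik.example --idsite=1 --force-lowercase-path /var/log/apache2/access.log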

    Updated ISP Names

    Prettier provider names

    We’ve also modified the Providers report so prettier and more up-to-date names of ISPs are displayed.

    Customize the background/text/axis color of graphs.

    Custom image graph colors

    It is now possible to change the background color, text color and/or axis color of the graph images generated by the ImageGraph plugin. To access this functionality, use the following URL query parameters when generating an image :

    • backgroundColor
    • textColor
    • axisColor

    For example :

    http://demo.piwik.org/index.php?module=API&method=ImageGraph.get&idSite=7&apiModule=UserSettings&apiAction=getBrowser&token_auth=anonymous&period=day&date=2013-03-21,2013-04-19&language=en&width=779&height=150&fontSize=9&showMetricTitle=0&aliasedGraph=1&legendAppendMetric=0&backgroundColor=efefef&gridColor=dcdcdc&colors=cb2026

    Send your users to a custom URL after they logout.

    If you manage a Piwik installation with many users and you want to send them to a custom page or website after they log out of Piwik, you can now specify the URL to redirect users after they log out.

    API Changes and Improvements

    BREAKING CHANGE – renamed segment parameters.

    The following segment parameters have been renamed :

    • continent renamed to : continentCode
    • browserName renamed to : browserCode
    • operatingSystem renamed to : operatingSystemCode
    • lat renamed to : latitude
    • long renamed to : longitude
    • region renamed to : regionCode
    • country renamed to : countryCode

    If you use one of the old segment parameter names, Piwik will throw an exception, so you should notice when you’re using an old name.

    BREAKING CHANGE – changes to the input & output of the Live.getLastVisitsDetails method.

    The following changes were made to the Live.getLastVisitsDetails API method :

    • The method no longer uses the maxIdVisit query parameter. It has been replaced by the filter_offset parameter.
    • Site search keywords are now displayed in a <siteSearchKeyword> element. They were formerly in <pageTitle> elements.
    • Custom variables with page scope now have ‘Page’ in their element names when displayed. For example, <customVariablePageName1>, <customVariablePageName2>, etc.

    Filter results of MultiSites.getAll by website name.

    It is now possible to filter the results of MultiSites.getAll by website name. To do this, set the pattern query parameter to the desired regex pattern.
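
    For instance, a request of the following shape (an illustrative URL; the host, token and pattern are placeholders) would only return websites whose name matches "blog":

    http://your-piwik.example/index.php?module=API&method=MultiSites.getAll&period=month&date=today&format=JSON&token_auth=YOUR_TOKEN&pattern=blog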

    Get suggested values to use for a segment parameter.

    The new API method API.getSuggestedValuesForSegment can now be used to get suggested values for a segment parameter. This method will return a list of the most seen values (in the last 60 days) for a certain segment parameter. So for browserCode, this would return the codes for the browsers most visitors used in the last 60 days.
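
    For example, a request along these lines (an illustrative URL; the host and token are placeholders, and segmentName is the assumed name of the parameter) would return the most common browser codes of the last 60 days:

    http://your-piwik.example/index.php?module=API&method=API.getSuggestedValuesForSegment&segmentName=browserCode&idSite=1&format=JSON&token_auth=YOUR_TOKEN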

    Use extra tracking query parameters with the JS tracker (such as ‘lat’ & ‘long’).

    We’ve added a new method to the JavaScript tracker named appendToTrackingUrl. You can use this method to add extra query parameters to a tracking request, like so :

    _paq.push(['appendToTrackingUrl', 'lat=X&long=Y']);

    What we’re working on

    As we said above, Piwik v1.12 is the last in the 1.X series of releases. This means we are now officially working on Piwik 2.0.

    Piwik 2.0 will be a big release, to be sure, but it’s going to bring you more than just a couple new features and a bag of bug fixes. For Piwik 2.0 we will be revisiting the user needs and the ideals that originally prompted us to create Piwik in order to build our vision of the future of web analytics.

    Piwik 2.0 won’t just be a bigger, better web app, but a new platform for observing and analyzing the things that matter to you.

    Participate in Piwik

    Are you a talented developer or an experienced User Interface designer ? Or maybe you like to write documentation or are a marketing guru ?

    If you have some free time and if you want to contribute to one of the most awesome open source projects around, please get in touch with the Piwik team, or read this page to learn more…

    Summary

    For the full list of changes in Piwik 1.12 check out the Changelog.

    Thank you to the core developers, all the beta testers and users, our official supporters, the translators & everyone who reported bugs or feature requests. Thank you also to the software and libraries we use.

    If you are a company and would like to help an important project like Piwik grow, please get in touch, it means a lot to us. You can also participate in the project.

    –> if you like what you read, please tell your friends and colleagues or write on your website, blog, forums, stackoverflow, etc. <–

    Peace. Enjoy !

  • Multilingual SEO : A Marketer’s Guide to Measuring and Optimising Multilingual Websites

    26 June, by Joe

    The web—and search engines in particular—make it easier than ever for businesses of any size to reach an international audience. 

     
    A multilingual website makes sense, especially when the majority of websites are in English. After all, you want to stand out to customers by speaking their local language. But it’s no good having a multilingual site if people can’t find it. 

    That’s where multilingual SEO comes in. 

    In this article, we’ll show you how to build a multilingual website that ranks in Google and other local search engines. You’ll learn why multilingual SEO is about more than translating your content and specific tasks you need to tick off to make your multilingual site as visible as possible. 

    ¡Vamos!

    What is multilingual SEO ? 

    Multilingual SEO is the process of optimising your website to improve search visibility in more than one language. It involves creating high-quality translations (including SEO metadata), targeting language-specific keywords and building links in the target language. 

    A definition of multilingual SEO

    The goal is to make your site as discoverable and accessible as possible for users searching Google and other search engines in their local language. 

    It’s worth pointing out that multilingual SEO differs slightly from international SEO, even if the terms are used interchangeably. With multilingual SEO, you are optimising for a language (so Spanish targets every Spanish-speaking country, not just Spain). In international SEO, you target specific countries, so you might have a different strategy for targeting Argentinian customers vs. Mexican customers. 

    Why adopt a multilingual SEO strategy ?

    There are two major reasons to adopt a multilingual SEO strategy : to reach more customers and to deliver the best experience possible. 

    Why adopt a multilingual SEO strategy

    Reach a wider audience

    Not everyone searches the web in English. Even if non-native speakers eventually resort to English, many will try Googling in their own language first. That means if you target customers in multiple non-English-speaking countries, creating a multilingual SEO strategy is a must to reach as many of them as possible. 

    A multilingual SEO strategy also boosts your website’s chances of appearing in country-specific search engines like Baidu and Yandex — and in localised versions of Google like Google.fr and Google.de.

    Deliver a better user experience

    Multilingual SEO gives your customers what they want : the ability to search, browse and shop in their native language. This is a big deal, with 89% of consumers saying it’s important to deal with a brand in their own language.

    Improving the user experience also increases the likelihood of non-English-speaking customers converting. As many as 82% of people won’t make a purchase in major consumer categories without local language support. 

    How to prepare for multilingual SEO success

    Before you start creating multilingual SEO content, you need to take care of a couple of things. 

    Identify target markets

    The first step is to identify the languages you want to target. You know your customers better than anyone, so it’s likely you have one or two languages in mind already. 

    But if you don’t, why not analyse your existing website traffic to discover which languages to target first ? The Locations report in Matomo (found in the Visitors section of Matomo’s navigation) shows you which countries your visitors hail from. 

    A screenshot of Matomo's Location Report

    In the example above, targeting German and Indonesian searchers would be a sensible strategy. 

    Target local keywords

    Once you’ve decided on your target markets, it’s time to find localised keywords. Keywords are the backbone of any SEO campaign, so take your time to find ones that are specific to your local markets.

    Yes, that means you shouldn’t just translate your English keywords into French or Spanish ! French or Spanish searchers may use completely different terms to find your products or services. 

    That’s why it’s vital to use a tool like Ahrefs or Semrush to do multilingual keyword research. 

    A french keyword

    This may be a bit tricky if you aren’t a native speaker of your target language, but you can translate your English keywords using Google Translate to get started. 

    Remember, search volumes won’t be as high as English keywords since fewer people are searching for them. So don’t be scared off by small keyword volumes. Besides, even in the U.S. around 95% of keywords get 10 searches per month or fewer. 

    Choose your URL structure

    The final step in preparing your multilingual SEO strategy is deciding on your URL structure, whether that’s using separate domains, subdomains or subfolders. 

    This is important for SEO as it will avoid duplicate content issues. Using language indicators within these URLs will also help both users and search engines differentiate versions of your site. 

    The first option is to have a separate domain for each target language. 

    • yoursite.com
    • yoursite.fr
    • yoursite.es

    Using subdomains would mean you keep one domain but have completely separate sites :

    • fr.yoursite.com
    • es.yoursite.com
    • de.yoursite.com

    Using subfolders keeps everything clean but can result in long URLs :

    • yoursite.com/en
    • yoursite.com/de
    • yoursite.com/es

    As you can see in the image below, we use subdomains to separate the multilingual versions of our site:

    A browser showing a language-specific URL structure

    While separate domains provide more precise targeting, it’s a lot of work to manage them. So, unless you have a keyword-rich, unbranded domain name that needs translating, we’d recommend using either subdomains or subdirectories. It’s slightly easier to manage subfolders, but subdomains offer users a clearer divide between different versions of your site. 

    If you want to make your site even easier to navigate, then you can incorporate language indicators into your page’s design to make it easy for consumers to switch languages. These are the little dropdown menus you see containing various flags that let users browse in different languages.

    5 multilingual SEO strategies to use in 2024

    Now you’ve got the basics in order, use the following SEO strategies to improve your multilingual rankings. 

    Use hreflang tags

    There’s another signal that Google and other search engines use to determine the language and region your website is targeting: hreflang.

    Hreflang is an HTML attribute that Google and other search engines use to ensure they serve users the right version of the page.

    You can insert it into the page’s head section, as in this example for a German version of the site:

    <link rel="alternate" href="https://yourwebsite.com/de" hreflang="de" />

    Or you can add the relevant markup to your website’s sitemap. Here’s what the same German markup would look like :

    <xhtml:link rel="alternate" hreflang="de" href="https://yourwebsite.com/de/" /> 

    Whichever method you choose, you include one language code in ISO 639-1 format. You can also add a region code in ISO 3166-1 Alpha 2 format, and you can list multiple language-region combinations. A web page in German, for example, could target both German and Austrian consumers. 
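
    For example, a German-language page could be annotated along these lines (a hypothetical snippet reusing the placeholder domain from above; x-default marks the fallback version for unmatched languages):

    <link rel="alternate" hreflang="de-DE" href="https://yourwebsite.com/de/" />
    <link rel="alternate" hreflang="de-AT" href="https://yourwebsite.com/de/" />
    <link rel="alternate" hreflang="x-default" href="https://yourwebsite.com/" />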

    Hreflang tags also avoid duplicate content issues. 

    With a multilingual site, you could have a dozen different versions of the same page, showing the same content but in a different language. Without an hreflang tag specifying that these are different versions of the same page, Google may penalise your site.

    Invest in high-quality translations

    Google rewards good content. And, while you’d hope Google Translate would be good enough, it usually isn’t.

    Instead, make sure you are using professional linguists to translate your content. They won’t only be able to produce accurate and contextually relevant translations — the kind that Google may reward with higher rankings — but they’ll also be able to account for cultural differences between languages. 

    Imagine you are translating a web page from U.S. English into Italian, for example. You’ve not only got to translate the words themselves but also the measurements (from inches to cm), dates (from mm/dd/yy to dd/mm/yy), currencies, idioms and more. 

    Translate your metadata, too

    You need to translate more than just the content of your website. You should translate its metadata — the descriptive information search engines use to understand your page — to help you rank better in Google and localised search engines. 

    As you can see in the image below, we’ve translated the French version of our homepage’s title and meta description :

    Matomo's meta data translated into French

    Page titles and meta descriptions aren’t the only pieces of metadata you need to pay attention to. Make sure you translate the following :

    • URLs
    • Image alt tags
    • Canonical tags
    • Structured data markup

    While you’re at it, make sure you have translated all of your website’s content, too. It’s easy to miss error messages, contact forms and checkout pages that would otherwise ruin the user experience. 

    Build multilingual backlinks

    Building backlinks is an important step in any SEO strategy. But it’s doubly important in multilingual SEO, where links in your target language also help Google understand that you have a translated website. 

    While you want to prioritise links from websites in your target language, make sure that websites are relevant to your niche. It’s no good having a link from a Spanish recipe blog if you have a marketing SaaS tool. 

    A great place to start is by mining the links of competitors in your target market. Your competitors have already done the hard work acquiring these links, and there’s every chance these websites will link to your translated content, too.

    Search competitor backlinks for multilingual link opportunities

    Don’t forget about internal linking pages in the same language, either. This will obviously help users stay in the same language while navigating your site, but it will also show Google the depth of your multilingual content.

    Monitor the SEO health of your multilingual site

    The technical performance of your multilingual pages has a significant impact on your ability to rank and convert. 

    We know for a fact that Google uses page performance metrics in the form of Core Web Vitals as a search ranking factor. What’s more, research by WP Rocket finds that a site loading in one second has a three times better conversion rate than a site loading in five seconds. 

    With that in mind, make sure your site is performing at optimal levels using Matomo’s SEO Web Vitals report. Our SEO Web Vitals feature tracks all of Google’s Core Web Vitals, including :

    • Page Speed Score
    • First Contentful Paint (FCP)
    • First Input Delay (FID)
    • Largest Contentful Paint (LCP)
    • Cumulative Layout Shift (CLS)

    The report displays each metric in a different colour depending on your site’s performance, with green meaning good, orange meaning average, and red meaning poor.

    Matomo's SEO Web Vitals Report

    Check in on these metrics regularly or set up custom alerts to automatically notify you when a specific metric drops below or exceeds a certain threshold — like if your Page Speed score falls below 50, for example. 

    How to track your multilingual SEO efforts with Matomo

    Matomo isn’t just a great tool to track your site’s SEO health ; you can also use our privacy-focused analytics platform to track your multilingual SEO success.

    If you want to analyse the performance of your new language, for example, you can segment traffic by URL. In our case, we use the segment “Page URL contains fr.matomo.org” to measure the impact of our French website. 

    We can also track the performance of every language except French by using the segment “Page URL does not contain fr.matomo.org”.
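
    The same segments can also be passed to Matomo’s Reporting API. Here is an illustrative request (placeholder host and token; =@ is the "contains" operator, and the segment value should be URL-encoded in practice):

    https://your-matomo.example/index.php?module=API&method=VisitsSummary.get&idSite=1&period=month&date=today&format=JSON&token_auth=YOUR_TOKEN&segment=pageUrl=@fr.matomo.org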

    You can use Matomo to track your Keyword performance, too. Unlike search engine-owned platforms like Google Analytics and Google Search Console that no longer share keyword data, Matomo lets users see exactly which keywords users search to find your site in the Combined keywords report :

    Matomo's Combined Keywords Report

    This is valuable information you can use to identify new keyword opportunities and improve your multilingual content strategy. 

    For example, you could use the report to focus your multilingual SEO efforts on a single language if searches in it are starting to rival English. Or you could decide to translate your most trafficked English keywords into your target languages, regardless of whether a tool like Ahrefs or Semrush says those keywords get searches. 

    For international brands that have separate websites and apps for each target language or region, Matomo’s Roll-Up Reporting lets you keep track of aggregate data in one place. 

    A diagram that shows how Roll-up reporting works

    Roll-Up Reporting lets you view data from multiple websites and apps as if they were a single site. This lets you quickly answer questions like :

    • How many visits happened across all of my multilingual websites ?
    • Which languages contributed the most conversions ?
    • How does the performance of my Spanish app compare to my Spanish website ?

    Is it any wonder, then, that Matomo is used by over one million sites in 190 countries to track their web and SEO performance in a privacy-friendly way ?

    Join them today by trying Matomo free for 21 days, no credit card required. Alternatively, request a demo to see how Matomo can help you track your multilingual SEO efforts.