| author | Jan Beich <jbeich@FreeBSD.org> | 2023-03-17 18:58:47 +0000 |
|---|---|---|
| committer | Jan Beich <jbeich@FreeBSD.org> | 2023-03-17 20:59:23 +0000 |
| commit | f0dadc529fe9c4ee69dd96db92e2d0572d2092aa (patch) | |
| tree | 80091ad194c99a3a9f5a775d81ee866d5c7128ba /multimedia/ffmpeg4/files | |
| parent | multimedia/ffmpeg: respect DOCSDIR more (diff) | |
multimedia/ffmpeg: fork 4.* into a pinned port
Mainly for leaf consumers, i.e. applications, but not plugins or
libraries. Otherwise, indirectly mixing different versions of the
ffmpeg libraries at runtime can lead to crashes.
To use it, add the following line before <bsd.port.mk> or
<bsd.port.pre.mk> (see the example below):
.include "${.CURDIR:H:H}/multimedia/ffmpeg4/override.mk"
PR: 261302
Inspired by: c717faa5c764
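A minimal sketch of how a consumer port might use the override, given as a hypothetical leaf port; the port name, version, maintainer, and dependency below are placeholders, and only the placement of the override.mk include (immediately before the final bsd.port.mk include) is taken from this commit:

# Hypothetical port Makefile for an application pinned to ffmpeg 4.*.
PORTNAME=	exampleplayer
DISTVERSION=	1.0
CATEGORIES=	multimedia

MAINTAINER=	ports@FreeBSD.org
COMMENT=	Placeholder media player linked against the ffmpeg libraries

# Assumption: the port's usual ffmpeg library dependency.
LIB_DEPENDS=	libavcodec.so:multimedia/ffmpeg

# Pin to the forked 4.* libraries; keep this before bsd.port.mk.
.include "${.CURDIR:H:H}/multimedia/ffmpeg4/override.mk"
.include <bsd.port.mk>

Since ${.CURDIR} is the port's own directory (a category/port subdirectory of the ports tree), the :H:H modifier strips two path components and resolves to the ports tree root, so the same include line works unchanged from any port.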
Diffstat (limited to 'multimedia/ffmpeg4/files')
| mode | path | insertions |
|---|---|---|
| -rw-r--r-- | multimedia/ffmpeg4/files/patch-configure | 26 |
| -rw-r--r-- | multimedia/ffmpeg4/files/patch-doc-Makefile | 11 |
| -rw-r--r-- | multimedia/ffmpeg4/files/patch-libavdevice_v4l2.c | 18 |
| -rw-r--r-- | multimedia/ffmpeg4/files/patch-libavfilter_vf__lensfun.c | 51 |
| -rw-r--r-- | multimedia/ffmpeg4/files/patch-libavformat_rtsp.c | 44 |
| -rw-r--r-- | multimedia/ffmpeg4/files/patch-libavutil-common.h | 13 |
| -rw-r--r-- | multimedia/ffmpeg4/files/patch-libavutil-x86-asm.h | 11 |
| -rw-r--r-- | multimedia/ffmpeg4/files/patch-libavutil_hwcontext__drm.c | 24 |
| -rw-r--r-- | multimedia/ffmpeg4/files/patch-libswscale_ppc_yuv2rgb__altivec.c | 15 |
| -rw-r--r-- | multimedia/ffmpeg4/files/patch-svtav1 | 437 |
| -rw-r--r-- | multimedia/ffmpeg4/files/patch-vmaf | 951 |
11 files changed, 1601 insertions, 0 deletions
diff --git a/multimedia/ffmpeg4/files/patch-configure b/multimedia/ffmpeg4/files/patch-configure new file mode 100644 index 000000000000..760c50b8e912 --- /dev/null +++ b/multimedia/ffmpeg4/files/patch-configure @@ -0,0 +1,26 @@ +--- configure.orig 2022-10-09 19:04:43 UTC ++++ configure +@@ -3784,13 +3784,7 @@ host_os=$target_os_default + host_os=$target_os_default + + # machine +-if test "$target_os_default" = aix; then +- arch_default=$(uname -p) +- strip_default="strip -X32_64" +- nm_default="nm -g -X32_64" +-else +- arch_default=$(uname -m) +-fi ++arch_default=$(uname -p) + cpu="generic" + intrinsics="none" + +@@ -6758,7 +6752,7 @@ if enabled x86; then + + if enabled x86; then + case $target_os in +- mingw32*|mingw64*|win32|win64|linux|cygwin*) ++ freebsd|mingw32*|mingw64*|win32|win64|linux|cygwin*) + ;; + *) + disable ffnvcodec cuvid nvdec nvenc diff --git a/multimedia/ffmpeg4/files/patch-doc-Makefile b/multimedia/ffmpeg4/files/patch-doc-Makefile new file mode 100644 index 000000000000..52ab4033ad98 --- /dev/null +++ b/multimedia/ffmpeg4/files/patch-doc-Makefile @@ -0,0 +1,11 @@ +--- doc/Makefile.orig 2015-06-19 20:44:35 UTC ++++ doc/Makefile +@@ -145,7 +145,7 @@ install-html: $(HTMLPAGES) + endif + + ifdef CONFIG_MANPAGES +-install-progs-$(CONFIG_DOC): install-man ++#install-progs-$(CONFIG_DOC): install-man + + install-man: $(MANPAGES) + $(Q)mkdir -p "$(MANDIR)/man1" diff --git a/multimedia/ffmpeg4/files/patch-libavdevice_v4l2.c b/multimedia/ffmpeg4/files/patch-libavdevice_v4l2.c new file mode 100644 index 000000000000..8518fadce3af --- /dev/null +++ b/multimedia/ffmpeg4/files/patch-libavdevice_v4l2.c @@ -0,0 +1,18 @@ +- ioctl() prototype on BSDs follows Version 7 AT&T UNIX + +--- libavdevice/v4l2.c.orig 2019-04-21 04:37:17 UTC ++++ libavdevice/v4l2.c +@@ -95,10 +95,10 @@ struct video_data { + int (*open_f)(const char *file, int oflag, ...); + int (*close_f)(int fd); + int (*dup_f)(int fd); +-#ifdef __GLIBC__ +- int (*ioctl_f)(int fd, unsigned long int request, ...); +-#else ++#if defined(__sun) || defined(__BIONIC__) || defined(__musl__) /* POSIX-like */ + int (*ioctl_f)(int fd, int request, ...); ++#else ++ int (*ioctl_f)(int fd, unsigned long int request, ...); + #endif + ssize_t (*read_f)(int fd, void *buffer, size_t n); + void *(*mmap_f)(void *start, size_t length, int prot, int flags, int fd, int64_t offset); diff --git a/multimedia/ffmpeg4/files/patch-libavfilter_vf__lensfun.c b/multimedia/ffmpeg4/files/patch-libavfilter_vf__lensfun.c new file mode 100644 index 000000000000..805d07687521 --- /dev/null +++ b/multimedia/ffmpeg4/files/patch-libavfilter_vf__lensfun.c @@ -0,0 +1,51 @@ +Partially revert https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/8b78eb312de9 +until graphics/lensfun is updated to a version that contains +https://github.com/lensfun/lensfun/commit/b135e05d729d + +--- libavfilter/vf_lensfun.c.orig 2021-04-08 21:28:40 UTC ++++ libavfilter/vf_lensfun.c +@@ -222,20 +222,39 @@ static int config_props(AVFilterLink *inlink) + + if (!lensfun->modifier) { + if (lensfun->camera && lensfun->lens) { +- lensfun->modifier = lf_modifier_create(lensfun->lens, ++ lensfun->modifier = lf_modifier_create( ++#if (LF_VERSION > 0x35F00) ++ lensfun->lens, + lensfun->focal_length, ++#endif + lensfun->camera->CropFactor, + inlink->w, + inlink->h, LF_PF_U8, lensfun->reverse); + if (lensfun->mode & VIGNETTING) +- lf_modifier_enable_vignetting_correction(lensfun->modifier, lensfun->aperture, lensfun->focus_distance); ++ lf_modifier_enable_vignetting_correction(lensfun->modifier, ++#if 
(LF_VERSION <= 0x35F00) ++ lensfun->lens, lensfun->focal_length, ++#endif ++ lensfun->aperture, lensfun->focus_distance); + if (lensfun->mode & GEOMETRY_DISTORTION) { +- lf_modifier_enable_distortion_correction(lensfun->modifier); +- lf_modifier_enable_projection_transform(lensfun->modifier, lensfun->target_geometry); ++ lf_modifier_enable_distortion_correction(lensfun->modifier, ++#if (LF_VERSION <= 0x35F00) ++ lensfun->lens, lensfun->focal_length ++#endif ++ ); ++ lf_modifier_enable_projection_transform(lensfun->modifier, ++#if (LF_VERSION <= 0x35F00) ++ lensfun->lens, lensfun->focal_length, ++#endif ++ lensfun->target_geometry); + lf_modifier_enable_scaling(lensfun->modifier, lensfun->scale); + } + if (lensfun->mode & SUBPIXEL_DISTORTION) +- lf_modifier_enable_tca_correction(lensfun->modifier); ++ lf_modifier_enable_tca_correction(lensfun->modifier, ++#if (LF_VERSION <= 0x35F00) ++ lensfun->lens, lensfun->focal_length ++#endif ++ ); + } else { + // lensfun->camera and lensfun->lens should have been initialized + return AVERROR_BUG; diff --git a/multimedia/ffmpeg4/files/patch-libavformat_rtsp.c b/multimedia/ffmpeg4/files/patch-libavformat_rtsp.c new file mode 100644 index 000000000000..7cd751dbd79c --- /dev/null +++ b/multimedia/ffmpeg4/files/patch-libavformat_rtsp.c @@ -0,0 +1,44 @@ +--- libavformat/rtsp.c.orig 2016-10-27 16:17:40 UTC ++++ libavformat/rtsp.c +@@ -1614,7 +1614,12 @@ int ff_rtsp_make_setup_request(AVFormatC + } + if (ttl > 0) + snprintf(optbuf, sizeof(optbuf), "?ttl=%d", ttl); +- getnameinfo((struct sockaddr*) &addr, sizeof(addr), ++ getnameinfo((struct sockaddr*) &addr, ++#if HAVE_STRUCT_SOCKADDR_SA_LEN ++ ((struct sockaddr*) &addr)->sa_len, ++#else ++ sizeof(addr), ++#endif + namebuf, sizeof(namebuf), NULL, 0, NI_NUMERICHOST); + ff_url_join(url, sizeof(url), "rtp", NULL, namebuf, + port, "%s", optbuf); +@@ -1830,8 +1835,13 @@ redirect: + goto fail; + } + if (!getpeername(tcp_fd, (struct sockaddr*) &peer, &peer_len)) { +- getnameinfo((struct sockaddr*) &peer, peer_len, host, sizeof(host), +- NULL, 0, NI_NUMERICHOST); ++ getnameinfo((struct sockaddr*) &peer, ++#if HAVE_STRUCT_SOCKADDR_SA_LEN ++ ((struct sockaddr*) &peer)->sa_len, ++#else ++ peer_len, ++#endif ++ host, sizeof(host), NULL, 0, NI_NUMERICHOST); + } + + /* request options supported by the server; this also detects server +@@ -2310,7 +2320,11 @@ static int sdp_read_header(AVFormatConte + AVDictionary *opts = map_to_opts(rt); + + err = getnameinfo((struct sockaddr*) &rtsp_st->sdp_ip, ++#if HAVE_STRUCT_SOCKADDR_SA_LEN ++ ((struct sockaddr*) &rtsp_st->sdp_ip)->sa_len, ++#else + sizeof(rtsp_st->sdp_ip), ++#endif + namebuf, sizeof(namebuf), NULL, 0, NI_NUMERICHOST); + if (err) { + av_log(s, AV_LOG_ERROR, "getnameinfo: %s\n", gai_strerror(err)); diff --git a/multimedia/ffmpeg4/files/patch-libavutil-common.h b/multimedia/ffmpeg4/files/patch-libavutil-common.h new file mode 100644 index 000000000000..2c8c84e6d372 --- /dev/null +++ b/multimedia/ffmpeg4/files/patch-libavutil-common.h @@ -0,0 +1,13 @@ +--- libavutil/common.h.orig 2015-06-19 20:44:53 UTC ++++ libavutil/common.h +@@ -49,6 +49,10 @@ + # define AV_NE(be, le) (le) + #endif + ++#ifndef UINT64_C ++#define UINT64_C(c) (c ## UL) ++#endif ++ + //rounded division & shift + #define RSHIFT(a,b) ((a) > 0 ? 
((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b)) + /* assume b>0 */ diff --git a/multimedia/ffmpeg4/files/patch-libavutil-x86-asm.h b/multimedia/ffmpeg4/files/patch-libavutil-x86-asm.h new file mode 100644 index 000000000000..71e31a029955 --- /dev/null +++ b/multimedia/ffmpeg4/files/patch-libavutil-x86-asm.h @@ -0,0 +1,11 @@ +--- libavutil/x86/asm.h.orig 2015-06-19 20:44:54 UTC ++++ libavutil/x86/asm.h +@@ -71,7 +71,7 @@ typedef int32_t x86_reg; + typedef int x86_reg; + #endif + +-#define HAVE_7REGS (ARCH_X86_64 || (HAVE_EBX_AVAILABLE && HAVE_EBP_AVAILABLE)) ++#define HAVE_7REGS (ARCH_X86_64 || (HAVE_EBX_AVAILABLE && HAVE_EBP_AVAILABLE && !defined(__clang__))) + #define HAVE_6REGS (ARCH_X86_64 || (HAVE_EBX_AVAILABLE || HAVE_EBP_AVAILABLE)) + + #if ARCH_X86_64 && defined(PIC) diff --git a/multimedia/ffmpeg4/files/patch-libavutil_hwcontext__drm.c b/multimedia/ffmpeg4/files/patch-libavutil_hwcontext__drm.c new file mode 100644 index 000000000000..f5e186300975 --- /dev/null +++ b/multimedia/ffmpeg4/files/patch-libavutil_hwcontext__drm.c @@ -0,0 +1,24 @@ +drm-kmod doesn't expose uAPI yet, so bundle like Mesa + +--- libavutil/hwcontext_drm.c.orig 2022-07-22 17:58:39 UTC ++++ libavutil/hwcontext_drm.c +@@ -28,6 +28,19 @@ + #if HAVE_LINUX_DMA_BUF_H + #include <linux/dma-buf.h> + #include <sys/ioctl.h> ++#else // BSDs ++#undef HAVE_LINUX_DMA_BUF_H ++#define HAVE_LINUX_DMA_BUF_H 1 ++#include <sys/ioctl.h> ++struct dma_buf_sync { ++ uint64_t flags; ++}; ++#define DMA_BUF_BASE 'b' ++#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync) ++#define DMA_BUF_SYNC_READ (1 << 0) ++#define DMA_BUF_SYNC_WRITE (2 << 0) ++#define DMA_BUF_SYNC_START (0 << 2) ++#define DMA_BUF_SYNC_END (1 << 2) + #endif + + #include <drm.h> diff --git a/multimedia/ffmpeg4/files/patch-libswscale_ppc_yuv2rgb__altivec.c b/multimedia/ffmpeg4/files/patch-libswscale_ppc_yuv2rgb__altivec.c new file mode 100644 index 000000000000..38ff9087b842 --- /dev/null +++ b/multimedia/ffmpeg4/files/patch-libswscale_ppc_yuv2rgb__altivec.c @@ -0,0 +1,15 @@ +Error: +libswscale/ppc/yuv2rgb_altivec.c:288:36: error: redeclaration of 'vec_xl' must have the 'overloadable' attribute +static inline vector unsigned char vec_xl(signed long long offset, const ubyte *addr) + +--- libswscale/ppc/yuv2rgb_altivec.c.orig 2021-04-08 21:28:40 UTC ++++ libswscale/ppc/yuv2rgb_altivec.c +@@ -284,7 +284,7 @@ static inline void cvtyuvtoRGB(SwsContext *c, vector s + * ------------------------------------------------------------------------------ + */ + +-#if !HAVE_VSX ++#if !HAVE_VSX && !defined(__clang__) + static inline vector unsigned char vec_xl(signed long long offset, const ubyte *addr) + { + const vector unsigned char *v_addr = (const vector unsigned char *) (addr + offset); diff --git a/multimedia/ffmpeg4/files/patch-svtav1 b/multimedia/ffmpeg4/files/patch-svtav1 new file mode 100644 index 000000000000..5d8893a959fa --- /dev/null +++ b/multimedia/ffmpeg4/files/patch-svtav1 @@ -0,0 +1,437 @@ +https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/04b89e8ae33b +https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/64e2fb3f9d89 +https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/0463f5d6d56d +https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/c5f314309067 +https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/c33b4048859a +https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/a2b090da7932 +https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/1dddb930aaf0 +https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/4e47ebf38b97 
+https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/50bc87263576 +https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/d794b36a7788 +https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/51c0b9e829be +https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/e3c4442b249a +https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/6fd1533057ff +https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/ded0334d214f +https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/70887d44ffa3 +https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/fe100bc556d7 +https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/fb70e0611bd7 + +--- configure.orig 2021-10-24 20:47:11 UTC ++++ configure +@@ -6430,7 +6430,7 @@ enabled libsrt && require_pkg_config libsrt + enabled libssh && require_pkg_config libssh libssh libssh/sftp.h sftp_init + enabled libspeex && require_pkg_config libspeex speex speex/speex.h speex_decoder_init + enabled libsrt && require_pkg_config libsrt "srt >= 1.3.0" srt/srt.h srt_socket +-enabled libsvtav1 && require_pkg_config libsvtav1 "SvtAv1Enc >= 0.8.4" EbSvtAv1Enc.h svt_av1_enc_init_handle ++enabled libsvtav1 && require_pkg_config libsvtav1 "SvtAv1Enc >= 0.9.0" EbSvtAv1Enc.h svt_av1_enc_init_handle + enabled libtensorflow && require libtensorflow tensorflow/c/c_api.h TF_Version -ltensorflow + enabled libtesseract && require_pkg_config libtesseract tesseract tesseract/capi.h TessBaseAPICreate + enabled libtheora && require libtheora theora/theoraenc.h th_info_init -ltheoraenc -ltheoradec -logg +--- doc/encoders.texi.orig 2021-10-24 20:47:07 UTC ++++ doc/encoders.texi +@@ -1754,28 +1754,15 @@ Set the operating point tier. + @item tier + Set the operating point tier. + +-@item rc +-Set the rate control mode to use. +- +-Possible modes: +-@table @option +-@item cqp +-Constant quantizer: use fixed values of qindex (dependent on the frame type) +-throughout the stream. This mode is the default. +- +-@item vbr +-Variable bitrate: use a target bitrate for the whole stream. +- +-@item cvbr +-Constrained variable bitrate: use a target bitrate for each GOP. +-@end table +- + @item qmax + Set the maximum quantizer to use when using a bitrate mode. + + @item qmin + Set the minimum quantizer to use when using a bitrate mode. + ++@item crf ++Constant rate factor value used in crf rate control mode (0-63). ++ + @item qp + Set the quantizer used in cqp rate control mode (0-63). + +@@ -1786,14 +1773,18 @@ Set number of frames to look ahead (0-120). + Set number of frames to look ahead (0-120). + + @item preset +-Set the quality-speed tradeoff, in the range 0 to 8. Higher values are +-faster but lower quality. Defaults to 8 (highest speed). ++Set the quality-speed tradeoff, in the range 0 to 13. Higher values are ++faster but lower quality. + + @item tile_rows + Set log2 of the number of rows of tiles to use (0-6). + + @item tile_columns + Set log2 of the number of columns of tiles to use (0-4). ++ ++@item svtav1-params ++Set SVT-AV1 options using a list of @var{key}=@var{value} pairs separated ++by ":". See the SVT-AV1 encoder user guide for a list of accepted parameters. + + @end table + +--- libavcodec/libsvtav1.c.orig 2021-10-24 20:47:07 UTC ++++ libavcodec/libsvtav1.c +@@ -60,17 +60,20 @@ typedef struct SvtContext { + EOS_STATUS eos_flag; + + // User options. 
++ AVDictionary *svtav1_opts; ++#if FF_API_SVTAV1_OPTS + int hierarchical_level; + int la_depth; +- int enc_mode; +- int rc_mode; + int scd; +- int qp; + + int tier; + + int tile_columns; + int tile_rows; ++#endif ++ int enc_mode; ++ int crf; ++ int qp; + } SvtContext; + + static const struct { +@@ -151,11 +154,126 @@ static int config_enc_params(EbSvtAv1EncConfiguration + { + SvtContext *svt_enc = avctx->priv_data; + const AVPixFmtDescriptor *desc; ++ AVDictionaryEntry *en = NULL; + ++ // Update param from options ++#if FF_API_SVTAV1_OPTS ++ param->hierarchical_levels = svt_enc->hierarchical_level; ++ param->tier = svt_enc->tier; ++ param->scene_change_detection = svt_enc->scd; ++ param->tile_columns = svt_enc->tile_columns; ++ param->tile_rows = svt_enc->tile_rows; ++ ++ if (svt_enc->la_depth >= 0) ++ param->look_ahead_distance = svt_enc->la_depth; ++#endif ++ ++ if (svt_enc->enc_mode >= 0) ++ param->enc_mode = svt_enc->enc_mode; ++ ++ if (avctx->bit_rate) { ++ param->target_bit_rate = avctx->bit_rate; ++ if (avctx->rc_max_rate != avctx->bit_rate) ++ param->rate_control_mode = 1; ++ else ++ param->rate_control_mode = 2; ++ ++ param->max_qp_allowed = avctx->qmax; ++ param->min_qp_allowed = avctx->qmin; ++ } ++ param->max_bit_rate = avctx->rc_max_rate; ++ param->vbv_bufsize = avctx->rc_buffer_size; ++ ++ if (svt_enc->crf > 0) { ++ param->qp = svt_enc->crf; ++ param->rate_control_mode = 0; ++ } else if (svt_enc->qp > 0) { ++ param->qp = svt_enc->qp; ++ param->rate_control_mode = 0; ++ param->enable_adaptive_quantization = 0; ++ } ++ ++ desc = av_pix_fmt_desc_get(avctx->pix_fmt); ++ param->color_primaries = avctx->color_primaries; ++ param->matrix_coefficients = (desc->flags & AV_PIX_FMT_FLAG_RGB) ? ++ AVCOL_SPC_RGB : avctx->colorspace; ++ param->transfer_characteristics = avctx->color_trc; ++ ++ if (avctx->color_range != AVCOL_RANGE_UNSPECIFIED) ++ param->color_range = avctx->color_range == AVCOL_RANGE_JPEG; ++ else ++ param->color_range = !!(desc->flags & AV_PIX_FMT_FLAG_RGB); ++ ++#if SVT_AV1_CHECK_VERSION(1, 0, 0) ++ if (avctx->chroma_sample_location != AVCHROMA_LOC_UNSPECIFIED) { ++ const char *name = ++ av_chroma_location_name(avctx->chroma_sample_location); ++ ++ switch (avctx->chroma_sample_location) { ++ case AVCHROMA_LOC_LEFT: ++ param->chroma_sample_position = EB_CSP_VERTICAL; ++ break; ++ case AVCHROMA_LOC_TOPLEFT: ++ param->chroma_sample_position = EB_CSP_COLOCATED; ++ break; ++ default: ++ if (!name) ++ break; ++ ++ av_log(avctx, AV_LOG_WARNING, ++ "Specified chroma sample location %s is unsupported " ++ "on the AV1 bit stream level. Usage of a container that " ++ "allows passing this information - such as Matroska - " ++ "is recommended.\n", ++ name); ++ break; ++ } ++ } ++#endif ++ ++ if (avctx->profile != FF_PROFILE_UNKNOWN) ++ param->profile = avctx->profile; ++ ++ if (avctx->level != FF_LEVEL_UNKNOWN) ++ param->level = avctx->level; ++ ++ if (avctx->gop_size > 0) ++ param->intra_period_length = avctx->gop_size - 1; ++ ++ if (avctx->framerate.num > 0 && avctx->framerate.den > 0) { ++ param->frame_rate_numerator = avctx->framerate.num; ++ param->frame_rate_denominator = avctx->framerate.den; ++ } else { ++ param->frame_rate_numerator = avctx->time_base.den; ++ param->frame_rate_denominator = avctx->time_base.num * avctx->ticks_per_frame; ++ } ++ ++ /* 2 = IDR, closed GOP, 1 = CRA, open GOP */ ++ param->intra_refresh_type = avctx->flags & AV_CODEC_FLAG_CLOSED_GOP ? 
2 : 1; ++ ++#if SVT_AV1_CHECK_VERSION(0, 9, 1) ++ while ((en = av_dict_get(svt_enc->svtav1_opts, "", en, AV_DICT_IGNORE_SUFFIX))) { ++ EbErrorType ret = svt_av1_enc_parse_parameter(param, en->key, en->value); ++ if (ret != EB_ErrorNone) { ++ int level = (avctx->err_recognition & AV_EF_EXPLODE) ? AV_LOG_ERROR : AV_LOG_WARNING; ++ av_log(avctx, level, "Error parsing option %s: %s.\n", en->key, en->value); ++ if (avctx->err_recognition & AV_EF_EXPLODE) ++ return AVERROR(EINVAL); ++ } ++ } ++#else ++ if ((en = av_dict_get(svt_enc->svtav1_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) { ++ int level = (avctx->err_recognition & AV_EF_EXPLODE) ? AV_LOG_ERROR : AV_LOG_WARNING; ++ av_log(avctx, level, "svt-params needs libavcodec to be compiled with SVT-AV1 " ++ "headers >= 0.9.1.\n"); ++ if (avctx->err_recognition & AV_EF_EXPLODE) ++ return AVERROR(ENOSYS); ++ } ++#endif ++ + param->source_width = avctx->width; + param->source_height = avctx->height; + +- desc = av_pix_fmt_desc_get(avctx->pix_fmt); + param->encoder_bit_depth = desc->comp[0].depth; + + if (desc->log2_chroma_w == 1 && desc->log2_chroma_h == 1) +@@ -169,12 +287,6 @@ static int config_enc_params(EbSvtAv1EncConfiguration + return AVERROR(EINVAL); + } + +- if (avctx->profile != FF_PROFILE_UNKNOWN) +- param->profile = avctx->profile; +- +- if (avctx->level != FF_LEVEL_UNKNOWN) +- param->level = avctx->level; +- + if ((param->encoder_color_format == EB_YUV422 || param->encoder_bit_depth > 10) + && param->profile != FF_PROFILE_AV1_PROFESSIONAL ) { + av_log(avctx, AV_LOG_WARNING, "Forcing Professional profile\n"); +@@ -184,40 +296,21 @@ static int config_enc_params(EbSvtAv1EncConfiguration + param->profile = FF_PROFILE_AV1_HIGH; + } + +- // Update param from options +- param->hierarchical_levels = svt_enc->hierarchical_level; +- param->enc_mode = svt_enc->enc_mode; +- param->tier = svt_enc->tier; +- param->rate_control_mode = svt_enc->rc_mode; +- param->scene_change_detection = svt_enc->scd; +- param->qp = svt_enc->qp; ++ avctx->bit_rate = param->rate_control_mode > 0 ? 
++ param->target_bit_rate : 0; ++ avctx->rc_max_rate = param->max_bit_rate; ++ avctx->rc_buffer_size = param->vbv_bufsize; + +- param->target_bit_rate = avctx->bit_rate; ++ if (avctx->bit_rate || avctx->rc_max_rate || avctx->rc_buffer_size) { ++ AVCPBProperties *cpb_props = ff_add_cpb_side_data(avctx); ++ if (!cpb_props) ++ return AVERROR(ENOMEM); + +- if (avctx->gop_size > 0) +- param->intra_period_length = avctx->gop_size - 1; +- +- if (avctx->framerate.num > 0 && avctx->framerate.den > 0) { +- param->frame_rate_numerator = avctx->framerate.num; +- param->frame_rate_denominator = avctx->framerate.den; +- } else { +- param->frame_rate_numerator = avctx->time_base.den; +- param->frame_rate_denominator = avctx->time_base.num * avctx->ticks_per_frame; ++ cpb_props->buffer_size = avctx->rc_buffer_size; ++ cpb_props->max_bitrate = avctx->rc_max_rate; ++ cpb_props->avg_bitrate = avctx->bit_rate; + } + +- if (param->rate_control_mode) { +- param->max_qp_allowed = avctx->qmax; +- param->min_qp_allowed = avctx->qmin; +- } +- +- param->intra_refresh_type = 2; /* Real keyframes only */ +- +- if (svt_enc->la_depth >= 0) +- param->look_ahead_distance = svt_enc->la_depth; +- +- param->tile_columns = svt_enc->tile_columns; +- param->tile_rows = svt_enc->tile_rows; +- + return 0; + } + +@@ -330,11 +423,8 @@ static int eb_send_frame(AVCodecContext *avctx, const + if (svt_enc->eos_flag == EOS_SENT) + return 0; + +- headerPtrLast.n_alloc_len = 0; +- headerPtrLast.n_filled_len = 0; +- headerPtrLast.n_tick_count = 0; +- headerPtrLast.p_app_private = NULL; +- headerPtrLast.p_buffer = NULL; ++ memset(&headerPtrLast, 0, sizeof(headerPtrLast)); ++ headerPtrLast.pic_type = EB_AV1_INVALID_PICTURE; + headerPtrLast.flags = EB_BUFFERFLAG_EOS; + + svt_av1_enc_send_picture(svt_enc->svt_handle, &headerPtrLast); +@@ -350,6 +440,16 @@ static int eb_send_frame(AVCodecContext *avctx, const + headerPtr->p_app_private = NULL; + headerPtr->pts = frame->pts; + ++ switch (frame->pict_type) { ++ case AV_PICTURE_TYPE_I: ++ headerPtr->pic_type = EB_AV1_KEY_PICTURE; ++ break; ++ default: ++ // Actually means auto, or default. 
++ headerPtr->pic_type = EB_AV1_INVALID_PICTURE; ++ break; ++ } ++ + svt_av1_enc_send_picture(svt_enc->svt_handle, headerPtr); + + return 0; +@@ -472,21 +572,22 @@ static const AVOption options[] = { + #define OFFSET(x) offsetof(SvtContext, x) + #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM + static const AVOption options[] = { +- { "hielevel", "Hierarchical prediction levels setting", OFFSET(hierarchical_level), +- AV_OPT_TYPE_INT, { .i64 = 4 }, 3, 4, VE , "hielevel"}, ++#if FF_API_SVTAV1_OPTS ++ { "hielevel", "Hierarchical prediction levels setting (Deprecated, use svtav1-params)", OFFSET(hierarchical_level), ++ AV_OPT_TYPE_INT, { .i64 = 4 }, 3, 4, VE | AV_OPT_FLAG_DEPRECATED , "hielevel"}, + { "3level", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 3 }, INT_MIN, INT_MAX, VE, "hielevel" }, + { "4level", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 4 }, INT_MIN, INT_MAX, VE, "hielevel" }, + +- { "la_depth", "Look ahead distance [0, 120]", OFFSET(la_depth), +- AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 120, VE }, ++ { "la_depth", "Look ahead distance [0, 120] (Deprecated, use svtav1-params)", OFFSET(la_depth), ++ AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 120, VE | AV_OPT_FLAG_DEPRECATED }, + +- { "preset", "Encoding preset [0, 8]", +- OFFSET(enc_mode), AV_OPT_TYPE_INT, { .i64 = MAX_ENC_PRESET }, 0, MAX_ENC_PRESET, VE }, +- +- { "tier", "Set operating point tier", OFFSET(tier), +- AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE, "tier" }, ++ { "tier", "Set operating point tier (Deprecated, use svtav1-params)", OFFSET(tier), ++ AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE | AV_OPT_FLAG_DEPRECATED, "tier" }, + { "main", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, VE, "tier" }, + { "high", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, VE, "tier" }, ++#endif ++ { "preset", "Encoding preset", ++ OFFSET(enc_mode), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, MAX_ENC_PRESET, VE }, + + FF_AV1_PROFILE_OPTS + +@@ -518,21 +619,20 @@ static const AVOption options[] = { + { LEVEL("7.3", 73) }, + #undef LEVEL + +- { "rc", "Bit rate control mode", OFFSET(rc_mode), +- AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 3, VE , "rc"}, +- { "cqp", "Constant quantizer", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, "rc" }, +- { "vbr", "Variable Bit Rate, use a target bitrate for the entire stream", 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, "rc" }, +- { "cvbr", "Constrained Variable Bit Rate, use a target bitrate for each GOP", 0, AV_OPT_TYPE_CONST,{ .i64 = 2 }, INT_MIN, INT_MAX, VE, "rc" }, ++ { "crf", "Constant Rate Factor value", OFFSET(crf), ++ AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 63, VE }, ++ { "qp", "Initial Quantizer level value", OFFSET(qp), ++ AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 63, VE }, ++#if FF_API_SVTAV1_OPTS ++ { "sc_detection", "Scene change detection (Deprecated, use svtav1-params)", OFFSET(scd), ++ AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE | AV_OPT_FLAG_DEPRECATED }, + +- { "qp", "Quantizer to use with cqp rate control mode", OFFSET(qp), +- AV_OPT_TYPE_INT, { .i64 = 50 }, 0, 63, VE }, ++ { "tile_columns", "Log2 of number of tile columns to use (Deprecated, use svtav1-params)", OFFSET(tile_columns), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 4, VE | AV_OPT_FLAG_DEPRECATED }, ++ { "tile_rows", "Log2 of number of tile rows to use (Deprecated, use svtav1-params)", OFFSET(tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 6, VE | AV_OPT_FLAG_DEPRECATED }, ++#endif + +- { "sc_detection", "Scene change detection", OFFSET(scd), +- AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, ++ { "svtav1-params", "Set the SVT-AV1 configuration using a 
:-separated list of key=value parameters", OFFSET(svtav1_opts), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE }, + +- { "tile_columns", "Log2 of number of tile columns to use", OFFSET(tile_columns), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 4, VE}, +- { "tile_rows", "Log2 of number of tile rows to use", OFFSET(tile_rows), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 6, VE}, +- + {NULL}, + }; + +@@ -544,9 +644,10 @@ static const AVCodecDefault eb_enc_defaults[] = { + }; + + static const AVCodecDefault eb_enc_defaults[] = { +- { "b", "7M" }, ++ { "b", "0" }, ++ { "flags", "+cgop" }, + { "g", "-1" }, +- { "qmin", "0" }, ++ { "qmin", "1" }, + { "qmax", "63" }, + { NULL }, + }; +@@ -561,12 +662,11 @@ AVCodec ff_libsvtav1_encoder = { + .receive_packet = eb_receive_packet, + .close = eb_enc_close, + .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_OTHER_THREADS, +- .caps_internal = FF_CODEC_CAP_AUTO_THREADS, ++ .caps_internal = FF_CODEC_CAP_AUTO_THREADS | FF_CODEC_CAP_INIT_CLEANUP, + .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, + AV_PIX_FMT_YUV420P10, + AV_PIX_FMT_NONE }, + .priv_class = &class, + .defaults = eb_enc_defaults, +- .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, + .wrapper_name = "libsvtav1", + }; +--- libavcodec/version.h.orig 2021-10-24 20:47:07 UTC ++++ libavcodec/version.h +@@ -168,5 +168,8 @@ + #ifndef FF_API_INIT_PACKET + #define FF_API_INIT_PACKET (LIBAVCODEC_VERSION_MAJOR < 60) + #endif ++#ifndef FF_API_SVTAV1_OPTS ++#define FF_API_SVTAV1_OPTS (LIBAVCODEC_VERSION_MAJOR < 60) ++#endif + + #endif /* AVCODEC_VERSION_H */ diff --git a/multimedia/ffmpeg4/files/patch-vmaf b/multimedia/ffmpeg4/files/patch-vmaf new file mode 100644 index 000000000000..abe772274d3c --- /dev/null +++ b/multimedia/ffmpeg4/files/patch-vmaf @@ -0,0 +1,951 @@ +https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/3d29724c008d + +--- configure.orig 2021-10-24 20:47:11 UTC ++++ configure +@@ -3663,7 +3663,7 @@ vidstabtransform_filter_deps="libvidstab" + vaguedenoiser_filter_deps="gpl" + vidstabdetect_filter_deps="libvidstab" + vidstabtransform_filter_deps="libvidstab" +-libvmaf_filter_deps="libvmaf pthreads" ++libvmaf_filter_deps="libvmaf" + zmq_filter_deps="libzmq" + zoompan_filter_deps="swscale" + zscale_filter_deps="libzimg const_nan" +@@ -6441,7 +6441,7 @@ enabled libvidstab && require_pkg_config libvid + enabled libuavs3d && require_pkg_config libuavs3d "uavs3d >= 1.1.41" uavs3d.h uavs3d_decode + enabled libv4l2 && require_pkg_config libv4l2 libv4l2 libv4l2.h v4l2_ioctl + enabled libvidstab && require_pkg_config libvidstab "vidstab >= 0.98" vid.stab/libvidstab.h vsMotionDetectInit +-enabled libvmaf && require_pkg_config libvmaf "libvmaf >= 1.5.2" libvmaf.h compute_vmaf ++enabled libvmaf && require_pkg_config libvmaf "libvmaf >= 2.0.0" libvmaf.h vmaf_init + enabled libvo_amrwbenc && require libvo_amrwbenc vo-amrwbenc/enc_if.h E_IF_init -lvo-amrwbenc + enabled libvorbis && require_pkg_config libvorbis vorbis vorbis/codec.h vorbis_info_init && + require_pkg_config libvorbisenc vorbisenc vorbis/vorbisenc.h vorbis_encode_init +--- doc/filters.texi.orig 2021-10-24 20:47:07 UTC ++++ doc/filters.texi +@@ -13867,66 +13867,58 @@ ffmpeg -i input.mov -vf lensfun=make=Canon:model="Cano + + @section libvmaf + +-Obtain the VMAF (Video Multi-Method Assessment Fusion) +-score between two input videos. ++Calulate the VMAF (Video Multi-Method Assessment Fusion) score for a ++reference/distorted pair of input videos. + + The obtained VMAF score is printed through the logging system. 
+ + It requires Netflix's vmaf library (libvmaf) as a pre-requisite. + After installing the library it can be enabled using: + @code{./configure --enable-libvmaf}. +-If no model path is specified it uses the default model: @code{vmaf_v0.6.1.pkl}. + + The filter has following options: + + @table @option ++@item model ++A `|` delimited list of vmaf models. Each model can be configured with a number of parameters. ++Default value: @code{"version=vmaf_v0.6.1"} ++ + @item model_path +-Set the model path which is to be used for SVM. +-Default value: @code{"/usr/local/share/model/vmaf_v0.6.1.pkl"} ++Deprecated, use model='path=...'. + +-@item log_path +-Set the file path to be used to store logs. +- +-@item log_fmt +-Set the format of the log file (csv, json or xml). +- + @item enable_transform +-This option can enable/disable the @code{score_transform} applied to the final predicted VMAF score, +-if you have specified score_transform option in the input parameter file passed to @code{run_vmaf_training.py} +-Default value: @code{false} ++Deprecated, use model='enable_transform=true'. + + @item phone_model +-Invokes the phone model which will generate VMAF scores higher than in the +-regular model, which is more suitable for laptop, TV, etc. viewing conditions. +-Default value: @code{false} ++Deprecated, use model='enable_transform=true'. + ++@item enable_conf_interval ++Deprecated, use model='enable_conf_interval=true'. ++ ++@item feature ++A `|` delimited list of features. Each feature can be configured with a number of parameters. ++ + @item psnr +-Enables computing psnr along with vmaf. +-Default value: @code{false} ++Deprecated, use feature='name=psnr'. + + @item ssim +-Enables computing ssim along with vmaf. +-Default value: @code{false} ++Deprecated, use feature='name=ssim'. + + @item ms_ssim +-Enables computing ms_ssim along with vmaf. +-Default value: @code{false} ++Deprecated, use feature='name=ms_ssim'. + +-@item pool +-Set the pool method to be used for computing vmaf. +-Options are @code{min}, @code{harmonic_mean} or @code{mean} (default). ++@item log_path ++Set the file path to be used to store log files. + ++@item log_fmt ++Set the format of the log file (xml, json, csv, or sub). ++ + @item n_threads +-Set number of threads to be used when computing vmaf. +-Default value: @code{0}, which makes use of all available logical processors. ++Set number of threads to be used when initializing libvmaf. ++Default value: @code{0}, no threads. + + @item n_subsample +-Set interval for frame subsampling used when computing vmaf. +-Default value: @code{1} +- +-@item enable_conf_interval +-Enables confidence interval. +-Default value: @code{false} ++Set frame subsampling interval to be used. + @end table + + This filter also supports the @ref{framesync} options. +@@ -13934,23 +13926,31 @@ This filter also supports the @ref{framesync} options. + @subsection Examples + @itemize + @item +-On the below examples the input file @file{main.mpg} being processed is +-compared with the reference file @file{ref.mpg}. ++In the examples below, a distorted video @file{distorted.mpg} is ++compared with a reference file @file{reference.mpg}. 
+ ++@item ++Basic usage: + @example +-ffmpeg -i main.mpg -i ref.mpg -lavfi libvmaf -f null - ++ffmpeg -i distorted.mpg -i reference.mpg -lavfi libvmaf=log_path=output.xml -f null - + @end example + + @item +-Example with options: ++Example with multiple models: + @example +-ffmpeg -i main.mpg -i ref.mpg -lavfi libvmaf="psnr=1:log_fmt=json" -f null - ++ffmpeg -i distorted.mpg -i reference.mpg -lavfi libvmaf='model=version=vmaf_v0.6.1\\:name=vmaf|version=vmaf_v0.6.1neg\\:name=vmaf_neg' -f null - + @end example + + @item ++Example with multiple addtional features: ++@example ++ffmpeg -i distorted.mpg -i reference.mpg -lavfi libvmaf='feature=name=psnr|name=ciede' -f null - ++@end example ++ ++@item + Example with options and different containers: + @example +-ffmpeg -i main.mpg -i ref.mkv -lavfi "[0:v]settb=AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=AVTB,setpts=PTS-STARTPTS[ref];[main][ref]libvmaf=psnr=1:log_fmt=json" -f null - ++ffmpeg -i distorted.mpg -i reference.mkv -lavfi "[0:v]settb=AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=AVTB,setpts=PTS-STARTPTS[ref];[main][ref]libvmaf=log_fmt=json:log_path=output.json" -f null - + @end example + @end itemize + +--- libavfilter/vf_libvmaf.c.orig 2021-10-24 20:47:07 UTC ++++ libavfilter/vf_libvmaf.c +@@ -24,8 +24,8 @@ + * Calculate the VMAF between two input videos. + */ + +-#include <pthread.h> + #include <libvmaf.h> ++ + #include "libavutil/avstring.h" + #include "libavutil/opt.h" + #include "libavutil/pixdesc.h" +@@ -39,23 +39,9 @@ typedef struct LIBVMAFContext { + typedef struct LIBVMAFContext { + const AVClass *class; + FFFrameSync fs; +- const AVPixFmtDescriptor *desc; +- int width; +- int height; +- double vmaf_score; +- int vmaf_thread_created; +- pthread_t vmaf_thread; +- pthread_mutex_t lock; +- pthread_cond_t cond; +- int eof; +- AVFrame *gmain; +- AVFrame *gref; +- int frame_set; + char *model_path; + char *log_path; + char *log_fmt; +- int disable_clip; +- int disable_avx; + int enable_transform; + int phone_model; + int psnr; +@@ -65,185 +51,488 @@ typedef struct LIBVMAFContext { + int n_threads; + int n_subsample; + int enable_conf_interval; +- int error; ++ char *model_cfg; ++ char *feature_cfg; ++ VmafContext *vmaf; ++ VmafModel **model; ++ unsigned model_cnt; ++ unsigned frame_cnt; ++ unsigned bpc; + } LIBVMAFContext; + + #define OFFSET(x) offsetof(LIBVMAFContext, x) + #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM + + static const AVOption libvmaf_options[] = { +- {"model_path", "Set the model to be used for computing vmaf.", OFFSET(model_path), AV_OPT_TYPE_STRING, {.str="/usr/local/share/model/vmaf_v0.6.1.pkl"}, 0, 1, FLAGS}, +- {"log_path", "Set the file path to be used to store logs.", OFFSET(log_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS}, +- {"log_fmt", "Set the format of the log (csv, json or xml).", OFFSET(log_fmt), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS}, +- {"enable_transform", "Enables transform for computing vmaf.", OFFSET(enable_transform), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS}, +- {"phone_model", "Invokes the phone model that will generate higher VMAF scores.", OFFSET(phone_model), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS}, +- {"psnr", "Enables computing psnr along with vmaf.", OFFSET(psnr), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS}, +- {"ssim", "Enables computing ssim along with vmaf.", OFFSET(ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS}, +- {"ms_ssim", "Enables computing ms-ssim along with vmaf.", OFFSET(ms_ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS}, ++ {"model_path", "use 
model='path=...'.", OFFSET(model_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED}, ++ {"log_path", "Set the file path to be used to write log.", OFFSET(log_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS}, ++ {"log_fmt", "Set the format of the log (csv, json, xml, or sub).", OFFSET(log_fmt), AV_OPT_TYPE_STRING, {.str="xml"}, 0, 1, FLAGS}, ++ {"enable_transform", "use model='enable_transform=true'.", OFFSET(enable_transform), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED}, ++ {"phone_model", "use model='enable_transform=true'.", OFFSET(phone_model), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED}, ++ {"psnr", "use feature='name=psnr'.", OFFSET(psnr), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED}, ++ {"ssim", "use feature='name=ssim'.", OFFSET(ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED}, ++ {"ms_ssim", "use feature='name=ms_ssim'.", OFFSET(ms_ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED}, + {"pool", "Set the pool method to be used for computing vmaf.", OFFSET(pool), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS}, + {"n_threads", "Set number of threads to be used when computing vmaf.", OFFSET(n_threads), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT_MAX, FLAGS}, + {"n_subsample", "Set interval for frame subsampling used when computing vmaf.", OFFSET(n_subsample), AV_OPT_TYPE_INT, {.i64=1}, 1, UINT_MAX, FLAGS}, +- {"enable_conf_interval", "Enables confidence interval.", OFFSET(enable_conf_interval), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS}, ++ {"enable_conf_interval", "model='enable_conf_interval=true'.", OFFSET(enable_conf_interval), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED}, ++ {"model", "Set the model to be used for computing vmaf.", OFFSET(model_cfg), AV_OPT_TYPE_STRING, {.str="version=vmaf_v0.6.1"}, 0, 1, FLAGS}, ++ {"feature", "Set the feature to be used for computing vmaf.", OFFSET(feature_cfg), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS}, + { NULL } + }; + + FRAMESYNC_DEFINE_CLASS(libvmaf, LIBVMAFContext, fs); + +-#define read_frame_fn(type, bits) \ +- static int read_frame_##bits##bit(float *ref_data, float *main_data, \ +- float *temp_data, int stride, void *ctx) \ +-{ \ +- LIBVMAFContext *s = (LIBVMAFContext *) ctx; \ +- int ret; \ +- \ +- pthread_mutex_lock(&s->lock); \ +- \ +- while (!s->frame_set && !s->eof) { \ +- pthread_cond_wait(&s->cond, &s->lock); \ +- } \ +- \ +- if (s->frame_set) { \ +- int ref_stride = s->gref->linesize[0]; \ +- int main_stride = s->gmain->linesize[0]; \ +- \ +- const type *ref_ptr = (const type *) s->gref->data[0]; \ +- const type *main_ptr = (const type *) s->gmain->data[0]; \ +- \ +- float *ptr = ref_data; \ +- float factor = 1.f / (1 << (bits - 8)); \ +- \ +- int h = s->height; \ +- int w = s->width; \ +- \ +- int i,j; \ +- \ +- for (i = 0; i < h; i++) { \ +- for ( j = 0; j < w; j++) { \ +- ptr[j] = ref_ptr[j] * factor; \ +- } \ +- ref_ptr += ref_stride / sizeof(*ref_ptr); \ +- ptr += stride / sizeof(*ptr); \ +- } \ +- \ +- ptr = main_data; \ +- \ +- for (i = 0; i < h; i++) { \ +- for (j = 0; j < w; j++) { \ +- ptr[j] = main_ptr[j] * factor; \ +- } \ +- main_ptr += main_stride / sizeof(*main_ptr); \ +- ptr += stride / sizeof(*ptr); \ +- } \ +- } \ +- \ +- ret = !s->frame_set; \ +- \ +- av_frame_unref(s->gref); \ +- av_frame_unref(s->gmain); \ +- s->frame_set = 0; \ +- \ +- pthread_cond_signal(&s->cond); \ +- pthread_mutex_unlock(&s->lock); \ +- \ +- if (ret) { \ +- return 2; \ +- } \ +- \ +- return 0; \ 
++static enum VmafPixelFormat pix_fmt_map(enum AVPixelFormat av_pix_fmt) ++{ ++ switch (av_pix_fmt) { ++ case AV_PIX_FMT_YUV420P: ++ case AV_PIX_FMT_YUV420P10LE: ++ case AV_PIX_FMT_YUV420P12LE: ++ case AV_PIX_FMT_YUV420P16LE: ++ return VMAF_PIX_FMT_YUV420P; ++ case AV_PIX_FMT_YUV422P: ++ case AV_PIX_FMT_YUV422P10LE: ++ case AV_PIX_FMT_YUV422P12LE: ++ case AV_PIX_FMT_YUV422P16LE: ++ return VMAF_PIX_FMT_YUV422P; ++ case AV_PIX_FMT_YUV444P: ++ case AV_PIX_FMT_YUV444P10LE: ++ case AV_PIX_FMT_YUV444P12LE: ++ case AV_PIX_FMT_YUV444P16LE: ++ return VMAF_PIX_FMT_YUV444P; ++ default: ++ return VMAF_PIX_FMT_UNKNOWN; ++ } + } + +-read_frame_fn(uint8_t, 8); +-read_frame_fn(uint16_t, 10); ++static int copy_picture_data(AVFrame *src, VmafPicture *dst, unsigned bpc) ++{ ++ int err = vmaf_picture_alloc(dst, pix_fmt_map(src->format), bpc, ++ src->width, src->height); ++ if (err) ++ return AVERROR(ENOMEM); + +-static void compute_vmaf_score(LIBVMAFContext *s) ++ for (unsigned i = 0; i < 3; i++) { ++ uint8_t *src_data = src->data[i]; ++ uint8_t *dst_data = dst->data[i]; ++ for (unsigned j = 0; j < dst->h[i]; j++) { ++ memcpy(dst_data, src_data, sizeof(*dst_data) * dst->w[i]); ++ src_data += src->linesize[i]; ++ dst_data += dst->stride[i]; ++ } ++ } ++ ++ return 0; ++} ++ ++static int do_vmaf(FFFrameSync *fs) + { +- int (*read_frame)(float *ref_data, float *main_data, float *temp_data, +- int stride, void *ctx); +- char *format; ++ AVFilterContext *ctx = fs->parent; ++ LIBVMAFContext *s = ctx->priv; ++ VmafPicture pic_ref, pic_dist; ++ AVFrame *ref, *dist; ++ int err = 0; + +- if (s->desc->comp[0].depth <= 8) { +- read_frame = read_frame_8bit; +- } else { +- read_frame = read_frame_10bit; ++ int ret = ff_framesync_dualinput_get(fs, &dist, &ref); ++ if (ret < 0) ++ return ret; ++ if (ctx->is_disabled || !ref) ++ return ff_filter_frame(ctx->outputs[0], dist); ++ ++ err = copy_picture_data(ref, &pic_ref, s->bpc); ++ if (err) { ++ av_log(s, AV_LOG_ERROR, "problem during vmaf_picture_alloc.\n"); ++ return AVERROR(ENOMEM); + } + +- format = (char *) s->desc->name; ++ err = copy_picture_data(dist, &pic_dist, s->bpc); ++ if (err) { ++ av_log(s, AV_LOG_ERROR, "problem during vmaf_picture_alloc.\n"); ++ vmaf_picture_unref(&pic_ref); ++ return AVERROR(ENOMEM); ++ } + +- s->error = compute_vmaf(&s->vmaf_score, format, s->width, s->height, +- read_frame, s, s->model_path, s->log_path, +- s->log_fmt, 0, 0, s->enable_transform, +- s->phone_model, s->psnr, s->ssim, +- s->ms_ssim, s->pool, +- s->n_threads, s->n_subsample, s->enable_conf_interval); ++ err = vmaf_read_pictures(s->vmaf, &pic_ref, &pic_dist, s->frame_cnt++); ++ if (err) { ++ av_log(s, AV_LOG_ERROR, "problem during vmaf_read_pictures.\n"); ++ return AVERROR(EINVAL); ++ } ++ ++ return ff_filter_frame(ctx->outputs[0], dist); + } + +-static void *call_vmaf(void *ctx) ++ ++static AVDictionary **delimited_dict_parse(char *str, unsigned *cnt) + { +- LIBVMAFContext *s = (LIBVMAFContext *) ctx; +- compute_vmaf_score(s); +- if (!s->error) { +- av_log(ctx, AV_LOG_INFO, "VMAF score: %f\n",s->vmaf_score); +- } else { +- pthread_mutex_lock(&s->lock); +- pthread_cond_signal(&s->cond); +- pthread_mutex_unlock(&s->lock); ++ AVDictionary **dict = NULL; ++ char *str_copy = NULL; ++ char *saveptr = NULL; ++ unsigned cnt2; ++ int err = 0; ++ ++ if (!str) ++ return NULL; ++ ++ cnt2 = 1; ++ for (char *p = str; *p; p++) { ++ if (*p == '|') ++ cnt2++; + } +- pthread_exit(NULL); ++ ++ dict = av_calloc(cnt2, sizeof(*dict)); ++ if (!dict) ++ goto fail; ++ ++ str_copy = av_strdup(str); ++ if 
(!str_copy) ++ goto fail; ++ ++ *cnt = 0; ++ for (unsigned i = 0; i < cnt2; i++) { ++ char *s = av_strtok(i == 0 ? str_copy : NULL, "|", &saveptr); ++ if (!s) ++ continue; ++ err = av_dict_parse_string(&dict[(*cnt)++], s, "=", ":", 0); ++ if (err) ++ goto fail; ++ } ++ ++ av_free(str_copy); ++ return dict; ++ ++fail: ++ if (dict) { ++ for (unsigned i = 0; i < *cnt; i++) { ++ if (dict[i]) ++ av_dict_free(&dict[i]); ++ } ++ av_free(dict); ++ } ++ ++ av_free(str_copy); ++ *cnt = 0; + return NULL; + } + +-static int do_vmaf(FFFrameSync *fs) ++static int parse_features(AVFilterContext *ctx) + { +- AVFilterContext *ctx = fs->parent; + LIBVMAFContext *s = ctx->priv; +- AVFrame *master, *ref; +- int ret; ++ AVDictionary **dict = NULL; ++ unsigned dict_cnt; ++ int err = 0; + +- ret = ff_framesync_dualinput_get(fs, &master, &ref); +- if (ret < 0) +- return ret; +- if (!ref) +- return ff_filter_frame(ctx->outputs[0], master); ++ if (!s->feature_cfg) ++ return 0; + +- pthread_mutex_lock(&s->lock); ++ dict = delimited_dict_parse(s->feature_cfg, &dict_cnt); ++ if (!dict) { ++ av_log(ctx, AV_LOG_ERROR, ++ "could not parse feature config: %s\n", s->feature_cfg); ++ return AVERROR(EINVAL); ++ } + +- while (s->frame_set && !s->error) { +- pthread_cond_wait(&s->cond, &s->lock); ++ for (unsigned i = 0; i < dict_cnt; i++) { ++ char *feature_name = NULL; ++ VmafFeatureDictionary *feature_opts_dict = NULL; ++ AVDictionaryEntry *e = NULL; ++ ++ while (e = av_dict_get(dict[i], "", e, AV_DICT_IGNORE_SUFFIX)) { ++ if (av_stristr(e->key, "name")) { ++ feature_name = e->value; ++ continue; ++ } ++ ++ err = vmaf_feature_dictionary_set(&feature_opts_dict, e->key, ++ e->value); ++ if (err) { ++ av_log(ctx, AV_LOG_ERROR, ++ "could not set feature option: %s.%s=%s\n", ++ feature_name, e->key, e->value); ++ goto exit; ++ } ++ } ++ ++ err = vmaf_use_feature(s->vmaf, feature_name, feature_opts_dict); ++ if (err) { ++ av_log(ctx, AV_LOG_ERROR, ++ "problem during vmaf_use_feature: %s\n", feature_name); ++ goto exit; ++ } + } + +- if (s->error) { ++exit: ++ for (unsigned i = 0; i < dict_cnt; i++) { ++ if (dict[i]) ++ av_dict_free(&dict[i]); ++ } ++ av_free(dict); ++ return err; ++} ++ ++static int parse_models(AVFilterContext *ctx) ++{ ++ LIBVMAFContext *s = ctx->priv; ++ AVDictionary **dict; ++ unsigned dict_cnt; ++ int err = 0; ++ ++ if (!s->model_cfg) return 0; ++ ++ dict_cnt = 0; ++ dict = delimited_dict_parse(s->model_cfg, &dict_cnt); ++ if (!dict) { + av_log(ctx, AV_LOG_ERROR, +- "libvmaf encountered an error, check log for details\n"); +- pthread_mutex_unlock(&s->lock); ++ "could not parse model config: %s\n", s->model_cfg); + return AVERROR(EINVAL); + } + +- av_frame_ref(s->gref, ref); +- av_frame_ref(s->gmain, master); ++ s->model_cnt = dict_cnt; ++ s->model = av_calloc(s->model_cnt, sizeof(*s->model)); ++ if (!s->model) ++ return AVERROR(ENOMEM); + +- s->frame_set = 1; ++ for (unsigned i = 0; i < dict_cnt; i++) { ++ VmafModelConfig model_cfg = { 0 }; ++ AVDictionaryEntry *e = NULL; ++ char *version = NULL; ++ char *path = NULL; + +- pthread_cond_signal(&s->cond); +- pthread_mutex_unlock(&s->lock); ++ while (e = av_dict_get(dict[i], "", e, AV_DICT_IGNORE_SUFFIX)) { ++ if (av_stristr(e->key, "disable_clip")) { ++ model_cfg.flags |= av_stristr(e->value, "true") ? ++ VMAF_MODEL_FLAG_DISABLE_CLIP : 0; ++ continue; ++ } + +- return ff_filter_frame(ctx->outputs[0], master); ++ if (av_stristr(e->key, "enable_transform")) { ++ model_cfg.flags |= av_stristr(e->value, "true") ? 
++ VMAF_MODEL_FLAG_ENABLE_TRANSFORM : 0; ++ continue; ++ } ++ ++ if (av_stristr(e->key, "name")) { ++ model_cfg.name = e->value; ++ continue; ++ } ++ ++ if (av_stristr(e->key, "version")) { ++ version = e->value; ++ continue; ++ } ++ ++ if (av_stristr(e->key, "path")) { ++ path = e->value; ++ continue; ++ } ++ } ++ ++ if (version) { ++ err = vmaf_model_load(&s->model[i], &model_cfg, version); ++ if (err) { ++ av_log(ctx, AV_LOG_ERROR, ++ "could not load libvmaf model with version: %s\n", ++ version); ++ goto exit; ++ } ++ } ++ ++ if (path && !s->model[i]) { ++ err = vmaf_model_load_from_path(&s->model[i], &model_cfg, path); ++ if (err) { ++ av_log(ctx, AV_LOG_ERROR, ++ "could not load libvmaf model with path: %s\n", ++ path); ++ goto exit; ++ } ++ } ++ ++ if (!s->model[i]) { ++ av_log(ctx, AV_LOG_ERROR, ++ "could not load libvmaf model with config: %s\n", ++ s->model_cfg); ++ goto exit; ++ } ++ ++ while (e = av_dict_get(dict[i], "", e, AV_DICT_IGNORE_SUFFIX)) { ++ VmafFeatureDictionary *feature_opts_dict = NULL; ++ char *feature_opt = NULL; ++ ++ char *feature_name = av_strtok(e->key, ".", &feature_opt); ++ if (!feature_opt) ++ continue; ++ ++ err = vmaf_feature_dictionary_set(&feature_opts_dict, ++ feature_opt, e->value); ++ if (err) { ++ av_log(ctx, AV_LOG_ERROR, ++ "could not set feature option: %s.%s=%s\n", ++ feature_name, feature_opt, e->value); ++ err = AVERROR(EINVAL); ++ goto exit; ++ } ++ ++ err = vmaf_model_feature_overload(s->model[i], feature_name, ++ feature_opts_dict); ++ if (err) { ++ av_log(ctx, AV_LOG_ERROR, ++ "could not overload feature: %s\n", feature_name); ++ err = AVERROR(EINVAL); ++ goto exit; ++ } ++ } ++ } ++ ++ for (unsigned i = 0; i < s->model_cnt; i++) { ++ err = vmaf_use_features_from_model(s->vmaf, s->model[i]); ++ if (err) { ++ av_log(ctx, AV_LOG_ERROR, ++ "problem during vmaf_use_features_from_model\n"); ++ err = AVERROR(EINVAL); ++ goto exit; ++ } ++ } ++ ++exit: ++ for (unsigned i = 0; i < dict_cnt; i++) { ++ if (dict[i]) ++ av_dict_free(&dict[i]); ++ } ++ av_free(dict); ++ return err; + } + ++static enum VmafLogLevel log_level_map(int log_level) ++{ ++ switch (log_level) { ++ case AV_LOG_QUIET: ++ return VMAF_LOG_LEVEL_NONE; ++ case AV_LOG_ERROR: ++ return VMAF_LOG_LEVEL_ERROR; ++ case AV_LOG_WARNING: ++ return VMAF_LOG_LEVEL_WARNING; ++ case AV_LOG_INFO: ++ return VMAF_LOG_LEVEL_INFO; ++ case AV_LOG_DEBUG: ++ return VMAF_LOG_LEVEL_DEBUG; ++ default: ++ return VMAF_LOG_LEVEL_INFO; ++ } ++} ++ ++static int parse_deprecated_options(AVFilterContext *ctx) ++{ ++ LIBVMAFContext *s = ctx->priv; ++ VmafModel *model = NULL; ++ VmafModelCollection *model_collection = NULL; ++ enum VmafModelFlags flags = VMAF_MODEL_FLAGS_DEFAULT; ++ int err = 0; ++ ++ VmafModelConfig model_cfg = { ++ .name = "vmaf", ++ .flags = flags, ++ }; ++ ++ if (s->enable_transform || s->phone_model) ++ flags |= VMAF_MODEL_FLAG_ENABLE_TRANSFORM; ++ ++ if (!s->model_path) ++ goto extra_metrics_only; ++ ++ if (s->enable_conf_interval) { ++ err = vmaf_model_collection_load_from_path(&model, &model_collection, ++ &model_cfg, s->model_path); ++ if (err) { ++ av_log(ctx, AV_LOG_ERROR, ++ "problem loading model file: %s\n", s->model_path); ++ goto exit; ++ } ++ ++ err = vmaf_use_features_from_model_collection(s->vmaf, model_collection); ++ if (err) { ++ av_log(ctx, AV_LOG_ERROR, ++ "problem loading feature extractors from model file: %s\n", ++ s->model_path); ++ goto exit; ++ } ++ } else { ++ err = vmaf_model_load_from_path(&model, &model_cfg, s->model_path); ++ if (err) { ++ av_log(ctx, 
AV_LOG_ERROR, ++ "problem loading model file: %s\n", s->model_path); ++ goto exit; ++ } ++ err = vmaf_use_features_from_model(s->vmaf, model); ++ if (err) { ++ av_log(ctx, AV_LOG_ERROR, ++ "problem loading feature extractors from model file: %s\n", ++ s->model_path); ++ goto exit; ++ } ++ } ++ ++extra_metrics_only: ++ if (s->psnr) { ++ VmafFeatureDictionary *d = NULL; ++ vmaf_feature_dictionary_set(&d, "enable_chroma", "false"); ++ ++ err = vmaf_use_feature(s->vmaf, "psnr", d); ++ if (err) { ++ av_log(ctx, AV_LOG_ERROR, ++ "problem loading feature extractor: psnr\n"); ++ goto exit; ++ } ++ } ++ ++ if (s->ssim) { ++ err = vmaf_use_feature(s->vmaf, "float_ssim", NULL); ++ if (err) { ++ av_log(ctx, AV_LOG_ERROR, ++ "problem loading feature extractor: ssim\n"); ++ goto exit; ++ } ++ } ++ ++ if (s->ms_ssim) { ++ err = vmaf_use_feature(s->vmaf, "float_ms_ssim", NULL); ++ if (err) { ++ av_log(ctx, AV_LOG_ERROR, ++ "problem loading feature extractor: ms_ssim\n"); ++ goto exit; ++ } ++ } ++ ++exit: ++ return err; ++} ++ + static av_cold int init(AVFilterContext *ctx) + { + LIBVMAFContext *s = ctx->priv; ++ int err = 0; + +- s->gref = av_frame_alloc(); +- s->gmain = av_frame_alloc(); +- if (!s->gref || !s->gmain) +- return AVERROR(ENOMEM); ++ VmafConfiguration cfg = { ++ .log_level = log_level_map(av_log_get_level()), ++ .n_subsample = s->n_subsample, ++ .n_threads = s->n_threads, ++ }; + +- s->error = 0; ++ err = vmaf_init(&s->vmaf, cfg); ++ if (err) ++ return AVERROR(EINVAL); + +- s->vmaf_thread_created = 0; +- pthread_mutex_init(&s->lock, NULL); +- pthread_cond_init (&s->cond, NULL); ++ err = parse_deprecated_options(ctx); ++ if (err) ++ return err; + ++ err = parse_models(ctx); ++ if (err) ++ return err; ++ ++ err = parse_features(ctx); ++ if (err) ++ return err; ++ + s->fs.on_event = do_vmaf; + return 0; + } +@@ -265,31 +554,36 @@ static int config_input_ref(AVFilterLink *inlink) + + static int config_input_ref(AVFilterLink *inlink) + { +- AVFilterContext *ctx = inlink->dst; ++ AVFilterContext *ctx = inlink->dst; + LIBVMAFContext *s = ctx->priv; +- int th; ++ const AVPixFmtDescriptor *desc; ++ int err = 0; + +- if (ctx->inputs[0]->w != ctx->inputs[1]->w || +- ctx->inputs[0]->h != ctx->inputs[1]->h) { +- av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n"); +- return AVERROR(EINVAL); ++ if (ctx->inputs[0]->w != ctx->inputs[1]->w) { ++ av_log(ctx, AV_LOG_ERROR, "input width must match.\n"); ++ err |= AVERROR(EINVAL); + } + if (ctx->inputs[0]->format != ctx->inputs[1]->format) { + av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n"); + return AVERROR(EINVAL); + } + +- s->desc = av_pix_fmt_desc_get(inlink->format); +- s->width = ctx->inputs[0]->w; +- s->height = ctx->inputs[0]->h; ++ if (ctx->inputs[0]->h != ctx->inputs[1]->h) { ++ av_log(ctx, AV_LOG_ERROR, "input height must match.\n"); ++ err |= AVERROR(EINVAL); ++ } + +- th = pthread_create(&s->vmaf_thread, NULL, call_vmaf, (void *) s); +- if (th) { +- av_log(ctx, AV_LOG_ERROR, "Thread creation failed.\n"); +- return AVERROR(EINVAL); ++ if (ctx->inputs[0]->format != ctx->inputs[1]->format) { ++ av_log(ctx, AV_LOG_ERROR, "input pix_fmt must match.\n"); ++ err |= AVERROR(EINVAL); + } +- s->vmaf_thread_created = 1; + ++ if (err) ++ return err; ++ ++ desc = av_pix_fmt_desc_get(inlink->format); ++ s->bpc = desc->comp[0].depth; ++ + return 0; + } + +@@ -320,28 +614,80 @@ static int activate(AVFilterContext *ctx) + return ff_framesync_activate(&s->fs); + } + ++static enum VmafOutputFormat log_fmt_map(const char 
*log_fmt) ++{ ++ if (log_fmt) { ++ if (av_stristr(log_fmt, "xml")) ++ return VMAF_OUTPUT_FORMAT_XML; ++ if (av_stristr(log_fmt, "json")) ++ return VMAF_OUTPUT_FORMAT_JSON; ++ if (av_stristr(log_fmt, "csv")) ++ return VMAF_OUTPUT_FORMAT_CSV; ++ if (av_stristr(log_fmt, "sub")) ++ return VMAF_OUTPUT_FORMAT_SUB; ++ } ++ ++ return VMAF_OUTPUT_FORMAT_XML; ++} ++ ++static enum VmafPoolingMethod pool_method_map(const char *pool_method) ++{ ++ if (pool_method) { ++ if (av_stristr(pool_method, "min")) ++ return VMAF_POOL_METHOD_MIN; ++ if (av_stristr(pool_method, "mean")) ++ return VMAF_POOL_METHOD_MEAN; ++ if (av_stristr(pool_method, "harmonic_mean")) ++ return VMAF_POOL_METHOD_HARMONIC_MEAN; ++ } ++ ++ return VMAF_POOL_METHOD_MEAN; ++} ++ + static av_cold void uninit(AVFilterContext *ctx) + { + LIBVMAFContext *s = ctx->priv; ++ int err = 0; + + ff_framesync_uninit(&s->fs); + +- pthread_mutex_lock(&s->lock); +- s->eof = 1; +- pthread_cond_signal(&s->cond); +- pthread_mutex_unlock(&s->lock); ++ if (!s->frame_cnt) ++ goto clean_up; + +- if (s->vmaf_thread_created) +- { +- pthread_join(s->vmaf_thread, NULL); +- s->vmaf_thread_created = 0; ++ err = vmaf_read_pictures(s->vmaf, NULL, NULL, 0); ++ if (err) { ++ av_log(ctx, AV_LOG_ERROR, ++ "problem flushing libvmaf context.\n"); + } + +- av_frame_free(&s->gref); +- av_frame_free(&s->gmain); ++ for (unsigned i = 0; i < s->model_cnt; i++) { ++ double vmaf_score; ++ err = vmaf_score_pooled(s->vmaf, s->model[i], pool_method_map(s->pool), ++ &vmaf_score, 0, s->frame_cnt - 1); ++ if (err) { ++ av_log(ctx, AV_LOG_ERROR, ++ "problem getting pooled vmaf score.\n"); ++ } + +- pthread_mutex_destroy(&s->lock); +- pthread_cond_destroy(&s->cond); ++ av_log(ctx, AV_LOG_INFO, "VMAF score: %f\n", vmaf_score); ++ } ++ ++ if (s->vmaf) { ++ if (s->log_path && !err) ++ vmaf_write_output(s->vmaf, s->log_path, log_fmt_map(s->log_fmt)); ++ } ++ ++clean_up: ++ if (s->model) { ++ for (unsigned i = 0; i < s->model_cnt; i++) { ++ if (s->model[i]) ++ vmaf_model_destroy(s->model[i]); ++ } ++ av_free(s->model); ++ } ++ ++ if (s->vmaf) ++ vmaf_close(s->vmaf); + } + + static const AVFilterPad libvmaf_inputs[] = { |
