Mirror of https://github.com/jellyfin/jellyfin-ffmpeg.git, synced 2025-04-18 20:24:05 +03:00

New upstream version 4.3.1

parent 78cf502736
commit 0fe993576f
Changelog (184 changed lines)

@@ -1,73 +1,123 @@
Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.

version 4.2.1:
- avformat/vividas: check for tiny blocks using alignment
- avcodec/vc1_pred: Fix refdist in scaleforopp()
- avcodec/vorbisdec: fix FASTDIV usage for vr_type == 2
- avcodec/iff: Check for overlap in cmap_read_palette()
- avcodec/apedec: Fix 32bit int overflow in do_apply_filter()
- lavf/rawenc: Only accept the appropriate stream type for raw muxers.
- avformat/matroskadec: use av_fast_realloc to reallocate ebml list arrays
- avformat/matroskadec: use proper types for some EbmlSyntax fields
- avcodec/ralf: fix undefined shift in extend_code()
- avcodec/ralf: fix undefined shift
- avcodec/bgmc: Check input space in ff_bgmc_decode_init()
- avcodec/vp3: Check for end of input in 2 places of vp4_unpack_macroblocks()
- avcodec/truemotion2: Fix multiple integer overflows in tm2_null_res_block()
- avcodec/vc1_block: Check the return code from vc1_decode_p_block()
- avcodec/vc1dec: Require res_sprite for wmv3images
- avcodec/vc1_block: Check for double escapes
- avcodec/vorbisdec: Check get_vlc2() failure
- avcodec/tta: Fix integer overflow in prediction
- avcodec/vb: Check input packet size to be large enough to contain flags
- avcodec/cavsdec: Limit the number of access units per packet to 2
- avcodec/atrac9dec: Check block_align
- avcodec/alac: Check for bps of 0
- avcodec/alac: Fix multiple integer overflows in lpc_prediction()
- avcodec/rl2: set dimensions
- avcodec/aacdec: Add FF_CODEC_CAP_INIT_CLEANUP
- avcodec/idcinvideo: Add 320x240 default maximum resolution
- avformat/realtextdec: free queue on error
- avcodec/vp5/6/8: use vpX_rac_is_end()
- avformat/vividas: Check av_xiphlacing() return value before use
- avcodec/alsdec: Fix integer overflow in decode_var_block_data()
- avcodec/alsdec: Limit maximum channels to 512
- avcodec/anm: Check input size for a frame with just a stop code
- avcodec/flicvideo: Optimize and Simplify FLI_COPY in flic_decode_frame_24BPP() by using bytestream2_get_buffer()
- avcodec/loco: Check left column value
- avcodec/ffwavesynth: Fixes invalid shift with pink noise seeking
- avcodec/ffwavesynth: Fix integer overflow for some corner case values
- avcodec/indeo2: Check remaining input more often
- avcodec/diracdec: Check that slices are fewer than pixels
- avcodec/vp56: Consider the alpha start as end of the prior header
- avcodec/4xm: Check for end of input in decode_p_block()
- avcodec/hevcdec: Check delta_luma_weight_l0/1
- avcodec/hnm4video: Optimize postprocess_current_frame()
- avcodec/hevc_refs: Optimize 16bit generate_missing_ref()
- avcodec/scpr: Use av_memcpy_backptr() in type 17 and 33
- avcodec/tiff: Enforce increasing offsets
- avcodec/dds: Use ff_set_dimensions()
- avformat/vividas: Fix another infinite loop
- avformat/vividas: Fix infinite loop in header parser
- avcodec/mpc8: Fix 32bit mask/enum
- avcodec/alsdec: Fix integer overflows of raw_samples in decode_var_block_data()
- avcodec/alsdec: Fix integer overflow of raw_samples in decode_blocks()
- avcodec/alsdec: fix mantisse shift
- avcodec/pngdec: consider chunk size in minimal size check
- avcodec/vc1_block: Fix invalid shifts in vc1_decode_i_blocks()
- avcodec/vc1_block: fix invalid shift in vc1_decode_p_mb()
- avcodec/aacdec_template: fix integer overflow in imdct_and_windowing()
- avformat/mpegts: Check if ready on SCTE reception
- avcodec/omx: fix xFramerate calculation
- avformat/avidec: add support for recognizing HEVC fourcc when demuxing
- avformat/mpegts: fix teletext PTS when selecting teletext streams only
- avcodec/h2645_parse: zero initialize the rbsp buffer
- avcodec/omx: Fix handling of fragmented buffers
- avcodec/omx: ensure zerocopy mode can be disabled on rpi builds
- avformat/mxfdec: do not ignore bad size errors
- avformat/matroskadec: Fix seeking
- ffplay: properly detect all window size changes

version 4.3.1:
- avcodec/tiff: Check input space in dng_decode_jpeg()
- avcodec/mjpeg_parser: Adjust size rejection threshold
- avcodec/cbs_jpeg: Fix uninitialized end index in cbs_jpeg_split_fragment()
- avformat/sdp: Fix potential write beyond end of buffer
- avformat/mm: Check for existence of audio stream
- avformat/mov: Fix unaligned read of uint32_t and endian-dependance in mov_read_default
- avcodec/apedec: Fix undefined integer overflow with 24bit
- avcodec/loco: Fix integer overflow with large values from loco_get_rice()
- avformat/smjpegdec: Check the existence of referred streams
- avcodec/tiff: Check frame parameters before blit for DNG
- avcodec/mjpegdec: Limit bayer to single plane outputting format
- avcodec/pnmdec: Fix misaligned reads
- avcodec/mv30: Fix integer overflows in idct2_1d()
- avcodec/hcadec: Check total_band_count against imdct_in size
- avcodec/scpr3: Fix out of array access with dectab
- avcodec/tiff: Do not overrun the array ends in dng_blit()
- avcodec/dstdec: Replace AC overread check by sample rate check
- dnn_backend_native: Add overflow check for length calculation.
- avcodec/h264_metadata_bsf: Fix invalid av_freep
- avcodec/cbs_h265: set default VUI parameters when vui_parameters_present_flag is false
- avcodec/av1_parser: initialize avctx->pix_fmt
- avcodec/av1_parser: add missing parsing for RGB pixel format signaling
- avcodec/av1_parser: set context values outside the OBU parsing loop
- avutil/avsscanf: Add () to avoid integer overflow in scanexp()
- avformat/utils: reorder duration computation to avoid overflow
- avcodec/pngdec: Check for fctl after idat
- avformat/hls: Pass a copy of the URL for probing
- avutil/common: Fix integer overflow in av_ceil_log2_c()
- avcodec/wmalosslessdec: fix overflow with pred in revert_cdlms
- avformat/mvdec: Fix integer overflow with billions of channels
- avformat/microdvddec: skip malformed lines without frame number.
- dnn_backend_native: check operand index
- dnn_backend_native.c: refine code for fail case
- avformat/mov: fix memleaks
- libavformat/mov: Fix memleaks when demuxing DV audio
- avcodec/cbs_av1: Fix writing uvlc numbers >= INT_MAX
- avformat/avc, mxfenc: Avoid allocation of H264 SPS structure, fix memleak
- avcodec/bitstream: Don't check for undefined behaviour after it happened
- avformat/aviobuf: Also return truncated buffer in avio_get_dyn_buf()
- avformat/aviobuf: Don't check for overflow after it happened

version 4.3:
- v360 filter
- Intel QSV-accelerated MJPEG decoding
- Intel QSV-accelerated VP9 decoding
- Support for TrueHD in mp4
- Support AMD AMF encoder on Linux (via Vulkan)
- IMM5 video decoder
- ZeroMQ protocol
- support Sipro ACELP.KELVIN decoding
- streamhash muxer
- sierpinski video source
- scroll video filter
- photosensitivity filter
- anlms filter
- arnndn filter
- bilateral filter
- maskedmin and maskedmax filters
- VDPAU VP9 hwaccel
- median filter
- QSV-accelerated VP9 encoding
- AV1 encoding support via librav1e
- AV1 frame merge bitstream filter
- AV1 Annex B demuxer
- axcorrelate filter
- mvdv decoder
- mvha decoder
- MPEG-H 3D Audio support in mp4
- thistogram filter
- freezeframes filter
- Argonaut Games ADPCM decoder
- Argonaut Games ASF demuxer
- xfade video filter
- xfade_opencl filter
- afirsrc audio filter source
- pad_opencl filter
- Simon & Schuster Interactive ADPCM decoder
- Real War KVAG demuxer
- CDToons video decoder
- siren audio decoder
- Rayman 2 ADPCM decoder
- Rayman 2 APM demuxer
- cas video filter
- High Voltage Software ADPCM decoder
- LEGO Racers ALP (.tun & .pcm) demuxer
- AMQP 0-9-1 protocol (RabbitMQ)
- Vulkan support
- avgblur_vulkan, overlay_vulkan, scale_vulkan and chromaber_vulkan filters
- ADPCM IMA MTF decoder
- FWSE demuxer
- DERF DPCM decoder
- DERF demuxer
- CRI HCA decoder
- CRI HCA demuxer
- overlay_cuda filter
- switch from AvxSynth to AviSynth+ on Linux
- mv30 decoder
- Expanded styling support for 3GPP Timed Text Subtitles (movtext)
- WebP parser
- tmedian filter
- maskedthreshold filter
- Support for muxing pcm and pgs in m2ts
- Cunning Developments ADPCM decoder
- asubboost filter
- Pro Pinball Series Soundbank demuxer
- pcm_rechunk bitstream filter
- scdet filter
- NotchLC decoder
- gradients source video filter
- MediaFoundation encoder wrapper
- untile filter
- Simon & Schuster Interactive ADPCM encoder
- PFM decoder
- dblur video filter
- Real War KVAG muxer

version 4.2:
- tpad filter
LICENSE.md (30 changed lines)

@@ -21,10 +21,11 @@ Specifically, the GPL parts of FFmpeg are:
- `compat/solaris/make_sunver.pl`
- `doc/t2h.pm`
- `doc/texi2pod.pl`
- `libswresample/swresample-test.c`
- `libswresample/tests/swresample.c`
- `tests/checkasm/*`
- `tests/tiny_ssim.c`
- the following filters in libavfilter:
    - `signature_lookup.c`
    - `vf_blackframe.c`
    - `vf_boxblur.c`
    - `vf_colormatrix.c`

@@ -34,13 +35,13 @@ Specifically, the GPL parts of FFmpeg are:
    - `vf_eq.c`
    - `vf_find_rect.c`
    - `vf_fspp.c`
    - `vf_geq.c`
    - `vf_histeq.c`
    - `vf_hqdn3d.c`
    - `vf_interlace.c`
    - `vf_kerndeint.c`
    - `vf_lensfun.c` (GPL version 3 or later)
    - `vf_mcdeint.c`
    - `vf_mpdecimate.c`
    - `vf_nnedi.c`
    - `vf_owdenoise.c`
    - `vf_perspective.c`
    - `vf_phase.c`

@@ -49,12 +50,14 @@ Specifically, the GPL parts of FFmpeg are:
    - `vf_pullup.c`
    - `vf_repeatfields.c`
    - `vf_sab.c`
    - `vf_signature.c`
    - `vf_smartblur.c`
    - `vf_spp.c`
    - `vf_stereo3d.c`
    - `vf_super2xsai.c`
    - `vf_tinterlace.c`
    - `vf_uspp.c`
    - `vf_vaguedenoiser.c`
    - `vsrc_mptestsrc.c`

Should you, for whatever reason, prefer to use version 3 of the (L)GPL, then

@@ -80,24 +83,39 @@ affect the licensing of binaries resulting from the combination.

### Compatible libraries

The following libraries are under GPL:
The following libraries are under GPL version 2:
- avisynth
- frei0r
- libcdio
- libdavs2
- librubberband
- libvidstab
- libx264
- libx265
- libxavs
- libxavs2
- libxvid

When combining them with FFmpeg, FFmpeg needs to be licensed as GPL as well by
passing `--enable-gpl` to configure.

The OpenCORE and VisualOn libraries are under the Apache License 2.0. That
license is incompatible with the LGPL v2.1 and the GPL v2, but not with
The following libraries are under LGPL version 3:
- gmp
- libaribb24
- liblensfun

When combining them with FFmpeg, use the configure option `--enable-version3` to
upgrade FFmpeg to the LGPL v3.

The VMAF, mbedTLS, RK MPI, OpenCORE and VisualOn libraries are under the Apache License
2.0. That license is incompatible with the LGPL v2.1 and the GPL v2, but not with
version 3 of those licenses. So to combine these libraries with FFmpeg, the
license version needs to be upgraded by passing `--enable-version3` to configure.

The smbclient library is under the GPL v3, to combine it with FFmpeg,
the options `--enable-gpl` and `--enable-version3` have to be passed to
configure to upgrade FFmpeg to the GPL v3.

### Incompatible libraries

There are certain libraries you can combine with FFmpeg whose licenses are not
MAINTAINERS (23 changed lines)

@@ -53,8 +53,8 @@ Communication
website                   Deby Barbara Lepage
fate.ffmpeg.org           Timothy Gu
Trac bug tracker          Alexander Strasser, Michael Niedermayer, Carl Eugen Hoyos
Patchwork                 Andriy Gelman
mailing lists             Baptiste Coudurier
Google+                   Paul B Mahol, Michael Niedermayer, Alexander Strasser
Twitter                   Lou Logan, Reynaldo H. Verdejo Pinochet
Launchpad                 Timothy Gu
ffmpeg-security           Andreas Cadhalpun, Carl Eugen Hoyos, Clément Bœsch, Michael Niedermayer, Reimar Doeffinger, Rodger Combs, wm4

@@ -78,6 +78,7 @@ Other:
float_dsp                               Loren Merritt
hash                                    Reimar Doeffinger
hwcontext_cuda*                         Timo Rothenpieler
hwcontext_vulkan*                       Lynne
intfloat*                               Michael Niedermayer
integer.c, integer.h                    Michael Niedermayer
lzo                                     Reimar Doeffinger

@@ -88,6 +89,7 @@ Other:
rational.c, rational.h                  Michael Niedermayer
rc4                                     Reimar Doeffinger
ripemd.c, ripemd.h                      James Almer
tx*                                     Lynne


libavcodec

@@ -192,12 +194,14 @@ Codecs:
libdavs2.c                              Huiwen Ren
libgsm.c                                Michel Bardiaux
libkvazaar.c                            Arttu Ylä-Outinen
libopenh264enc.c                        Martin Storsjo, Linjie Fu
libopenjpeg.c                           Jaikrishnan Menon
libopenjpegenc.c                        Michael Bradshaw
libtheoraenc.c                          David Conrad
libvorbis.c                             David Conrad
libvpx*                                 James Zern
libxavs.c                               Stefan Gehrer
libxavs2.c                              Huiwen Ren
libzvbi-teletextdec.c                   Marton Balint
lzo.h, lzo.c                            Reimar Doeffinger
mdec.c                                  Michael Niedermayer

@@ -213,6 +217,7 @@ Codecs:
msvideo1.c                              Mike Melanson
nuv.c                                   Reimar Doeffinger
nvdec*, nvenc*                          Timo Rothenpieler
omx.c                                   Martin Storsjo, Aman Gupta
opus*                                   Rostislav Pehlivanov
paf.*                                   Paul B Mahol
pcx.c                                   Ivo van Poorten

@@ -368,6 +373,8 @@ Filters:
Sources:
vsrc_mandelbrot.c                       Michael Niedermayer

dnn                                     Yejun Guo


libavformat
===========

@@ -429,9 +436,9 @@ Muxers/Demuxers:
lmlm4.c                                 Ivo van Poorten
lvfdec.c                                Paul B Mahol
lxfdec.c                                Tomas Härdin
matroska.c                              Aurelien Jacobs
matroskadec.c                           Aurelien Jacobs
matroskaenc.c                           David Conrad
matroska.c                              Aurelien Jacobs, Andreas Rheinhardt
matroskadec.c                           Aurelien Jacobs, Andreas Rheinhardt
matroskaenc.c                           David Conrad, Andreas Rheinhardt
matroska subtitles (matroskaenc.c)      John Peebles
metadata*                               Aurelien Jacobs
mgsts.c                                 Paul B Mahol

@@ -446,7 +453,7 @@ Muxers/Demuxers:
mpegtsenc.c                             Baptiste Coudurier
msnwc_tcp.c                             Ramiro Polla
mtv.c                                   Reynaldo H. Verdejo Pinochet
mxf*                                    Baptiste Coudurier
mxf*                                    Baptiste Coudurier, Tomas Härdin
nistspheredec.c                         Paul B Mahol
nsvdec.c                                Francois Revol
nut*                                    Michael Niedermayer

@@ -454,7 +461,6 @@ Muxers/Demuxers:
oggdec.c, oggdec.h                      David Conrad
oggenc.c                                Baptiste Coudurier
oggparse*.c                             David Conrad
oggparsedaala*                          Rostislav Pehlivanov
oma.c                                   Maxim Poliakovski
paf.c                                   Paul B Mahol
psxstr.c                                Mike Melanson

@@ -502,6 +508,7 @@ Protocols:
ftp.c                                   Lukasz Marek
http.c                                  Ronald S. Bultje
libssh.c                                Lukasz Marek
libzmq.c                                Andriy Gelman
mms*.c                                  Ronald S. Bultje
udp.c                                   Luca Abeni
icecast.c                               Marvin Scholz

@@ -557,6 +564,7 @@ Joakim Plate
Jun Zhao
Kieran Kunhya
Kirill Gavrilov
Limin Wang
Martin Storsjö
Panagiotis Issaris
Pedro Arthur

@@ -600,12 +608,14 @@ James Almer                   7751 2E8C FD94 A169 57E6 9A7A 1463 01AD 7376 59E0
Jean Delvare                  7CA6 9F44 60F1 BDC4 1FD2 C858 A552 6B9B B3CD 4E6A
Loren Merritt                 ABD9 08F4 C920 3F65 D8BE 35D7 1540 DAA7 060F 56DE
Lou Logan (llogan)            7D68 DC73 CBEF EABB 671A B6CF 621C 2E28 82F8 DC3A
Lynne                         FE50 139C 6805 72CA FD52 1F8D A2FE A5F0 3F03 4464
Michael Niedermayer           9FF2 128B 147E F673 0BAD F133 611E C787 040B 0FAB
Nicolas George                24CE 01CE 9ACC 5CEB 74D8 8D9D B063 D997 36E5 4C93
Nikolay Aleksandrov           8978 1D8C FB71 588E 4B27 EAA8 C4F0 B5FC E011 13B1
Panagiotis Issaris            6571 13A3 33D9 3726 F728 AA98 F643 B12E ECF3 E029
Peter Ross                    A907 E02F A6E5 0CD2 34CD 20D2 6760 79C5 AC40 DD6B
Philip Langdale               5DC5 8D66 5FBA 3A43 18EC 045E F8D6 B194 6A75 682E
Ramiro Polla                  7859 C65B 751B 1179 792E DAE8 8E95 8B2F 9B6C 5700
Reimar Doeffinger             C61D 16E5 9E2C D10C 8958 38A4 0899 A2B9 06D4 D9C7
Reinhard Tartler              9300 5DC2 7E87 6C37 ED7B CA9A 9808 3544 9453 48A4
Reynaldo H. Verdejo Pinochet  6E27 CD34 170C C78E 4D4F 5F40 C18E 077F 3114 452A

@@ -614,6 +624,7 @@ Sascha Sommer                 38A0 F88B 868E 9D3A 97D4 D6A0 E823 706F 1E07 0D3C
Stefano Sabatini              0D0B AD6B 5330 BBAD D3D6 6A0C 719C 2839 FC43 2D5F
Steinar H. Gunderson          C2E9 004F F028 C18E 4EAD DB83 7F61 7561 7797 8F76
Stephan Hilb                  4F38 0B3A 5F39 B99B F505 E562 8D5C 5554 4E17 8863
Thilo Borgmann (thilo)        CE1D B7F4 4D20 FC3A DD9F FE5A 257C 5B8F 1D20 B92F
Tiancheng "Timothy" Gu        9456 AFC0 814A 8139 E994 8351 7FE6 B095 B582 B0D4
Tim Nicholson                 38CF DB09 3ED0 F607 8B67 6CED 0C0B FC44 8B0B FC83
Tomas Härdin (thardin)        A79D 4E3D F38F 763F 91F5 8B33 A01E 8AE0 41BB 2551
Makefile (4 changed lines)

@@ -50,6 +50,9 @@ $(TOOLS): %$(EXESUF): %.o
target_dec_%_fuzzer$(EXESUF): target_dec_%_fuzzer.o $(FF_DEP_LIBS)
        $(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)

tools/target_bsf_%_fuzzer$(EXESUF): tools/target_bsf_%_fuzzer.o $(FF_DEP_LIBS)
        $(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)

tools/target_dem_fuzzer$(EXESUF): tools/target_dem_fuzzer.o $(FF_DEP_LIBS)
        $(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
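These pattern rules link each fuzzer object against libFuzzer through LIBFUZZER_PATH. For orientation only, a generic libFuzzer target has the shape sketched below in C; the parse_input() helper is a hypothetical stand-in, not FFmpeg's actual tools/target_dec_fuzzer.c.

/* minimal_fuzzer.c: a generic libFuzzer entry point (illustrative sketch) */
#include <stdint.h>
#include <stddef.h>

/* hypothetical function under test; a real target would feed the data to a decoder */
static void parse_input(const uint8_t *data, size_t size) { (void)data; (void)size; }

int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
{
    parse_input(data, size);   /* libFuzzer calls this repeatedly with mutated inputs */
    return 0;                  /* a non-crashing return lets the fuzzer keep going */
}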
@@ -148,6 +151,7 @@ distclean:: clean
        version.h libavutil/ffversion.h libavcodec/codec_names.h \
        libavcodec/bsf_list.c libavformat/protocol_list.c \
        libavcodec/codec_list.c libavcodec/parser_list.c \
        libavfilter/filter_list.c libavdevice/indev_list.c libavdevice/outdev_list.c \
        libavformat/muxer_list.c libavformat/demuxer_list.c
ifeq ($(SRC_LINK),src)
        $(RM) src
@@ -1,10 +1,10 @@

┌────────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 4.2 "Ada" │
│ RELEASE NOTES for FFmpeg 4.3 "4:3" │
└────────────────────────────────────┘

The FFmpeg Project proudly presents FFmpeg 4.2 "Ada", about 8
months after the release of FFmpeg 4.1.
The FFmpeg Project proudly presents FFmpeg 4.3 "4:3", about 10
months after the release of FFmpeg 4.2.

A complete Changelog is available at the root of the project, and the
complete Git history on https://git.ffmpeg.org/gitweb/ffmpeg.git
@@ -1,7 +1,7 @@
---
# We just wrap `build` so this is really it
name: "jellyfin-ffmpeg"
version: "4.2.1-7"
version: "4.3.1-1"
packages:
- stretch-amd64
- stretch-armhf
File diff suppressed because it is too large
@ -1,94 +0,0 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef AVS_CAPI_H
|
||||
#define AVS_CAPI_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
# define EXTERN_C extern "C"
|
||||
#else
|
||||
# define EXTERN_C
|
||||
#endif
|
||||
|
||||
#ifdef BUILDING_AVSCORE
|
||||
# if defined(GCC) && defined(X86_32)
|
||||
# define AVSC_CC
|
||||
# else // MSVC builds and 64-bit GCC
|
||||
# ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
# else
|
||||
# define AVSC_CC __stdcall
|
||||
# endif
|
||||
# endif
|
||||
#else // needed for programs that talk to AviSynth+
|
||||
# ifndef AVSC_WIN32_GCC32 // see comment below
|
||||
# ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
# else
|
||||
# define AVSC_CC __stdcall
|
||||
# endif
|
||||
# else
|
||||
# define AVSC_CC
|
||||
# endif
|
||||
#endif
|
||||
|
||||
// On 64-bit Windows, there's only one calling convention,
|
||||
// so there is no difference between MSVC and GCC. On 32-bit,
|
||||
// this isn't true. The convention that GCC needs to use to
|
||||
// even build AviSynth+ as 32-bit makes anything that uses
|
||||
// it incompatible with 32-bit MSVC builds of AviSynth+.
|
||||
// The AVSC_WIN32_GCC32 define is meant to provide a user
|
||||
// switchable way to make builds of FFmpeg to test 32-bit
|
||||
// GCC builds of AviSynth+ without having to screw around
|
||||
// with alternate headers, while still default to the usual
|
||||
// situation of using 32-bit MSVC builds of AviSynth+.
|
||||
|
||||
// Hopefully, this situation will eventually be resolved
|
||||
// and a broadly compatible solution will arise so the
|
||||
// same 32-bit FFmpeg build can handle either MSVC or GCC
|
||||
// builds of AviSynth+.
|
||||
|
||||
#define AVSC_INLINE static __inline
|
||||
|
||||
#ifdef BUILDING_AVSCORE
|
||||
# define AVSC_EXPORT __declspec(dllexport)
|
||||
# define AVSC_API(ret, name) EXTERN_C AVSC_EXPORT ret AVSC_CC name
|
||||
#else
|
||||
# define AVSC_EXPORT EXTERN_C __declspec(dllexport)
|
||||
# ifndef AVSC_NO_DECLSPEC
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllimport) ret AVSC_CC name
|
||||
# else
|
||||
# define AVSC_API(ret, name) typedef ret (AVSC_CC *name##_func)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#endif //AVS_CAPI_H
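The AVSC_API macro above either declares a dllimport entry point or, when AVSC_NO_DECLSPEC is defined, only a function-pointer typedef named <name>_func so that the symbols can be resolved at run time. A minimal sketch of that second pattern, assuming a header avisynth_c.h built on these macros and a POSIX dlsym()-based loader (both assumptions, for illustration only):

/* illustrative only: resolve one AviSynth C API entry point at run time */
#define AVSC_NO_DECLSPEC          /* make AVSC_API() emit typedefs instead of imports */
#include "avisynth_c.h"           /* assumed name of the header that applies these macros */
#include <dlfcn.h>

/* AVSC_API(int, avs_get_version)(AVS_Clip *) expands under AVSC_NO_DECLSPEC to:
 *   typedef int (AVSC_CC *avs_get_version_func)(AVS_Clip *);                     */
static int load_get_version(void *lib, avs_get_version_func *fn)
{
    *fn = (avs_get_version_func)dlsym(lib, "avs_get_version");
    return *fn != NULL;           /* non-zero when the symbol was found */
}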
|
@ -1,70 +0,0 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef AVS_CONFIG_H
|
||||
#define AVS_CONFIG_H
|
||||
|
||||
// Undefine this to get cdecl calling convention
|
||||
#define AVSC_USE_STDCALL 1
|
||||
|
||||
// NOTE TO PLUGIN AUTHORS:
|
||||
// Because FRAME_ALIGN can be substantially higher than the alignment
|
||||
// a plugin actually needs, plugins should not use FRAME_ALIGN to check for
|
||||
// alignment. They should always request the exact alignment value they need.
|
||||
// This is to make sure that plugins work over the widest range of AviSynth
|
||||
// builds possible.
|
||||
#define FRAME_ALIGN 64
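As the note above says, a plugin should verify the exact alignment it needs rather than testing against FRAME_ALIGN. A minimal sketch of such a check (the 32-byte figure is only an example requirement):

#include <stddef.h>
#include <stdint.h>

/* returns non-zero when p satisfies the alignment this plugin actually requires */
static int row_is_aligned(const unsigned char *p, size_t needed_align)
{
    return ((uintptr_t)p % needed_align) == 0;
}

/* e.g. a plugin whose SIMD loads want 32-byte alignment would call
 * row_is_aligned(ptr, 32) instead of comparing anything against FRAME_ALIGN */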
|
||||
|
||||
#if defined(_M_AMD64) || defined(__x86_64)
|
||||
# define X86_64
|
||||
#elif defined(_M_IX86) || defined(__i386__)
|
||||
# define X86_32
|
||||
#else
|
||||
# error Unsupported CPU architecture.
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
# define MSVC
|
||||
#elif defined(__GNUC__)
|
||||
# define GCC
|
||||
#elif defined(__clang__)
|
||||
# define CLANG
|
||||
#else
|
||||
# error Unsupported compiler.
|
||||
#endif
|
||||
|
||||
#if defined(GCC)
|
||||
# undef __forceinline
|
||||
# define __forceinline inline
|
||||
#endif
|
||||
|
||||
#endif //AVS_CONFIG_H
|
@ -1,57 +0,0 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef AVS_TYPES_H
|
||||
#define AVS_TYPES_H
|
||||
|
||||
// Define all types necessary for interfacing with avisynth.dll
|
||||
|
||||
#ifdef __cplusplus
|
||||
#include <cstddef>
|
||||
#else
|
||||
#include <stddef.h>
|
||||
#endif
|
||||
|
||||
// Raster types used by VirtualDub & Avisynth
|
||||
typedef unsigned int Pixel32;
|
||||
typedef unsigned char BYTE;
|
||||
|
||||
// Audio Sample information
|
||||
typedef float SFLOAT;
|
||||
|
||||
#ifdef __GNUC__
|
||||
typedef long long int INT64;
|
||||
#else
|
||||
typedef __int64 INT64;
|
||||
#endif
|
||||
|
||||
#endif //AVS_TYPES_H
|
@ -1,728 +0,0 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
|
||||
// MA 02110-1301 USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef __AVXSYNTH_C__
|
||||
#define __AVXSYNTH_C__
|
||||
|
||||
#include "windowsPorts/windows2linux.h"
|
||||
#include <stdarg.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
# define EXTERN_C extern "C"
|
||||
#else
|
||||
# define EXTERN_C
|
||||
#endif
|
||||
|
||||
#define AVSC_USE_STDCALL 1
|
||||
|
||||
#ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
#else
|
||||
# define AVSC_CC __stdcall
|
||||
#endif
|
||||
|
||||
#define AVSC_INLINE static __inline
|
||||
|
||||
#ifdef AVISYNTH_C_EXPORTS
|
||||
# define AVSC_EXPORT EXTERN_C
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllexport) ret AVSC_CC name
|
||||
#else
|
||||
# define AVSC_EXPORT EXTERN_C __declspec(dllexport)
|
||||
# ifndef AVSC_NO_DECLSPEC
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllimport) ret AVSC_CC name
|
||||
# else
|
||||
# define AVSC_API(ret, name) typedef ret (AVSC_CC *name##_func)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifdef __GNUC__
|
||||
typedef long long int INT64;
|
||||
#else
|
||||
typedef __int64 INT64;
|
||||
#endif
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Constants
|
||||
//
|
||||
|
||||
#ifndef __AVXSYNTH_H__
|
||||
enum { AVISYNTH_INTERFACE_VERSION = 3 };
|
||||
#endif
|
||||
|
||||
enum {AVS_SAMPLE_INT8 = 1<<0,
|
||||
AVS_SAMPLE_INT16 = 1<<1,
|
||||
AVS_SAMPLE_INT24 = 1<<2,
|
||||
AVS_SAMPLE_INT32 = 1<<3,
|
||||
AVS_SAMPLE_FLOAT = 1<<4};
|
||||
|
||||
enum {AVS_PLANAR_Y=1<<0,
|
||||
AVS_PLANAR_U=1<<1,
|
||||
AVS_PLANAR_V=1<<2,
|
||||
AVS_PLANAR_ALIGNED=1<<3,
|
||||
AVS_PLANAR_Y_ALIGNED=AVS_PLANAR_Y|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_U_ALIGNED=AVS_PLANAR_U|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_V_ALIGNED=AVS_PLANAR_V|AVS_PLANAR_ALIGNED};
|
||||
|
||||
// Colorspace properties.
|
||||
enum {AVS_CS_BGR = 1<<28,
|
||||
AVS_CS_YUV = 1<<29,
|
||||
AVS_CS_INTERLEAVED = 1<<30,
|
||||
AVS_CS_PLANAR = 1<<31};
|
||||
|
||||
// Specific colorformats
|
||||
enum {
|
||||
AVS_CS_UNKNOWN = 0,
|
||||
AVS_CS_BGR24 = 1<<0 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_BGR32 = 1<<1 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_YUY2 = 1<<2 | AVS_CS_YUV | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_YV12 = 1<<3 | AVS_CS_YUV | AVS_CS_PLANAR, // y-v-u, planar
|
||||
AVS_CS_I420 = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR, // y-u-v, planar
|
||||
AVS_CS_IYUV = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR // same as above
|
||||
};
|
||||
|
||||
enum {
|
||||
AVS_IT_BFF = 1<<0,
|
||||
AVS_IT_TFF = 1<<1,
|
||||
AVS_IT_FIELDBASED = 1<<2};
|
||||
|
||||
enum {
|
||||
AVS_FILTER_TYPE=1,
|
||||
AVS_FILTER_INPUT_COLORSPACE=2,
|
||||
AVS_FILTER_OUTPUT_TYPE=9,
|
||||
AVS_FILTER_NAME=4,
|
||||
AVS_FILTER_AUTHOR=5,
|
||||
AVS_FILTER_VERSION=6,
|
||||
AVS_FILTER_ARGS=7,
|
||||
AVS_FILTER_ARGS_INFO=8,
|
||||
AVS_FILTER_ARGS_DESCRIPTION=10,
|
||||
AVS_FILTER_DESCRIPTION=11};
|
||||
|
||||
enum { //SUBTYPES
|
||||
AVS_FILTER_TYPE_AUDIO=1,
|
||||
AVS_FILTER_TYPE_VIDEO=2,
|
||||
AVS_FILTER_OUTPUT_TYPE_SAME=3,
|
||||
AVS_FILTER_OUTPUT_TYPE_DIFFERENT=4};
|
||||
|
||||
enum {
|
||||
AVS_CACHE_NOTHING=0,
|
||||
AVS_CACHE_RANGE=1,
|
||||
AVS_CACHE_ALL=2,
|
||||
AVS_CACHE_AUDIO=3,
|
||||
AVS_CACHE_AUDIO_NONE=4,
|
||||
AVS_CACHE_AUDIO_AUTO=5
|
||||
};
|
||||
|
||||
#define AVS_FRAME_ALIGN 16
|
||||
|
||||
typedef struct AVS_Clip AVS_Clip;
|
||||
typedef struct AVS_ScriptEnvironment AVS_ScriptEnvironment;
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_VideoInfo
|
||||
//
|
||||
|
||||
// AVS_VideoInfo is layed out identicly to VideoInfo
|
||||
typedef struct AVS_VideoInfo {
|
||||
int width, height; // width=0 means no video
|
||||
unsigned fps_numerator, fps_denominator;
|
||||
int num_frames;
|
||||
|
||||
int pixel_type;
|
||||
|
||||
int audio_samples_per_second; // 0 means no audio
|
||||
int sample_type;
|
||||
INT64 num_audio_samples;
|
||||
int nchannels;
|
||||
|
||||
// Imagetype properties
|
||||
|
||||
int image_type;
|
||||
} AVS_VideoInfo;
|
||||
|
||||
// useful functions of the above
|
||||
AVSC_INLINE int avs_has_video(const AVS_VideoInfo * p)
|
||||
{ return (p->width!=0); }
|
||||
|
||||
AVSC_INLINE int avs_has_audio(const AVS_VideoInfo * p)
|
||||
{ return (p->audio_samples_per_second!=0); }
|
||||
|
||||
AVSC_INLINE int avs_is_rgb(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type&AVS_CS_BGR); }
|
||||
|
||||
AVSC_INLINE int avs_is_rgb24(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type&AVS_CS_BGR24)==AVS_CS_BGR24; } // Clear out additional properties
|
||||
|
||||
AVSC_INLINE int avs_is_rgb32(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_BGR32) == AVS_CS_BGR32 ; }
|
||||
|
||||
AVSC_INLINE int avs_is_yuv(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type&AVS_CS_YUV ); }
|
||||
|
||||
AVSC_INLINE int avs_is_yuy2(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_YUY2) == AVS_CS_YUY2; }
|
||||
|
||||
AVSC_INLINE int avs_is_yv12(const AVS_VideoInfo * p)
|
||||
{ return ((p->pixel_type & AVS_CS_YV12) == AVS_CS_YV12)||((p->pixel_type & AVS_CS_I420) == AVS_CS_I420); }
|
||||
|
||||
AVSC_INLINE int avs_is_color_space(const AVS_VideoInfo * p, int c_space)
|
||||
{ return ((p->pixel_type & c_space) == c_space); }
|
||||
|
||||
AVSC_INLINE int avs_is_property(const AVS_VideoInfo * p, int property)
|
||||
{ return ((p->pixel_type & property)==property ); }
|
||||
|
||||
AVSC_INLINE int avs_is_planar(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type & AVS_CS_PLANAR); }
|
||||
|
||||
AVSC_INLINE int avs_is_field_based(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_FIELDBASED); }
|
||||
|
||||
AVSC_INLINE int avs_is_parity_known(const AVS_VideoInfo * p)
|
||||
{ return ((p->image_type & AVS_IT_FIELDBASED)&&(p->image_type & (AVS_IT_BFF | AVS_IT_TFF))); }
|
||||
|
||||
AVSC_INLINE int avs_is_bff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_BFF); }
|
||||
|
||||
AVSC_INLINE int avs_is_tff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_TFF); }
|
||||
|
||||
AVSC_INLINE int avs_bits_per_pixel(const AVS_VideoInfo * p)
|
||||
{
|
||||
switch (p->pixel_type) {
|
||||
case AVS_CS_BGR24: return 24;
|
||||
case AVS_CS_BGR32: return 32;
|
||||
case AVS_CS_YUY2: return 16;
|
||||
case AVS_CS_YV12:
|
||||
case AVS_CS_I420: return 12;
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
AVSC_INLINE int avs_bytes_from_pixels(const AVS_VideoInfo * p, int pixels)
|
||||
{ return pixels * (avs_bits_per_pixel(p)>>3); } // Will work on planar images, but will return only luma planes
|
||||
|
||||
AVSC_INLINE int avs_row_size(const AVS_VideoInfo * p)
|
||||
{ return avs_bytes_from_pixels(p,p->width); } // Also only returns first plane on planar images
|
||||
|
||||
AVSC_INLINE int avs_bmp_size(const AVS_VideoInfo * vi)
|
||||
{ if (avs_is_planar(vi)) {int p = vi->height * ((avs_row_size(vi)+3) & ~3); p+=p>>1; return p; } return vi->height * ((avs_row_size(vi)+3) & ~3); }
|
||||
|
||||
AVSC_INLINE int avs_samples_per_second(const AVS_VideoInfo * p)
|
||||
{ return p->audio_samples_per_second; }
|
||||
|
||||
|
||||
AVSC_INLINE int avs_bytes_per_channel_sample(const AVS_VideoInfo * p)
|
||||
{
|
||||
switch (p->sample_type) {
|
||||
case AVS_SAMPLE_INT8: return sizeof(signed char);
|
||||
case AVS_SAMPLE_INT16: return sizeof(signed short);
|
||||
case AVS_SAMPLE_INT24: return 3;
|
||||
case AVS_SAMPLE_INT32: return sizeof(signed int);
|
||||
case AVS_SAMPLE_FLOAT: return sizeof(float);
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
AVSC_INLINE int avs_bytes_per_audio_sample(const AVS_VideoInfo * p)
|
||||
{ return p->nchannels*avs_bytes_per_channel_sample(p);}
|
||||
|
||||
AVSC_INLINE INT64 avs_audio_samples_from_frames(const AVS_VideoInfo * p, INT64 frames)
|
||||
{ return ((INT64)(frames) * p->audio_samples_per_second * p->fps_denominator / p->fps_numerator); }
|
||||
|
||||
AVSC_INLINE int avs_frames_from_audio_samples(const AVS_VideoInfo * p, INT64 samples)
|
||||
{ return (int)(samples * (INT64)p->fps_numerator / (INT64)p->fps_denominator / (INT64)p->audio_samples_per_second); }
|
||||
|
||||
AVSC_INLINE INT64 avs_audio_samples_from_bytes(const AVS_VideoInfo * p, INT64 bytes)
|
||||
{ return bytes / avs_bytes_per_audio_sample(p); }
|
||||
|
||||
AVSC_INLINE INT64 avs_bytes_from_audio_samples(const AVS_VideoInfo * p, INT64 samples)
|
||||
{ return samples * avs_bytes_per_audio_sample(p); }
|
||||
|
||||
AVSC_INLINE int avs_audio_channels(const AVS_VideoInfo * p)
|
||||
{ return p->nchannels; }
|
||||
|
||||
AVSC_INLINE int avs_sample_type(const AVS_VideoInfo * p)
|
||||
{ return p->sample_type;}
|
||||
|
||||
// useful mutator
|
||||
AVSC_INLINE void avs_set_property(AVS_VideoInfo * p, int property)
|
||||
{ p->image_type|=property; }
|
||||
|
||||
AVSC_INLINE void avs_clear_property(AVS_VideoInfo * p, int property)
|
||||
{ p->image_type&=~property; }
|
||||
|
||||
AVSC_INLINE void avs_set_field_based(AVS_VideoInfo * p, int isfieldbased)
|
||||
{ if (isfieldbased) p->image_type|=AVS_IT_FIELDBASED; else p->image_type&=~AVS_IT_FIELDBASED; }
|
||||
|
||||
AVSC_INLINE void avs_set_fps(AVS_VideoInfo * p, unsigned numerator, unsigned denominator)
|
||||
{
|
||||
unsigned x=numerator, y=denominator;
|
||||
while (y) { // find gcd
|
||||
unsigned t = x%y; x = y; y = t;
|
||||
}
|
||||
p->fps_numerator = numerator/x;
|
||||
p->fps_denominator = denominator/x;
|
||||
}
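For example, the gcd reduction above stores an already-reduced rational (an illustrative call, assuming the declarations in this header):

/* illustrative only */
static void example_fps(AVS_VideoInfo *vi)
{
    avs_set_fps(vi, 60000, 2000);   /* gcd is 2000, so the fps is stored as 30/1 */
}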
|
||||
|
||||
AVSC_INLINE int avs_is_same_colorspace(AVS_VideoInfo * x, AVS_VideoInfo * y)
|
||||
{
|
||||
return (x->pixel_type == y->pixel_type)
|
||||
|| (avs_is_yv12(x) && avs_is_yv12(y));
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_VideoFrame
|
||||
//
|
||||
|
||||
// VideoFrameBuffer holds information about a memory block which is used
|
||||
// for video data. For efficiency, instances of this class are not deleted
|
||||
// when the refcount reaches zero; instead they're stored in a linked list
|
||||
// to be reused. The instances are deleted when the corresponding AVS
|
||||
// file is closed.
|
||||
|
||||
// AVS_VideoFrameBuffer is layed out identicly to VideoFrameBuffer
|
||||
// DO NOT USE THIS STRUCTURE DIRECTLY
|
||||
typedef struct AVS_VideoFrameBuffer {
|
||||
unsigned char * data;
|
||||
int data_size;
|
||||
// sequence_number is incremented every time the buffer is changed, so
|
||||
// that stale views can tell they're no longer valid.
|
||||
long sequence_number;
|
||||
|
||||
long refcount;
|
||||
} AVS_VideoFrameBuffer;
|
||||
|
||||
// VideoFrame holds a "window" into a VideoFrameBuffer.
|
||||
|
||||
// AVS_VideoFrame is layed out identicly to IVideoFrame
|
||||
// DO NOT USE THIS STRUCTURE DIRECTLY
|
||||
typedef struct AVS_VideoFrame {
|
||||
int refcount;
|
||||
AVS_VideoFrameBuffer * vfb;
|
||||
int offset, pitch, row_size, height, offsetU, offsetV, pitchUV; // U&V offsets are from top of picture.
|
||||
} AVS_VideoFrame;
|
||||
|
||||
// Access functions for AVS_VideoFrame
|
||||
AVSC_INLINE int avs_get_pitch(const AVS_VideoFrame * p) {
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_INLINE int avs_get_pitch_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V: return p->pitchUV;}
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_INLINE int avs_get_row_size(const AVS_VideoFrame * p) {
|
||||
return p->row_size; }
|
||||
|
||||
AVSC_INLINE int avs_get_row_size_p(const AVS_VideoFrame * p, int plane) {
|
||||
int r;
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->row_size>>1;
|
||||
else return 0;
|
||||
case AVS_PLANAR_U_ALIGNED: case AVS_PLANAR_V_ALIGNED:
|
||||
if (p->pitchUV) {
|
||||
r = ((p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)) )>>1; // Aligned rowsize
|
||||
if (r < p->pitchUV)
|
||||
return r;
|
||||
return p->row_size>>1;
|
||||
} else return 0;
|
||||
case AVS_PLANAR_Y_ALIGNED:
|
||||
r = (p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
|
||||
if (r <= p->pitch)
|
||||
return r;
|
||||
return p->row_size;
|
||||
}
|
||||
return p->row_size;
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_get_height(const AVS_VideoFrame * p) {
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE int avs_get_height_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->height>>1;
|
||||
return 0;
|
||||
}
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE const unsigned char* avs_get_read_ptr(const AVS_VideoFrame * p) {
|
||||
return p->vfb->data + p->offset;}
|
||||
|
||||
AVSC_INLINE const unsigned char* avs_get_read_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;}
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_writable(const AVS_VideoFrame * p) {
|
||||
return (p->refcount == 1 && p->vfb->refcount == 1);}
|
||||
|
||||
AVSC_INLINE unsigned char* avs_get_write_ptr(const AVS_VideoFrame * p)
|
||||
{
|
||||
if (avs_is_writable(p)) {
|
||||
++p->vfb->sequence_number;
|
||||
return p->vfb->data + p->offset;
|
||||
} else
|
||||
return 0;
|
||||
}
|
||||
|
||||
AVSC_INLINE unsigned char* avs_get_write_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
if (plane==AVS_PLANAR_Y && avs_is_writable(p)) {
|
||||
++p->vfb->sequence_number;
|
||||
return p->vfb->data + p->offset;
|
||||
} else if (plane==AVS_PLANAR_Y) {
|
||||
return 0;
|
||||
} else {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(void, avs_release_video_frame)(AVS_VideoFrame *);
|
||||
// makes a shallow copy of a video frame
|
||||
AVSC_API(AVS_VideoFrame *, avs_copy_video_frame)(AVS_VideoFrame *);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE void avs_release_frame(AVS_VideoFrame * f)
|
||||
{avs_release_video_frame(f);}
|
||||
AVSC_INLINE AVS_VideoFrame * avs_copy_frame(AVS_VideoFrame * f)
|
||||
{return avs_copy_video_frame(f);}
|
||||
#endif
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_Value
|
||||
//
|
||||
|
||||
// Treat AVS_Value as a fat pointer. That is use avs_copy_value
|
||||
// and avs_release_value appropiaty as you would if AVS_Value was
|
||||
// a pointer.
|
||||
|
||||
// To maintain source code compatibility with future versions of the
|
||||
// avisynth_c API don't use the AVS_Value directly. Use the helper
|
||||
// functions below.
|
||||
|
||||
// AVS_Value is layed out identicly to AVSValue
|
||||
typedef struct AVS_Value AVS_Value;
|
||||
struct AVS_Value {
|
||||
short type; // 'a'rray, 'c'lip, 'b'ool, 'i'nt, 'f'loat, 's'tring, 'v'oid, or 'l'ong
|
||||
// for some function e'rror
|
||||
short array_size;
|
||||
union {
|
||||
void * clip; // do not use directly, use avs_take_clip
|
||||
char boolean;
|
||||
int integer;
|
||||
INT64 integer64; // match addition of __int64 to avxplugin.h
|
||||
float floating_pt;
|
||||
const char * string;
|
||||
const AVS_Value * array;
|
||||
} d;
|
||||
};
|
||||
|
||||
// AVS_Value should be initilized with avs_void.
|
||||
// Should also set to avs_void after the value is released
|
||||
// with avs_copy_value. Consider it the equalvent of setting
|
||||
// a pointer to NULL
|
||||
static const AVS_Value avs_void = {'v'};
|
||||
|
||||
AVSC_API(void, avs_copy_value)(AVS_Value * dest, AVS_Value src);
|
||||
AVSC_API(void, avs_release_value)(AVS_Value);
|
||||
|
||||
AVSC_INLINE int avs_defined(AVS_Value v) { return v.type != 'v'; }
|
||||
AVSC_INLINE int avs_is_clip(AVS_Value v) { return v.type == 'c'; }
|
||||
AVSC_INLINE int avs_is_bool(AVS_Value v) { return v.type == 'b'; }
|
||||
AVSC_INLINE int avs_is_int(AVS_Value v) { return v.type == 'i'; }
|
||||
AVSC_INLINE int avs_is_float(AVS_Value v) { return v.type == 'f' || v.type == 'i'; }
|
||||
AVSC_INLINE int avs_is_string(AVS_Value v) { return v.type == 's'; }
|
||||
AVSC_INLINE int avs_is_array(AVS_Value v) { return v.type == 'a'; }
|
||||
AVSC_INLINE int avs_is_error(AVS_Value v) { return v.type == 'e'; }
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(AVS_Clip *, avs_take_clip)(AVS_Value, AVS_ScriptEnvironment *);
|
||||
AVSC_API(void, avs_set_to_clip)(AVS_Value *, AVS_Clip *);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
AVSC_INLINE int avs_as_bool(AVS_Value v)
|
||||
{ return v.d.boolean; }
|
||||
AVSC_INLINE int avs_as_int(AVS_Value v)
|
||||
{ return v.d.integer; }
|
||||
AVSC_INLINE const char * avs_as_string(AVS_Value v)
|
||||
{ return avs_is_error(v) || avs_is_string(v) ? v.d.string : 0; }
|
||||
AVSC_INLINE double avs_as_float(AVS_Value v)
|
||||
{ return avs_is_int(v) ? v.d.integer : v.d.floating_pt; }
|
||||
AVSC_INLINE const char * avs_as_error(AVS_Value v)
|
||||
{ return avs_is_error(v) ? v.d.string : 0; }
|
||||
AVSC_INLINE const AVS_Value * avs_as_array(AVS_Value v)
|
||||
{ return v.d.array; }
|
||||
AVSC_INLINE int avs_array_size(AVS_Value v)
|
||||
{ return avs_is_array(v) ? v.array_size : 1; }
|
||||
AVSC_INLINE AVS_Value avs_array_elt(AVS_Value v, int index)
|
||||
{ return avs_is_array(v) ? v.d.array[index] : v; }
|
||||
|
||||
// only use these functions on am AVS_Value that does not already have
|
||||
// an active value. Remember, treat AVS_Value as a fat pointer.
|
||||
AVSC_INLINE AVS_Value avs_new_value_bool(int v0)
|
||||
{ AVS_Value v = {0}; v.type = 'b'; v.d.boolean = v0 == 0 ? 0 : 1; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_int(int v0)
|
||||
{ AVS_Value v = {0}; v.type = 'i'; v.d.integer = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_string(const char * v0)
|
||||
{ AVS_Value v = {0}; v.type = 's'; v.d.string = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_float(float v0)
|
||||
{ AVS_Value v = {0}; v.type = 'f'; v.d.floating_pt = v0; return v;}
|
||||
AVSC_INLINE AVS_Value avs_new_value_error(const char * v0)
|
||||
{ AVS_Value v = {0}; v.type = 'e'; v.d.string = v0; return v; }
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE AVS_Value avs_new_value_clip(AVS_Clip * v0)
|
||||
{ AVS_Value v = {0}; avs_set_to_clip(&v, v0); return v; }
|
||||
#endif
|
||||
AVSC_INLINE AVS_Value avs_new_value_array(AVS_Value * v0, int size)
|
||||
{ AVS_Value v = {0}; v.type = 'a'; v.d.array = v0; v.array_size = size; return v; }
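Following the fat-pointer rule described above, a minimal usage sketch built from these constructors (assuming the declarations in this header):

/* illustrative only: build, copy and release an AVS_Value */
static void value_roundtrip(void)
{
    AVS_Value src = avs_new_value_int(42);
    AVS_Value dst = avs_void;      /* start from the 'void' value */

    avs_copy_value(&dst, src);     /* copy it as you would copy a fat pointer */
    /* ... use dst ... */
    avs_release_value(dst);        /* release it when done ... */
    dst = avs_void;                /* ... and reset it, as recommended above */
}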
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_Clip
|
||||
//
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(void, avs_release_clip)(AVS_Clip *);
|
||||
AVSC_API(AVS_Clip *, avs_copy_clip)(AVS_Clip *);
|
||||
|
||||
AVSC_API(const char *, avs_clip_get_error)(AVS_Clip *); // return 0 if no error
|
||||
|
||||
AVSC_API(const AVS_VideoInfo *, avs_get_video_info)(AVS_Clip *);
|
||||
|
||||
AVSC_API(int, avs_get_version)(AVS_Clip *);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_get_frame)(AVS_Clip *, int n);
|
||||
// The returned video frame must be released with avs_release_video_frame
|
||||
|
||||
AVSC_API(int, avs_get_parity)(AVS_Clip *, int n);
|
||||
// return field parity if field_based, else parity of first field in frame
|
||||
|
||||
AVSC_API(int, avs_get_audio)(AVS_Clip *, void * buf,
|
||||
INT64 start, INT64 count);
|
||||
// start and count are in samples
|
||||
|
||||
AVSC_API(int, avs_set_cache_hints)(AVS_Clip *,
|
||||
int cachehints, size_t frame_range);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
// This is the callback type used by avs_add_function
|
||||
typedef AVS_Value (AVSC_CC * AVS_ApplyFunc)
|
||||
(AVS_ScriptEnvironment *, AVS_Value args, void * user_data);
|
||||
|
||||
typedef struct AVS_FilterInfo AVS_FilterInfo;
|
||||
struct AVS_FilterInfo
|
||||
{
|
||||
// these members should not be modified outside of the AVS_ApplyFunc callback
|
||||
AVS_Clip * child;
|
||||
AVS_VideoInfo vi;
|
||||
AVS_ScriptEnvironment * env;
|
||||
AVS_VideoFrame * (AVSC_CC * get_frame)(AVS_FilterInfo *, int n);
|
||||
int (AVSC_CC * get_parity)(AVS_FilterInfo *, int n);
|
||||
int (AVSC_CC * get_audio)(AVS_FilterInfo *, void * buf,
|
||||
INT64 start, INT64 count);
|
||||
int (AVSC_CC * set_cache_hints)(AVS_FilterInfo *, int cachehints,
|
||||
int frame_range);
|
||||
void (AVSC_CC * free_filter)(AVS_FilterInfo *);
|
||||
|
||||
// Should be set when ever there is an error to report.
|
||||
// It is cleared before any of the above methods are called
|
||||
const char * error;
|
||||
// this is to store whatever and may be modified at will
|
||||
void * user_data;
|
||||
};
|
||||
|
||||
// Create a new filter
|
||||
// fi is set to point to the AVS_FilterInfo so that you can
|
||||
// modify it once it is initilized.
|
||||
// store_child should generally be set to true. If it is not
|
||||
// set than ALL methods (the function pointers) must be defined
|
||||
// If it is set than you do not need to worry about freeing the child
|
||||
// clip.
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(AVS_Clip *, avs_new_c_filter)(AVS_ScriptEnvironment * e,
|
||||
AVS_FilterInfo * * fi,
|
||||
AVS_Value child, int store_child);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
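Tying these declarations together, a pass-through C filter could be sketched as below (illustrative only; my_get_frame and create_passthrough are made-up names, and a real plugin would register create_passthrough with avs_add_function from its avisynth_c_plugin_init entry point):

/* illustrative only: a filter that simply forwards frames from its child clip */
static AVS_VideoFrame * AVSC_CC my_get_frame(AVS_FilterInfo *fi, int n)
{
    return avs_get_frame(fi->child, n);   /* the caller releases the returned frame */
}

static AVS_Value AVSC_CC create_passthrough(AVS_ScriptEnvironment *env,
                                            AVS_Value args, void *user_data)
{
    AVS_FilterInfo *fi;
    AVS_Clip *clip = avs_new_c_filter(env, &fi, avs_array_elt(args, 0), 1);
    AVS_Value ret = avs_void;

    (void)user_data;
    fi->get_frame = my_get_frame;         /* store_child is set, so unset callbacks use the stored child */
    avs_set_to_clip(&ret, clip);          /* wrap the clip so it can be returned as an AVS_Value */
    avs_release_clip(clip);
    return ret;
}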
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_ScriptEnvironment
|
||||
//
|
||||
|
||||
// For GetCPUFlags. These are backwards-compatible with those in VirtualDub.
|
||||
enum {
|
||||
/* slowest CPU to support extension */
|
||||
AVS_CPU_FORCE = 0x01, // N/A
|
||||
AVS_CPU_FPU = 0x02, // 386/486DX
|
||||
AVS_CPU_MMX = 0x04, // P55C, K6, PII
|
||||
AVS_CPU_INTEGER_SSE = 0x08, // PIII, Athlon
|
||||
AVS_CPU_SSE = 0x10, // PIII, Athlon XP/MP
|
||||
AVS_CPU_SSE2 = 0x20, // PIV, Hammer
|
||||
AVS_CPU_3DNOW = 0x40, // K6-2
|
||||
AVS_CPU_3DNOW_EXT = 0x80, // Athlon
|
||||
AVS_CPU_X86_64 = 0xA0, // Hammer (note: equiv. to 3DNow + SSE2,
|
||||
// which only Hammer will have anyway)
|
||||
};
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(const char *, avs_get_error)(AVS_ScriptEnvironment *); // return 0 if no error
|
||||
|
||||
AVSC_API(long, avs_get_cpu_flags)(AVS_ScriptEnvironment *);
|
||||
AVSC_API(int, avs_check_version)(AVS_ScriptEnvironment *, int version);
|
||||
|
||||
AVSC_API(char *, avs_save_string)(AVS_ScriptEnvironment *, const char* s, int length);
|
||||
AVSC_API(char *, avs_sprintf)(AVS_ScriptEnvironment *, const char * fmt, ...);
|
||||
|
||||
AVSC_API(char *, avs_vsprintf)(AVS_ScriptEnvironment *, const char * fmt, va_list val);
|
||||
// note: val is really a va_list; I hope everyone typedefs va_list to a pointer
|
||||
|
||||
AVSC_API(int, avs_add_function)(AVS_ScriptEnvironment *,
|
||||
const char * name, const char * params,
|
||||
AVS_ApplyFunc apply, void * user_data);
|
||||
|
||||
AVSC_API(int, avs_function_exists)(AVS_ScriptEnvironment *, const char * name);
|
||||
|
||||
AVSC_API(AVS_Value, avs_invoke)(AVS_ScriptEnvironment *, const char * name,
|
||||
AVS_Value args, const char** arg_names);
|
||||
// The returned value must be be released with avs_release_value
|
||||
|
||||
AVSC_API(AVS_Value, avs_get_var)(AVS_ScriptEnvironment *, const char* name);
|
||||
// The returned value must be be released with avs_release_value
|
||||
|
||||
AVSC_API(int, avs_set_var)(AVS_ScriptEnvironment *, const char* name, AVS_Value val);
|
||||
|
||||
AVSC_API(int, avs_set_global_var)(AVS_ScriptEnvironment *, const char* name, const AVS_Value val);
|
||||
|
||||
//void avs_push_context(AVS_ScriptEnvironment *, int level=0);
|
||||
//void avs_pop_context(AVS_ScriptEnvironment *);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_new_video_frame_a)(AVS_ScriptEnvironment *,
|
||||
const AVS_VideoInfo * vi, int align);
|
||||
// align should be at least 16
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_video_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
#endif
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(int, avs_make_writable)(AVS_ScriptEnvironment *, AVS_VideoFrame * * pvf);
|
||||
|
||||
AVSC_API(void, avs_bit_blt)(AVS_ScriptEnvironment *, unsigned char* dstp, int dst_pitch, const unsigned char* srcp, int src_pitch, int row_size, int height);
|
||||
|
||||
typedef void (AVSC_CC *AVS_ShutdownFunc)(void* user_data, AVS_ScriptEnvironment * env);
|
||||
AVSC_API(void, avs_at_exit)(AVS_ScriptEnvironment *, AVS_ShutdownFunc function, void * user_data);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_subframe)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height);
|
||||
// The returned video frame must be released
|
||||
|
||||
AVSC_API(int, avs_set_memory_max)(AVS_ScriptEnvironment *, int mem);
|
||||
|
||||
AVSC_API(int, avs_set_working_dir)(AVS_ScriptEnvironment *, const char * newdir);
|
||||
|
||||
// avisynth.dll exports this; it's a way to use it as a library, without
// writing an AVS script or going through AVIFile.
|
||||
AVSC_API(AVS_ScriptEnvironment *, avs_create_script_environment)(int version);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
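A small sketch combining the frame helpers above: allocate a new frame with the default alignment and blit the first plane of a source frame into it. The plane accessors (avs_get_write_ptr, avs_get_read_ptr, avs_get_pitch, avs_get_row_size, avs_get_height) are assumed to be declared elsewhere in this header.

static AVS_VideoFrame *copy_first_plane(AVS_ScriptEnvironment *env,
                                        const AVS_VideoInfo *vi,
                                        AVS_VideoFrame *src)
{
    AVS_VideoFrame *dst = avs_new_video_frame(env, vi);     /* AVS_FRAME_ALIGN aligned */

    avs_bit_blt(env,
                avs_get_write_ptr(dst), avs_get_pitch(dst),  /* destination */
                avs_get_read_ptr(src),  avs_get_pitch(src),  /* source */
                avs_get_row_size(src),  avs_get_height(src));
    return dst;                                              /* caller must release it */
}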
|
||||
// this symbol is the entry point for the plugin and must
|
||||
// be defined
|
||||
AVSC_EXPORT
|
||||
const char * AVSC_CC avisynth_c_plugin_init(AVS_ScriptEnvironment* env);
|
||||
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(void, avs_delete_script_environment)(AVS_ScriptEnvironment *);
|
||||
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_subframe_planar)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height, int rel_offsetU, int rel_offsetV, int new_pitchUV);
|
||||
// The returned video frame must be released
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
#endif //__AVXSYNTH_C__
|
@ -1,85 +0,0 @@
|
||||
#ifndef __DATA_TYPE_CONVERSIONS_H__
|
||||
#define __DATA_TYPE_CONVERSIONS_H__
|
||||
|
||||
#include <stdint.h>
|
||||
#include <wchar.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
namespace avxsynth {
|
||||
#endif // __cplusplus
|
||||
|
||||
typedef int64_t __int64;
|
||||
typedef int32_t __int32;
|
||||
#ifdef __cplusplus
|
||||
typedef bool BOOL;
|
||||
#else
|
||||
typedef uint32_t BOOL;
|
||||
#endif // __cplusplus
|
||||
typedef void* HMODULE;
|
||||
typedef void* LPVOID;
|
||||
typedef void* PVOID;
|
||||
typedef PVOID HANDLE;
|
||||
typedef HANDLE HWND;
|
||||
typedef HANDLE HINSTANCE;
|
||||
typedef void* HDC;
|
||||
typedef void* HBITMAP;
|
||||
typedef void* HICON;
|
||||
typedef void* HFONT;
|
||||
typedef void* HGDIOBJ;
|
||||
typedef void* HBRUSH;
|
||||
typedef void* HMMIO;
|
||||
typedef void* HACMSTREAM;
|
||||
typedef void* HACMDRIVER;
|
||||
typedef void* HIC;
|
||||
typedef void* HACMOBJ;
|
||||
typedef HACMSTREAM* LPHACMSTREAM;
|
||||
typedef void* HACMDRIVERID;
|
||||
typedef void* LPHACMDRIVER;
|
||||
typedef unsigned char BYTE;
|
||||
typedef BYTE* LPBYTE;
|
||||
typedef char TCHAR;
|
||||
typedef TCHAR* LPTSTR;
|
||||
typedef const TCHAR* LPCTSTR;
|
||||
typedef char* LPSTR;
|
||||
typedef LPSTR LPOLESTR;
|
||||
typedef const char* LPCSTR;
|
||||
typedef LPCSTR LPCOLESTR;
|
||||
typedef wchar_t WCHAR;
|
||||
typedef unsigned short WORD;
|
||||
typedef unsigned int UINT;
|
||||
typedef UINT MMRESULT;
|
||||
typedef uint32_t DWORD;
|
||||
typedef DWORD COLORREF;
|
||||
typedef DWORD FOURCC;
|
||||
typedef DWORD HRESULT;
|
||||
typedef DWORD* LPDWORD;
|
||||
typedef DWORD* DWORD_PTR;
|
||||
typedef int32_t LONG;
|
||||
typedef int32_t* LONG_PTR;
|
||||
typedef LONG_PTR LRESULT;
|
||||
typedef uint32_t ULONG;
|
||||
typedef uint32_t* ULONG_PTR;
|
||||
//typedef __int64_t intptr_t;
|
||||
typedef uint64_t _fsize_t;
|
||||
|
||||
|
||||
//
|
||||
// Structures
|
||||
//
|
||||
|
||||
typedef struct _GUID {
|
||||
DWORD Data1;
|
||||
WORD Data2;
|
||||
WORD Data3;
|
||||
BYTE Data4[8];
|
||||
} GUID;
|
||||
|
||||
typedef GUID REFIID;
|
||||
typedef GUID CLSID;
|
||||
typedef CLSID* LPCLSID;
|
||||
typedef GUID IID;
|
||||
|
||||
#ifdef __cplusplus
|
||||
}; // namespace avxsynth
|
||||
#endif // __cplusplus
|
||||
#endif // __DATA_TYPE_CONVERSIONS_H__
|
@ -1,77 +0,0 @@
|
||||
#ifndef __WINDOWS2LINUX_H__
|
||||
#define __WINDOWS2LINUX_H__
|
||||
|
||||
/*
|
||||
* LINUX SPECIFIC DEFINITIONS
|
||||
*/
|
||||
//
|
||||
// Data types conversions
|
||||
//
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include "basicDataTypeConversions.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
namespace avxsynth {
|
||||
#endif // __cplusplus
|
||||
//
|
||||
// purposefully define the following MSFT definitions
|
||||
// to mean nothing (as they do not mean anything on Linux)
|
||||
//
|
||||
#define __stdcall
|
||||
#define __cdecl
|
||||
#define noreturn
|
||||
#define __declspec(x)
|
||||
#define STDAPI extern "C" HRESULT
|
||||
#define STDMETHODIMP HRESULT __stdcall
|
||||
#define STDMETHODIMP_(x) x __stdcall
|
||||
|
||||
#define STDMETHOD(x) virtual HRESULT x
|
||||
#define STDMETHOD_(a, x) virtual a x
|
||||
|
||||
#ifndef TRUE
|
||||
#define TRUE true
|
||||
#endif
|
||||
|
||||
#ifndef FALSE
|
||||
#define FALSE false
|
||||
#endif
|
||||
|
||||
#define S_OK (0x00000000)
|
||||
#define S_FALSE (0x00000001)
|
||||
#define E_NOINTERFACE (0X80004002)
|
||||
#define E_POINTER (0x80004003)
|
||||
#define E_FAIL (0x80004005)
|
||||
#define E_OUTOFMEMORY (0x8007000E)
|
||||
|
||||
#define INVALID_HANDLE_VALUE ((HANDLE)((LONG_PTR)-1))
|
||||
#define FAILED(hr) ((hr) & 0x80000000)
|
||||
#define SUCCEEDED(hr) (!FAILED(hr))
|
||||
|
||||
|
||||
//
|
||||
// Functions
|
||||
//
|
||||
#define MAKEDWORD(a,b,c,d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
|
||||
#define MAKEWORD(a,b) (((a) << 8) | (b))
|
||||
|
||||
#define lstrlen strlen
|
||||
#define lstrcpy strcpy
|
||||
#define lstrcmpi strcasecmp
|
||||
#define _stricmp strcasecmp
|
||||
#define InterlockedIncrement(x) __sync_fetch_and_add((x), 1)
|
||||
#define InterlockedDecrement(x) __sync_fetch_and_sub((x), 1)
|
||||
// Windows uses (new, old) ordering but GCC has (old, new)
|
||||
#define InterlockedCompareExchange(x,y,z) __sync_val_compare_and_swap(x,z,y)
|
||||
|
||||
#define UInt32x32To64(a, b) ( (uint64_t) ( ((uint64_t)((uint32_t)(a))) * ((uint32_t)(b)) ) )
|
||||
#define Int64ShrlMod32(a, b) ( (uint64_t) ( (uint64_t)(a) >> (b) ) )
|
||||
#define Int32x32To64(a, b) ((__int64)(((__int64)((long)(a))) * ((long)(b))))
|
||||
|
||||
#define MulDiv(nNumber, nNumerator, nDenominator) (int32_t) (((int64_t) (nNumber) * (int64_t) (nNumerator) + (int64_t) ((nDenominator)/2)) / (int64_t) (nDenominator))
|
||||
|
||||
#ifdef __cplusplus
|
||||
}; // namespace avxsynth
|
||||
#endif // __cplusplus
|
||||
|
||||
#endif // __WINDOWS2LINUX_H__
|
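A hedged check of the argument-order note above: with the definitions from this header in scope, the Windows-style call InterlockedCompareExchange(dest, new, old) forwards to GCC's __sync_val_compare_and_swap(dest, old, new).

#include <assert.h>
#include <stdint.h>

int main(void)
{
    volatile int32_t v = 5;
    /* if v equals 5 (old value, 3rd argument), replace it with 7 (new value,
     * 2nd argument); the previous value is returned either way */
    int32_t prev = InterlockedCompareExchange(&v, 7, 5);
    assert(prev == 5 && v == 7);
    return 0;
}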
@ -27,10 +27,8 @@ IN="$2"
|
||||
NAME="$(basename "$IN" | sed 's/\..*//')"
|
||||
|
||||
printf "const char %s_ptx[] = \\" "$NAME" > "$OUT"
|
||||
while IFS= read -r LINE
|
||||
do
|
||||
printf "\n\t\"%s\\\n\"" "$(printf "%s" "$LINE" | sed -e 's/\r//g' -e 's/["\\]/\\&/g')" >> "$OUT"
|
||||
done < "$IN"
|
||||
printf ";\n" >> "$OUT"
|
||||
echo >> "$OUT"
|
||||
sed -e "$(printf 's/\r//g')" -e 's/["\\]/\\&/g' -e "$(printf 's/^/\t"/')" -e 's/$/\\n"/' < "$IN" >> "$OUT"
|
||||
echo ";" >> "$OUT"
|
||||
|
||||
exit 0
|
||||
|
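For reference, the conversion above produces a C translation unit of roughly this shape (hypothetical contents for a two-line input.ptx; every source line becomes an escaped, newline-terminated string literal):

const char input_ptx[] = \
	".version 6.0\n"
	".target sm_30\n"
;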
@ -27,15 +27,19 @@
|
||||
#define COMPAT_OS2THREADS_H
|
||||
|
||||
#define INCL_DOS
|
||||
#define INCL_DOSERRORS
|
||||
#include <os2.h>
|
||||
|
||||
#undef __STRICT_ANSI__ /* for _beginthread() */
|
||||
#include <stdlib.h>
|
||||
#include <time.h>
|
||||
|
||||
#include <sys/builtin.h>
|
||||
#include <sys/fmutex.h>
|
||||
|
||||
#include "libavutil/attributes.h"
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/time.h"
|
||||
|
||||
typedef struct {
|
||||
TID tid;
|
||||
@ -163,6 +167,28 @@ static av_always_inline int pthread_cond_broadcast(pthread_cond_t *cond)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_always_inline int pthread_cond_timedwait(pthread_cond_t *cond,
|
||||
pthread_mutex_t *mutex,
|
||||
const struct timespec *abstime)
|
||||
{
|
||||
int64_t abs_milli = abstime->tv_sec * 1000LL + abstime->tv_nsec / 1000000;
|
||||
ULONG t = av_clip64(abs_milli - av_gettime() / 1000, 0, ULONG_MAX);
|
||||
|
||||
__atomic_increment(&cond->wait_count);
|
||||
|
||||
pthread_mutex_unlock(mutex);
|
||||
|
||||
APIRET ret = DosWaitEventSem(cond->event_sem, t);
|
||||
|
||||
__atomic_decrement(&cond->wait_count);
|
||||
|
||||
DosPostEventSem(cond->ack_sem);
|
||||
|
||||
pthread_mutex_lock(mutex);
|
||||
|
||||
return (ret == ERROR_TIMEOUT) ? ETIMEDOUT : 0;
|
||||
}
|
||||
|
||||
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond,
|
||||
pthread_mutex_t *mutex)
|
||||
{
|
||||
|
@ -38,11 +38,13 @@
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <windows.h>
|
||||
#include <process.h>
|
||||
#include <time.h>
|
||||
|
||||
#include "libavutil/attributes.h"
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/internal.h"
|
||||
#include "libavutil/mem.h"
|
||||
#include "libavutil/time.h"
|
||||
|
||||
typedef struct pthread_t {
|
||||
void *handle;
|
||||
@ -61,6 +63,9 @@ typedef CONDITION_VARIABLE pthread_cond_t;
|
||||
#define InitializeCriticalSection(x) InitializeCriticalSectionEx(x, 0, 0)
|
||||
#define WaitForSingleObject(a, b) WaitForSingleObjectEx(a, b, FALSE)
|
||||
|
||||
#define PTHREAD_CANCEL_ENABLE 1
|
||||
#define PTHREAD_CANCEL_DISABLE 0
|
||||
|
||||
static av_unused unsigned __stdcall attribute_align_arg win32thread_worker(void *arg)
|
||||
{
|
||||
pthread_t *h = (pthread_t*)arg;
|
||||
@ -156,10 +161,31 @@ static inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
|
||||
const struct timespec *abstime)
|
||||
{
|
||||
int64_t abs_milli = abstime->tv_sec * 1000LL + abstime->tv_nsec / 1000000;
|
||||
DWORD t = av_clip64(abs_milli - av_gettime() / 1000, 0, UINT32_MAX);
|
||||
|
||||
if (!SleepConditionVariableSRW(cond, mutex, t, 0)) {
|
||||
DWORD err = GetLastError();
|
||||
if (err == ERROR_TIMEOUT)
|
||||
return ETIMEDOUT;
|
||||
else
|
||||
return EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int pthread_cond_signal(pthread_cond_t *cond)
|
||||
{
|
||||
WakeConditionVariable(cond);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int pthread_setcancelstate(int state, int *oldstate)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* COMPAT_W32PTHREADS_H */
|
||||
|
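A hedged usage sketch for the pthread_cond_timedwait() wrappers added above (OS/2 and win32 alike): the absolute deadline is derived from av_gettime(), which is the clock both wrappers compare against. wait_ready() and its parameters are illustrative only.

#include <errno.h>
#include <stdint.h>
#include <time.h>
#include "libavutil/time.h"           /* av_gettime() */
/* pthread_* types and calls come from <pthread.h> or the wrappers above */

/* wait for *ready to become nonzero, but at most timeout_ms milliseconds */
static int wait_ready(pthread_mutex_t *mutex, pthread_cond_t *cond,
                      const int *ready, int timeout_ms)
{
    int64_t deadline_us = av_gettime() + timeout_ms * 1000LL;
    struct timespec abstime = {
        .tv_sec  = deadline_us / 1000000,
        .tv_nsec = (deadline_us % 1000000) * 1000,
    };
    int err = 0;

    pthread_mutex_lock(mutex);
    while (!*ready && err != ETIMEDOUT)
        err = pthread_cond_timedwait(cond, mutex, &abstime);
    pthread_mutex_unlock(mutex);
    return *ready;
}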
230
configure
vendored
@ -236,6 +236,7 @@ External library support:
|
||||
--enable-libfontconfig enable libfontconfig, useful for drawtext filter [no]
|
||||
--enable-libfreetype enable libfreetype, needed for drawtext filter [no]
|
||||
--enable-libfribidi enable libfribidi, improves drawtext filter [no]
|
||||
--enable-libglslang enable GLSL->SPIRV compilation via libglslang [no]
|
||||
--enable-libgme enable Game Music Emu via libgme [no]
|
||||
--enable-libgsm enable GSM de/encoding via libgsm [no]
|
||||
--enable-libiec61883 enable iec61883 via libiec61883 [no]
|
||||
@ -254,6 +255,8 @@ External library support:
|
||||
--enable-libopenmpt enable decoding tracked files via libopenmpt [no]
|
||||
--enable-libopus enable Opus de/encoding via libopus [no]
|
||||
--enable-libpulse enable Pulseaudio input via libpulse [no]
|
||||
--enable-librabbitmq enable RabbitMQ library [no]
|
||||
--enable-librav1e enable AV1 encoding via rav1e [no]
|
||||
--enable-librsvg enable SVG rasterization via librsvg [no]
|
||||
--enable-librubberband enable rubberband needed for rubberband filter [no]
|
||||
--enable-librtmp enable RTMP[E] support via librtmp [no]
|
||||
@ -301,6 +304,7 @@ External library support:
|
||||
--enable-mbedtls enable mbedTLS, needed for https support
|
||||
if openssl, gnutls or libtls is not used [no]
|
||||
--enable-mediacodec enable Android MediaCodec support [no]
|
||||
--enable-mediafoundation enable encoding via MediaFoundation [auto]
|
||||
--enable-libmysofa enable libmysofa, needed for sofalizer filter [no]
|
||||
--enable-openal enable OpenAL 1.1 capture support [no]
|
||||
--enable-opencl enable OpenCL processing [no]
|
||||
@ -315,6 +319,7 @@ External library support:
|
||||
--disable-securetransport disable Secure Transport, needed for TLS support
|
||||
on OSX if openssl and gnutls are not used [autodetect]
|
||||
--enable-vapoursynth enable VapourSynth demuxer [no]
|
||||
--enable-vulkan enable Vulkan code [no]
|
||||
--disable-xlib disable xlib [autodetect]
|
||||
--disable-zlib disable zlib [autodetect]
|
||||
|
||||
@ -481,6 +486,7 @@ Developer options (useful when working on FFmpeg itself):
|
||||
--ignore-tests=TESTS comma-separated list (without "fate-" prefix
|
||||
in the name) of tests whose result is ignored
|
||||
--enable-linux-perf enable Linux Performance Monitor API
|
||||
--disable-large-tests disable tests that use a large amount of memory
|
||||
|
||||
NOTE: Object files are built at the place where configure is launched.
|
||||
EOF
|
||||
@ -1547,11 +1553,11 @@ require_cc(){
|
||||
}
|
||||
|
||||
require_cpp(){
|
||||
name="$1"
|
||||
headers="$2"
|
||||
classes="$3"
|
||||
shift 3
|
||||
check_lib_cpp "$headers" "$classes" "$@" || die "ERROR: $name not found"
|
||||
log require_cpp "$@"
|
||||
name_version="$1"
|
||||
name="${1%% *}"
|
||||
shift
|
||||
check_lib_cpp "$name" "$@" || die "ERROR: $name_version not found"
|
||||
}
|
||||
|
||||
require_headers(){
|
||||
@ -1662,7 +1668,7 @@ COMPONENT_LIST="
|
||||
"
|
||||
|
||||
EXAMPLE_LIST="
|
||||
avio_dir_cmd_example
|
||||
avio_list_dir_example
|
||||
avio_reading_example
|
||||
decode_audio_example
|
||||
decode_video_example
|
||||
@ -1699,6 +1705,7 @@ EXTERNAL_AUTODETECT_LIBRARY_LIST="
|
||||
libxcb_shape
|
||||
libxcb_xfixes
|
||||
lzma
|
||||
mediafoundation
|
||||
schannel
|
||||
sdl2
|
||||
securetransport
|
||||
@ -1768,6 +1775,7 @@ EXTERNAL_LIBRARY_LIST="
|
||||
libfontconfig
|
||||
libfreetype
|
||||
libfribidi
|
||||
libglslang
|
||||
libgme
|
||||
libgsm
|
||||
libiec61883
|
||||
@ -1784,6 +1792,8 @@ EXTERNAL_LIBRARY_LIST="
|
||||
libopenmpt
|
||||
libopus
|
||||
libpulse
|
||||
librabbitmq
|
||||
librav1e
|
||||
librsvg
|
||||
librtmp
|
||||
libshine
|
||||
@ -1851,6 +1861,7 @@ HWACCEL_LIBRARY_LIST="
|
||||
mmal
|
||||
omx
|
||||
opencl
|
||||
vulkan
|
||||
"
|
||||
|
||||
DOCUMENT_LIST="
|
||||
@ -1929,6 +1940,7 @@ CONFIG_LIST="
|
||||
$SUBSYSTEM_LIST
|
||||
autodetect
|
||||
fontconfig
|
||||
large_tests
|
||||
linux_perf
|
||||
memory_poisoning
|
||||
neon_clobber_test
|
||||
@ -2192,10 +2204,12 @@ SYSTEM_FUNCS="
|
||||
getaddrinfo
|
||||
gethrtime
|
||||
getopt
|
||||
GetModuleHandle
|
||||
GetProcessAffinityMask
|
||||
GetProcessMemoryInfo
|
||||
GetProcessTimes
|
||||
getrusage
|
||||
GetStdHandle
|
||||
GetSystemTimeAsFileTime
|
||||
gettimeofday
|
||||
glob
|
||||
@ -2221,6 +2235,7 @@ SYSTEM_FUNCS="
|
||||
SecItemImport
|
||||
SetConsoleTextAttribute
|
||||
SetConsoleCtrlHandler
|
||||
SetDllDirectory
|
||||
setmode
|
||||
setrlimit
|
||||
Sleep
|
||||
@ -2268,6 +2283,9 @@ TOOLCHAIN_FEATURES="
|
||||
TYPES_LIST="
|
||||
kCMVideoCodecType_HEVC
|
||||
kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange
|
||||
kCVImageBufferTransferFunction_SMPTE_ST_2084_PQ
|
||||
kCVImageBufferTransferFunction_ITU_R_2100_HLG
|
||||
kCVImageBufferTransferFunction_Linear
|
||||
socklen_t
|
||||
struct_addrinfo
|
||||
struct_group_source_req
|
||||
@ -2632,6 +2650,7 @@ ac3_decoder_select="ac3_parser ac3dsp bswapdsp fmtconvert mdct"
|
||||
ac3_fixed_decoder_select="ac3_parser ac3dsp bswapdsp mdct"
|
||||
ac3_encoder_select="ac3dsp audiodsp mdct me_cmp"
|
||||
ac3_fixed_encoder_select="ac3dsp audiodsp mdct me_cmp"
|
||||
acelp_kelvin_decoder_select="audiodsp"
|
||||
adpcm_g722_decoder_select="g722dsp"
|
||||
adpcm_g722_encoder_select="g722dsp"
|
||||
aic_decoder_select="golomb idctdsp"
|
||||
@ -2655,7 +2674,9 @@ asv2_decoder_select="blockdsp bswapdsp idctdsp"
|
||||
asv2_encoder_select="aandcttables bswapdsp fdctdsp pixblockdsp"
|
||||
atrac1_decoder_select="mdct sinewin"
|
||||
atrac3_decoder_select="mdct"
|
||||
atrac3al_decoder_select="mdct"
|
||||
atrac3p_decoder_select="mdct sinewin"
|
||||
atrac3pal_decoder_select="mdct sinewin"
|
||||
atrac9_decoder_select="mdct"
|
||||
avrn_decoder_select="exif jpegtables"
|
||||
bink_decoder_select="blockdsp hpeldsp"
|
||||
@ -2669,6 +2690,7 @@ cook_decoder_select="audiodsp mdct sinewin"
|
||||
cscd_decoder_select="lzo"
|
||||
cscd_decoder_suggest="zlib"
|
||||
dca_decoder_select="mdct"
|
||||
dca_encoder_select="mdct"
|
||||
dds_decoder_select="texturedsp"
|
||||
dirac_decoder_select="dirac_parse dwt golomb videodsp mpegvideoenc"
|
||||
dnxhd_decoder_select="blockdsp idctdsp"
|
||||
@ -2720,6 +2742,8 @@ huffyuv_encoder_select="bswapdsp huffman huffyuvencdsp llvidencdsp"
|
||||
hymt_decoder_select="huffyuv_decoder"
|
||||
iac_decoder_select="imc_decoder"
|
||||
imc_decoder_select="bswapdsp fft mdct sinewin"
|
||||
imm4_decoder_select="bswapdsp"
|
||||
imm5_decoder_select="h264_decoder hevc_decoder"
|
||||
indeo3_decoder_select="hpeldsp"
|
||||
indeo4_decoder_select="ividsp"
|
||||
indeo5_decoder_select="ividsp"
|
||||
@ -2731,7 +2755,7 @@ ljpeg_encoder_select="idctdsp jpegtables mpegvideoenc"
|
||||
lscr_decoder_deps="zlib"
|
||||
magicyuv_decoder_select="llviddsp"
|
||||
magicyuv_encoder_select="llvidencdsp"
|
||||
mdec_decoder_select="blockdsp idctdsp mpegvideo"
|
||||
mdec_decoder_select="blockdsp bswapdsp idctdsp mpegvideo"
|
||||
metasound_decoder_select="lsp mdct sinewin"
|
||||
mimic_decoder_select="blockdsp bswapdsp hpeldsp idctdsp"
|
||||
mjpeg_decoder_select="blockdsp hpeldsp exif idctdsp jpegtables"
|
||||
@ -2768,10 +2792,14 @@ msmpeg4v3_decoder_select="h263_decoder"
|
||||
msmpeg4v3_encoder_select="h263_encoder"
|
||||
mss2_decoder_select="mpegvideo qpeldsp vc1_decoder"
|
||||
mts2_decoder_select="mss34dsp"
|
||||
mv30_decoder_select="aandcttables blockdsp"
|
||||
mvha_decoder_deps="zlib"
|
||||
mvha_decoder_select="llviddsp"
|
||||
mwsc_decoder_deps="zlib"
|
||||
mxpeg_decoder_select="mjpeg_decoder"
|
||||
nellymoser_decoder_select="mdct sinewin"
|
||||
nellymoser_encoder_select="audio_frame_queue mdct sinewin"
|
||||
notchlc_decoder_select="lzf"
|
||||
nuv_decoder_select="idctdsp lzo"
|
||||
on2avc_decoder_select="mdct"
|
||||
opus_decoder_deps="swresample"
|
||||
@ -2817,6 +2845,7 @@ tdsc_decoder_deps="zlib"
|
||||
tdsc_decoder_select="mjpeg_decoder"
|
||||
theora_decoder_select="vp3_decoder"
|
||||
thp_decoder_select="mjpeg_decoder"
|
||||
tiff_decoder_select="mjpeg_decoder"
|
||||
tiff_decoder_suggest="zlib lzma"
|
||||
tiff_encoder_suggest="zlib"
|
||||
truehd_decoder_select="mlp_parser"
|
||||
@ -2859,6 +2888,7 @@ wmv3_decoder_select="vc1_decoder"
|
||||
wmv3image_decoder_select="wmv3_decoder"
|
||||
xma1_decoder_select="wmapro_decoder"
|
||||
xma2_decoder_select="wmapro_decoder"
|
||||
ylc_decoder_select="bswapdsp"
|
||||
zerocodec_decoder_deps="zlib"
|
||||
zlib_decoder_deps="zlib"
|
||||
zlib_encoder_deps="zlib"
|
||||
@ -2974,6 +3004,8 @@ vp9_nvdec_hwaccel_deps="nvdec"
|
||||
vp9_nvdec_hwaccel_select="vp9_decoder"
|
||||
vp9_vaapi_hwaccel_deps="vaapi VADecPictureParameterBufferVP9_bit_depth"
|
||||
vp9_vaapi_hwaccel_select="vp9_decoder"
|
||||
vp9_vdpau_hwaccel_deps="vdpau VdpPictureInfoVP9"
|
||||
vp9_vdpau_hwaccel_select="vp9_decoder"
|
||||
wmv3_d3d11va_hwaccel_select="vc1_d3d11va_hwaccel"
|
||||
wmv3_d3d11va2_hwaccel_select="vc1_d3d11va2_hwaccel"
|
||||
wmv3_dxva2_hwaccel_select="vc1_dxva2_hwaccel"
|
||||
@ -2982,6 +3014,8 @@ wmv3_vaapi_hwaccel_select="vc1_vaapi_hwaccel"
|
||||
wmv3_vdpau_hwaccel_select="vc1_vdpau_hwaccel"
|
||||
|
||||
# hardware-accelerated codecs
|
||||
mediafoundation_deps="mftransform_h MFCreateAlignedMemoryBuffer"
|
||||
mediafoundation_extralibs="-lmfplat -lmfuuid -lole32 -lstrmiids"
|
||||
omx_deps="libdl pthreads"
|
||||
omx_rpi_select="omx"
|
||||
qsv_deps="libmfx"
|
||||
@ -2998,12 +3032,16 @@ scale_cuda_filter_deps_any="cuda_nvcc cuda_llvm"
|
||||
thumbnail_cuda_filter_deps="ffnvcodec"
|
||||
thumbnail_cuda_filter_deps_any="cuda_nvcc cuda_llvm"
|
||||
transpose_npp_filter_deps="ffnvcodec libnpp"
|
||||
overlay_cuda_filter_deps="ffnvcodec"
|
||||
overlay_cuda_filter_deps_any="cuda_nvcc cuda_llvm"
|
||||
|
||||
amf_deps_any="libdl LoadLibrary"
|
||||
nvenc_deps="ffnvcodec"
|
||||
nvenc_deps_any="libdl LoadLibrary"
|
||||
nvenc_encoder_deps="nvenc"
|
||||
|
||||
aac_mf_encoder_deps="mediafoundation"
|
||||
ac3_mf_encoder_deps="mediafoundation"
|
||||
h263_v4l2m2m_decoder_deps="v4l2_m2m h263_v4l2_m2m"
|
||||
h263_v4l2m2m_encoder_deps="v4l2_m2m h263_v4l2_m2m"
|
||||
h264_amf_encoder_deps="amf"
|
||||
@ -3012,10 +3050,11 @@ h264_cuvid_decoder_deps="cuvid"
|
||||
h264_cuvid_decoder_select="h264_mp4toannexb_bsf"
|
||||
h264_mediacodec_decoder_deps="mediacodec"
|
||||
h264_mediacodec_decoder_select="h264_mp4toannexb_bsf h264_parser"
|
||||
h264_mf_encoder_deps="mediafoundation"
|
||||
h264_mmal_decoder_deps="mmal"
|
||||
h264_nvenc_encoder_deps="nvenc"
|
||||
h264_omx_encoder_deps="omx"
|
||||
h264_qsv_decoder_select="h264_mp4toannexb_bsf h264_parser qsvdec"
|
||||
h264_qsv_decoder_select="h264_mp4toannexb_bsf qsvdec"
|
||||
h264_qsv_encoder_select="qsvenc"
|
||||
h264_rkmpp_decoder_deps="rkmpp"
|
||||
h264_rkmpp_decoder_select="h264_mp4toannexb_bsf"
|
||||
@ -3028,8 +3067,9 @@ hevc_cuvid_decoder_deps="cuvid"
|
||||
hevc_cuvid_decoder_select="hevc_mp4toannexb_bsf"
|
||||
hevc_mediacodec_decoder_deps="mediacodec"
|
||||
hevc_mediacodec_decoder_select="hevc_mp4toannexb_bsf hevc_parser"
|
||||
hevc_mf_encoder_deps="mediafoundation"
|
||||
hevc_nvenc_encoder_deps="nvenc"
|
||||
hevc_qsv_decoder_select="hevc_mp4toannexb_bsf hevc_parser qsvdec"
|
||||
hevc_qsv_decoder_select="hevc_mp4toannexb_bsf qsvdec"
|
||||
hevc_qsv_encoder_select="hevcparse qsvenc"
|
||||
hevc_rkmpp_decoder_deps="rkmpp"
|
||||
hevc_rkmpp_decoder_select="hevc_mp4toannexb_bsf"
|
||||
@ -3039,17 +3079,19 @@ hevc_v4l2m2m_decoder_deps="v4l2_m2m hevc_v4l2_m2m"
|
||||
hevc_v4l2m2m_decoder_select="hevc_mp4toannexb_bsf"
|
||||
hevc_v4l2m2m_encoder_deps="v4l2_m2m hevc_v4l2_m2m"
|
||||
mjpeg_cuvid_decoder_deps="cuvid"
|
||||
mjpeg_qsv_decoder_select="qsvdec"
|
||||
mjpeg_qsv_encoder_deps="libmfx"
|
||||
mjpeg_qsv_encoder_select="qsvenc"
|
||||
mjpeg_vaapi_encoder_deps="VAEncPictureParameterBufferJPEG"
|
||||
mjpeg_vaapi_encoder_select="cbs_jpeg jpegtables vaapi_encode"
|
||||
mp3_mf_encoder_deps="mediafoundation"
|
||||
mpeg1_cuvid_decoder_deps="cuvid"
|
||||
mpeg1_v4l2m2m_decoder_deps="v4l2_m2m mpeg1_v4l2_m2m"
|
||||
mpeg2_crystalhd_decoder_select="crystalhd"
|
||||
mpeg2_cuvid_decoder_deps="cuvid"
|
||||
mpeg2_mmal_decoder_deps="mmal"
|
||||
mpeg2_mediacodec_decoder_deps="mediacodec"
|
||||
mpeg2_qsv_decoder_select="qsvdec mpegvideo_parser"
|
||||
mpeg2_qsv_decoder_select="qsvdec"
|
||||
mpeg2_qsv_encoder_select="qsvenc"
|
||||
mpeg2_vaapi_encoder_select="cbs_mpeg2 vaapi_encode"
|
||||
mpeg2_v4l2m2m_decoder_deps="v4l2_m2m mpeg2_v4l2_m2m"
|
||||
@ -3066,11 +3108,11 @@ nvenc_hevc_encoder_select="hevc_nvenc_encoder"
|
||||
vc1_crystalhd_decoder_select="crystalhd"
|
||||
vc1_cuvid_decoder_deps="cuvid"
|
||||
vc1_mmal_decoder_deps="mmal"
|
||||
vc1_qsv_decoder_select="qsvdec vc1_parser"
|
||||
vc1_qsv_decoder_select="qsvdec"
|
||||
vc1_v4l2m2m_decoder_deps="v4l2_m2m vc1_v4l2_m2m"
|
||||
vp8_cuvid_decoder_deps="cuvid"
|
||||
vp8_mediacodec_decoder_deps="mediacodec"
|
||||
vp8_qsv_decoder_select="qsvdec vp8_parser"
|
||||
vp8_qsv_decoder_select="qsvdec"
|
||||
vp8_rkmpp_decoder_deps="rkmpp"
|
||||
vp8_vaapi_encoder_deps="VAEncPictureParameterBufferVP8"
|
||||
vp8_vaapi_encoder_select="vaapi_encode"
|
||||
@ -3078,9 +3120,12 @@ vp8_v4l2m2m_decoder_deps="v4l2_m2m vp8_v4l2_m2m"
|
||||
vp8_v4l2m2m_encoder_deps="v4l2_m2m vp8_v4l2_m2m"
|
||||
vp9_cuvid_decoder_deps="cuvid"
|
||||
vp9_mediacodec_decoder_deps="mediacodec"
|
||||
vp9_qsv_decoder_select="qsvdec"
|
||||
vp9_rkmpp_decoder_deps="rkmpp"
|
||||
vp9_vaapi_encoder_deps="VAEncPictureParameterBufferVP9"
|
||||
vp9_vaapi_encoder_select="vaapi_encode"
|
||||
vp9_qsv_encoder_deps="libmfx MFX_CODEC_VP9"
|
||||
vp9_qsv_encoder_select="qsvenc"
|
||||
vp9_v4l2m2m_decoder_deps="v4l2_m2m vp9_v4l2_m2m"
|
||||
wmv3_crystalhd_decoder_select="crystalhd"
|
||||
|
||||
@ -3096,6 +3141,7 @@ vc1_parser_select="vc1dsp"
|
||||
|
||||
# bitstream_filters
|
||||
aac_adtstoasc_bsf_select="adts_header"
|
||||
av1_frame_merge_bsf_select="cbs_av1"
|
||||
av1_frame_split_bsf_select="cbs_av1"
|
||||
av1_metadata_bsf_select="cbs_av1"
|
||||
eac3_core_bsf_select="ac3_parser"
|
||||
@ -3132,6 +3178,7 @@ mp2_at_decoder_select="mpegaudioheader"
|
||||
mp3_at_decoder_select="mpegaudioheader"
|
||||
pcm_alaw_at_decoder_deps="audiotoolbox"
|
||||
pcm_mulaw_at_decoder_deps="audiotoolbox"
|
||||
qdmc_decoder_select="fft"
|
||||
qdmc_at_decoder_deps="audiotoolbox"
|
||||
qdm2_at_decoder_deps="audiotoolbox"
|
||||
aac_at_encoder_deps="audiotoolbox"
|
||||
@ -3185,6 +3232,8 @@ libopenmpt_demuxer_deps="libopenmpt"
|
||||
libopus_decoder_deps="libopus"
|
||||
libopus_encoder_deps="libopus"
|
||||
libopus_encoder_select="audio_frame_queue"
|
||||
librav1e_encoder_deps="librav1e"
|
||||
librav1e_encoder_select="extract_extradata_bsf"
|
||||
librsvg_decoder_deps="librsvg"
|
||||
libshine_encoder_deps="libshine"
|
||||
libshine_encoder_select="audio_frame_queue"
|
||||
@ -3221,11 +3270,13 @@ videotoolbox_encoder_deps="videotoolbox VTCompressionSessionPrepareToEncodeFrame
|
||||
|
||||
# demuxers / muxers
|
||||
ac3_demuxer_select="ac3_parser"
|
||||
act_demuxer_select="riffdec"
|
||||
aiff_muxer_select="iso_media"
|
||||
asf_demuxer_select="riffdec"
|
||||
asf_o_demuxer_select="riffdec"
|
||||
asf_muxer_select="riffenc"
|
||||
asf_stream_muxer_select="asf_muxer"
|
||||
av1_demuxer_select="av1_frame_merge_bsf av1_parser"
|
||||
avi_demuxer_select="iso_media riffdec exif"
|
||||
avi_muxer_select="riffenc"
|
||||
caf_demuxer_select="iso_media riffdec"
|
||||
@ -3242,6 +3293,8 @@ eac3_demuxer_select="ac3_parser"
|
||||
f4v_muxer_select="mov_muxer"
|
||||
fifo_muxer_deps="threads"
|
||||
flac_demuxer_select="flac_parser"
|
||||
flv_muxer_select="aac_adtstoasc_bsf"
|
||||
gxf_muxer_select="pcm_rechunk_bsf"
|
||||
hds_muxer_select="flv_muxer"
|
||||
hls_muxer_select="mpegts_muxer"
|
||||
hls_muxer_suggest="gcrypt openssl"
|
||||
@ -3250,20 +3303,23 @@ image2_brender_pix_demuxer_select="image2_demuxer"
|
||||
ipod_muxer_select="mov_muxer"
|
||||
ismv_muxer_select="mov_muxer"
|
||||
ivf_muxer_select="av1_metadata_bsf vp9_superframe_bsf"
|
||||
latm_muxer_select="aac_adtstoasc_bsf"
|
||||
matroska_audio_muxer_select="matroska_muxer"
|
||||
matroska_demuxer_select="iso_media riffdec"
|
||||
matroska_demuxer_suggest="bzlib lzo zlib"
|
||||
matroska_muxer_select="iso_media riffenc"
|
||||
matroska_muxer_select="iso_media riffenc vp9_superframe_bsf aac_adtstoasc_bsf"
|
||||
mlp_demuxer_select="mlp_parser"
|
||||
mmf_muxer_select="riffenc"
|
||||
mov_demuxer_select="iso_media riffdec"
|
||||
mov_demuxer_suggest="zlib"
|
||||
mov_muxer_select="iso_media riffenc rtpenc_chain"
|
||||
mov_muxer_select="iso_media riffenc rtpenc_chain vp9_superframe_bsf aac_adtstoasc_bsf"
|
||||
mp3_demuxer_select="mpegaudio_parser"
|
||||
mp3_muxer_select="mpegaudioheader"
|
||||
mp4_muxer_select="mov_muxer"
|
||||
mpegts_demuxer_select="iso_media"
|
||||
mpegts_muxer_select="adts_muxer latm_muxer"
|
||||
mpegts_muxer_select="adts_muxer latm_muxer h264_mp4toannexb_bsf hevc_mp4toannexb_bsf"
|
||||
mpegtsraw_demuxer_select="mpegts_demuxer"
|
||||
mxf_muxer_select="golomb pcm_rechunk_bsf"
|
||||
mxf_d10_muxer_select="mxf_muxer"
|
||||
mxf_opatom_muxer_select="mxf_muxer"
|
||||
nut_muxer_select="riffenc"
|
||||
@ -3274,7 +3330,7 @@ ogv_muxer_select="ogg_muxer"
|
||||
opus_muxer_select="ogg_muxer"
|
||||
psp_muxer_select="mov_muxer"
|
||||
rtp_demuxer_select="sdp_demuxer"
|
||||
rtp_muxer_select="golomb"
|
||||
rtp_muxer_select="golomb jpegtables"
|
||||
rtpdec_select="asf_demuxer jpegtables mov_demuxer mpegts_demuxer rm_demuxer rtp_protocol srtp"
|
||||
rtsp_demuxer_select="http_protocol rtpdec"
|
||||
rtsp_muxer_select="rtp_muxer http_protocol rtp_protocol rtpenc_chain"
|
||||
@ -3287,6 +3343,7 @@ spdif_muxer_select="adts_header"
|
||||
spx_muxer_select="ogg_muxer"
|
||||
swf_demuxer_suggest="zlib"
|
||||
tak_demuxer_select="tak_parser"
|
||||
truehd_demuxer_select="mlp_parser"
|
||||
tg2_muxer_select="mov_muxer"
|
||||
tgp_muxer_select="mov_muxer"
|
||||
vobsub_demuxer_select="mpegps_demuxer"
|
||||
@ -3294,6 +3351,7 @@ w64_demuxer_select="wav_demuxer"
|
||||
w64_muxer_select="wav_muxer"
|
||||
wav_demuxer_select="riffdec"
|
||||
wav_muxer_select="riffenc"
|
||||
webm_chunk_muxer_select="webm_muxer"
|
||||
webm_muxer_select="iso_media riffenc"
|
||||
webm_dash_manifest_demuxer_select="matroska_demuxer"
|
||||
wtv_demuxer_select="mpegts_demuxer riffdec"
|
||||
@ -3398,6 +3456,8 @@ unix_protocol_deps="sys_un_h"
|
||||
unix_protocol_select="network"
|
||||
|
||||
# external library protocols
|
||||
libamqp_protocol_deps="librabbitmq"
|
||||
libamqp_protocol_select="network"
|
||||
librtmp_protocol_deps="librtmp"
|
||||
librtmpe_protocol_deps="librtmp"
|
||||
librtmps_protocol_deps="librtmp"
|
||||
@ -3408,6 +3468,8 @@ libsrt_protocol_deps="libsrt"
|
||||
libsrt_protocol_select="network"
|
||||
libssh_protocol_deps="libssh"
|
||||
libtls_conflict="openssl gnutls mbedtls"
|
||||
libzmq_protocol_deps="libzmq"
|
||||
libzmq_protocol_select="network"
|
||||
|
||||
# filters
|
||||
afftdn_filter_deps="avcodec"
|
||||
@ -3415,7 +3477,7 @@ afftdn_filter_select="fft"
|
||||
afftfilt_filter_deps="avcodec"
|
||||
afftfilt_filter_select="fft"
|
||||
afir_filter_deps="avcodec"
|
||||
afir_filter_select="fft"
|
||||
afir_filter_select="rdft"
|
||||
amovie_filter_deps="avcodec avformat"
|
||||
aresample_filter_deps="swresample"
|
||||
asr_filter_deps="pocketsphinx"
|
||||
@ -3423,6 +3485,7 @@ ass_filter_deps="libass"
|
||||
atempo_filter_deps="avcodec"
|
||||
atempo_filter_select="rdft"
|
||||
avgblur_opencl_filter_deps="opencl"
|
||||
avgblur_vulkan_filter_deps="vulkan libglslang"
|
||||
azmq_filter_deps="libzmq"
|
||||
blackframe_filter_deps="gpl"
|
||||
bm3d_filter_deps="avcodec"
|
||||
@ -3430,6 +3493,7 @@ bm3d_filter_select="dct"
|
||||
boxblur_filter_deps="gpl"
|
||||
boxblur_opencl_filter_deps="opencl gpl"
|
||||
bs2b_filter_deps="libbs2b"
|
||||
chromaber_vulkan_filter_deps="vulkan libglslang"
|
||||
colorkey_opencl_filter_deps="opencl"
|
||||
colormatrix_filter_deps="gpl"
|
||||
convolution_opencl_filter_deps="opencl"
|
||||
@ -3449,7 +3513,10 @@ delogo_filter_deps="gpl"
|
||||
denoise_vaapi_filter_deps="vaapi"
|
||||
derain_filter_select="dnn"
|
||||
deshake_filter_select="pixelutils"
|
||||
deshake_opencl_filter_deps="opencl"
|
||||
dilation_opencl_filter_deps="opencl"
|
||||
dnn_processing_filter_deps="swscale"
|
||||
dnn_processing_filter_select="dnn"
|
||||
drawtext_filter_deps="libfreetype"
|
||||
drawtext_filter_suggest="libfontconfig libfribidi"
|
||||
elbg_filter_deps="avcodec"
|
||||
@ -3468,7 +3535,7 @@ freezedetect_filter_select="scene_sad"
|
||||
frei0r_filter_deps="frei0r libdl"
|
||||
frei0r_src_filter_deps="frei0r libdl"
|
||||
fspp_filter_deps="gpl"
|
||||
geq_filter_deps="gpl"
|
||||
headphone_filter_select="fft"
|
||||
histeq_filter_deps="gpl"
|
||||
hqdn3d_filter_deps="gpl"
|
||||
interlace_filter_deps="gpl"
|
||||
@ -3491,7 +3558,9 @@ openclsrc_filter_deps="opencl"
|
||||
overlay_opencl_filter_deps="opencl"
|
||||
overlay_qsv_filter_deps="libmfx"
|
||||
overlay_qsv_filter_select="qsvvpp"
|
||||
overlay_vulkan_filter_deps="vulkan libglslang"
|
||||
owdenoise_filter_deps="gpl"
|
||||
pad_opencl_filter_deps="opencl"
|
||||
pan_filter_deps="swresample"
|
||||
perspective_filter_deps="gpl"
|
||||
phase_filter_deps="gpl"
|
||||
@ -3510,6 +3579,7 @@ sab_filter_deps="gpl swscale"
|
||||
scale2ref_filter_deps="swscale"
|
||||
scale_filter_deps="swscale"
|
||||
scale_qsv_filter_deps="libmfx"
|
||||
scdet_filter_select="scene_sad"
|
||||
select_filter_select="scene_sad"
|
||||
sharpness_vaapi_filter_deps="vaapi"
|
||||
showcqt_filter_deps="avcodec avformat swscale"
|
||||
@ -3517,11 +3587,13 @@ showcqt_filter_suggest="libfontconfig libfreetype"
|
||||
showcqt_filter_select="fft"
|
||||
showfreqs_filter_deps="avcodec"
|
||||
showfreqs_filter_select="fft"
|
||||
showspatial_filter_select="fft"
|
||||
showspectrum_filter_deps="avcodec"
|
||||
showspectrum_filter_select="fft"
|
||||
showspectrumpic_filter_deps="avcodec"
|
||||
showspectrumpic_filter_select="fft"
|
||||
signature_filter_deps="gpl avcodec avformat"
|
||||
sinc_filter_select="rdft"
|
||||
smartblur_filter_deps="gpl swscale"
|
||||
sobel_opencl_filter_deps="opencl"
|
||||
sofalizer_filter_deps="libmysofa avcodec"
|
||||
@ -3536,10 +3608,13 @@ stereo3d_filter_deps="gpl"
|
||||
subtitles_filter_deps="avformat avcodec libass"
|
||||
super2xsai_filter_deps="gpl"
|
||||
pixfmts_super2xsai_test_deps="super2xsai_filter"
|
||||
superequalizer_filter_select="rdft"
|
||||
surround_filter_select="rdft"
|
||||
tinterlace_filter_deps="gpl"
|
||||
tinterlace_merge_test_deps="tinterlace_filter"
|
||||
tinterlace_pad_test_deps="tinterlace_filter"
|
||||
tonemap_filter_deps="const_nan"
|
||||
tonemap_vaapi_filter_deps="vaapi VAProcFilterParameterBufferHDRToneMapping"
|
||||
tonemap_opencl_filter_deps="opencl const_nan"
|
||||
transpose_opencl_filter_deps="opencl"
|
||||
transpose_vaapi_filter_deps="vaapi VAProcPipelineCaps_rotation_flags"
|
||||
@ -3553,13 +3628,15 @@ zmq_filter_deps="libzmq"
|
||||
zoompan_filter_deps="swscale"
|
||||
zscale_filter_deps="libzimg const_nan"
|
||||
scale_vaapi_filter_deps="vaapi"
|
||||
scale_vulkan_filter_deps="vulkan libglslang"
|
||||
vpp_qsv_filter_deps="libmfx"
|
||||
vpp_qsv_filter_select="qsvvpp"
|
||||
xfade_opencl_filter_deps="opencl"
|
||||
yadif_cuda_filter_deps="ffnvcodec"
|
||||
yadif_cuda_filter_deps_any="cuda_nvcc cuda_llvm"
|
||||
|
||||
# examples
|
||||
avio_dir_cmd_deps="avformat avutil"
|
||||
avio_list_dir_deps="avformat avutil"
|
||||
avio_reading_deps="avformat avcodec avutil"
|
||||
decode_audio_example_deps="avcodec avutil"
|
||||
decode_video_example_deps="avcodec avutil"
|
||||
@ -3599,7 +3676,7 @@ avformat_deps="avcodec avutil"
|
||||
avformat_suggest="libm network zlib"
|
||||
avresample_deps="avutil"
|
||||
avresample_suggest="libm"
|
||||
avutil_suggest="clock_gettime ffnvcodec libm libdrm libmfx opencl user32 vaapi videotoolbox corefoundation corevideo coremedia bcrypt"
|
||||
avutil_suggest="clock_gettime ffnvcodec libm libdrm libmfx opencl user32 vaapi vulkan videotoolbox corefoundation corevideo coremedia bcrypt"
|
||||
postproc_deps="avutil gpl"
|
||||
postproc_suggest="libm"
|
||||
swresample_deps="avutil"
|
||||
@ -3688,6 +3765,7 @@ enable asm
|
||||
enable debug
|
||||
enable doc
|
||||
enable faan faandct faanidct
|
||||
enable large_tests
|
||||
enable optimizations
|
||||
enable runtime_cpudetect
|
||||
enable safe_bitstream_reader
|
||||
@ -4363,7 +4441,7 @@ msvc_common_flags(){
|
||||
# generic catch all at the bottom will print the original flag.
|
||||
-Wall) ;;
|
||||
-Wextra) ;;
|
||||
-std=c99) ;;
|
||||
-std=c*) ;;
|
||||
# Common flags
|
||||
-fomit-frame-pointer) ;;
|
||||
-g) echo -Z7 ;;
|
||||
@ -4379,6 +4457,7 @@ msvc_common_flags(){
|
||||
-l*) echo ${flag#-l}.lib ;;
|
||||
-LARGEADDRESSAWARE) echo $flag ;;
|
||||
-L*) echo -libpath:${flag#-L} ;;
|
||||
-Wl,*) ;;
|
||||
*) echo $flag ;;
|
||||
esac
|
||||
done
|
||||
@ -4606,7 +4685,11 @@ probe_cc(){
|
||||
_ld_path='-libpath:'
|
||||
elif $_cc -nologo- 2>&1 | grep -q Microsoft || { $_cc -v 2>&1 | grep -q clang && $_cc -? > /dev/null 2>&1; }; then
|
||||
_type=msvc
|
||||
_ident=$($_cc 2>&1 | head -n1 | tr -d '\r')
|
||||
if $_cc -nologo- 2>&1 | grep -q Microsoft; then
|
||||
_ident=$($_cc 2>&1 | head -n1 | tr -d '\r')
|
||||
else
|
||||
_ident=$($_cc --version 2>/dev/null | head -n1 | tr -d '\r')
|
||||
fi
|
||||
_DEPCMD='$(DEP$(1)) $(DEP$(1)FLAGS) $($(1)DEP_FLAGS) $< 2>&1 | awk '\''/including/ { sub(/^.*file: */, ""); gsub(/\\/, "/"); if (!match($$0, / /)) print "$@:", $$0 }'\'' > $(@:.o=.d)'
|
||||
_DEPFLAGS='$(CPPFLAGS) $(CFLAGS) -showIncludes -Zs'
|
||||
_cflags_speed="-O2"
|
||||
@ -4734,7 +4817,7 @@ fi
|
||||
|
||||
if test "$cpu" = host; then
|
||||
enabled cross_compile &&
|
||||
die "--cpu=host makes no sense when cross-compiling."
|
||||
warn "--cpu=host makes no sense when cross-compiling."
|
||||
|
||||
case "$cc_type" in
|
||||
gcc|llvm_gcc)
|
||||
@ -5253,6 +5336,7 @@ case $target_os in
|
||||
;;
|
||||
openbsd|bitrig)
|
||||
disable symver
|
||||
enable section_data_rel_ro
|
||||
striptype=""
|
||||
SHFLAGS='-shared'
|
||||
SLIB_INSTALL_NAME='$(SLIBNAME).$(LIBMAJOR).$(LIBMINOR)'
|
||||
@ -5291,6 +5375,11 @@ case $target_os in
|
||||
fi
|
||||
version_script='-exported_symbols_list'
|
||||
VERSION_SCRIPT_POSTPROCESS_CMD='tr " " "\n" | sed -n /global:/,/local:/p | grep ";" | tr ";" "\n" | sed -E "s/(.+)/_\1/g" | sed -E "s/(.+[^*])$$$$/\1*/"'
|
||||
# Workaround for Xcode 11 -fstack-check bug
|
||||
if enabled clang; then
|
||||
clang_version=$($cc -dumpversion)
|
||||
test ${clang_version%%.*} -eq 11 && add_cflags -fno-stack-check
|
||||
fi
|
||||
;;
|
||||
msys*)
|
||||
die "Native MSYS builds are discouraged, please use the MINGW environment."
|
||||
@ -5883,10 +5972,10 @@ EOF
|
||||
elf*) enabled debug && append X86ASMFLAGS $x86asm_debug ;;
|
||||
esac
|
||||
|
||||
check_x86asm avx512_external "vmovdqa32 [eax]{k1}{z}, zmm0"
|
||||
check_x86asm avx2_external "vextracti128 xmm0, ymm0, 0"
|
||||
check_x86asm xop_external "vpmacsdd xmm0, xmm1, xmm2, xmm3"
|
||||
check_x86asm fma4_external "vfmaddps ymm0, ymm1, ymm2, ymm3"
|
||||
enabled avx512 && check_x86asm avx512_external "vmovdqa32 [eax]{k1}{z}, zmm0"
|
||||
enabled avx2 && check_x86asm avx2_external "vextracti128 xmm0, ymm0, 0"
|
||||
enabled xop && check_x86asm xop_external "vpmacsdd xmm0, xmm1, xmm2, xmm3"
|
||||
enabled fma4 && check_x86asm fma4_external "vfmaddps ymm0, ymm1, ymm2, ymm3"
|
||||
check_x86asm cpunop "CPU amdnop"
|
||||
fi
|
||||
|
||||
@ -5996,14 +6085,17 @@ check_func_headers mach/mach_time.h mach_absolute_time
|
||||
check_func_headers stdlib.h getenv
|
||||
check_func_headers sys/stat.h lstat
|
||||
|
||||
check_func_headers windows.h GetModuleHandle
|
||||
check_func_headers windows.h GetProcessAffinityMask
|
||||
check_func_headers windows.h GetProcessTimes
|
||||
check_func_headers windows.h GetStdHandle
|
||||
check_func_headers windows.h GetSystemTimeAsFileTime
|
||||
check_func_headers windows.h LoadLibrary
|
||||
check_func_headers windows.h MapViewOfFile
|
||||
check_func_headers windows.h PeekNamedPipe
|
||||
check_func_headers windows.h SetConsoleTextAttribute
|
||||
check_func_headers windows.h SetConsoleCtrlHandler
|
||||
check_func_headers windows.h SetDllDirectory
|
||||
check_func_headers windows.h Sleep
|
||||
check_func_headers windows.h VirtualAlloc
|
||||
check_func_headers glob.h glob
|
||||
@ -6019,6 +6111,7 @@ check_headers io.h
|
||||
check_headers linux/perf_event.h
|
||||
check_headers libcrystalhd/libcrystalhd_if.h
|
||||
check_headers malloc.h
|
||||
check_headers mftransform.h
|
||||
check_headers net/udplite.h
|
||||
check_headers poll.h
|
||||
check_headers sys/param.h
|
||||
@ -6069,6 +6162,9 @@ enabled videotoolbox && {
|
||||
check_lib coreservices CoreServices/CoreServices.h UTGetOSTypeFromString "-framework CoreServices"
|
||||
check_func_headers CoreMedia/CMFormatDescription.h kCMVideoCodecType_HEVC "-framework CoreMedia"
|
||||
check_func_headers CoreVideo/CVPixelBuffer.h kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange "-framework CoreVideo"
|
||||
check_func_headers CoreVideo/CVImageBuffer.h kCVImageBufferTransferFunction_SMPTE_ST_2084_PQ "-framework CoreVideo"
|
||||
check_func_headers CoreVideo/CVImageBuffer.h kCVImageBufferTransferFunction_ITU_R_2100_HLG "-framework CoreVideo"
|
||||
check_func_headers CoreVideo/CVImageBuffer.h kCVImageBufferTransferFunction_Linear "-framework CoreVideo"
|
||||
}
|
||||
|
||||
check_struct "sys/time.h sys/resource.h" "struct rusage" ru_maxrss
|
||||
@ -6078,8 +6174,10 @@ check_type "windows.h dxva.h" "DXVA_PicParams_VP9" -DWINAPI_FAMILY=WINAPI_FAMILY
|
||||
check_type "windows.h d3d11.h" "ID3D11VideoDecoder"
|
||||
check_type "windows.h d3d11.h" "ID3D11VideoContext"
|
||||
check_type "d3d9.h dxva2api.h" DXVA2_ConfigPictureDecode -D_WIN32_WINNT=0x0602
|
||||
check_func_headers mfapi.h MFCreateAlignedMemoryBuffer -lmfplat
|
||||
|
||||
check_type "vdpau/vdpau.h" "VdpPictureInfoHEVC"
|
||||
check_type "vdpau/vdpau.h" "VdpPictureInfoVP9"
|
||||
|
||||
if [ -z "$nvccflags" ]; then
|
||||
nvccflags=$nvccflags_default
|
||||
@ -6100,10 +6198,10 @@ fi
|
||||
|
||||
if ! disabled ffnvcodec; then
|
||||
ffnv_hdr_list="ffnvcodec/nvEncodeAPI.h ffnvcodec/dynlink_cuda.h ffnvcodec/dynlink_cuviddec.h ffnvcodec/dynlink_nvcuvid.h"
|
||||
check_pkg_config ffnvcodec "ffnvcodec >= 9.0.18.0" "$ffnv_hdr_list" "" || \
|
||||
check_pkg_config ffnvcodec "ffnvcodec >= 8.2.15.8 ffnvcodec < 8.3" "$ffnv_hdr_list" "" || \
|
||||
check_pkg_config ffnvcodec "ffnvcodec >= 8.1.24.9 ffnvcodec < 8.2" "$ffnv_hdr_list" "" || \
|
||||
check_pkg_config ffnvcodec "ffnvcodec >= 8.0.14.9 ffnvcodec < 8.1" "$ffnv_hdr_list" ""
|
||||
check_pkg_config ffnvcodec "ffnvcodec >= 9.1.23.1" "$ffnv_hdr_list" "" || \
|
||||
check_pkg_config ffnvcodec "ffnvcodec >= 9.0.18.3 ffnvcodec < 9.1" "$ffnv_hdr_list" "" || \
|
||||
check_pkg_config ffnvcodec "ffnvcodec >= 8.2.15.10 ffnvcodec < 8.3" "$ffnv_hdr_list" "" || \
|
||||
check_pkg_config ffnvcodec "ffnvcodec >= 8.1.24.11 ffnvcodec < 8.2" "$ffnv_hdr_list" ""
|
||||
fi
|
||||
|
||||
check_cpp_condition winrt windows.h "!WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)"
|
||||
@ -6173,6 +6271,7 @@ for func in $COMPLEX_FUNCS; do
|
||||
done
|
||||
|
||||
# these are off by default, so fail if requested and not available
|
||||
enabled avisynth && require_headers "avisynth/avisynth_c.h"
|
||||
enabled cuda_nvcc && { check_nvcc cuda_nvcc || die "ERROR: failed checking for nvcc."; }
|
||||
enabled chromaprint && require chromaprint chromaprint.h chromaprint_get_version -lchromaprint
|
||||
enabled decklink && { require_headers DeckLinkAPI.h &&
|
||||
@ -6196,7 +6295,7 @@ enabled libcelt && require libcelt celt/celt.h celt_decode -lcelt0 &&
|
||||
die "ERROR: libcelt must be installed and version must be >= 0.11.0."; }
|
||||
enabled libcaca && require_pkg_config libcaca caca caca.h caca_create_canvas
|
||||
enabled libcodec2 && require libcodec2 codec2/codec2.h codec2_create -lcodec2
|
||||
enabled libdav1d && require_pkg_config libdav1d "dav1d >= 0.2.1" "dav1d/dav1d.h" dav1d_version
|
||||
enabled libdav1d && require_pkg_config libdav1d "dav1d >= 0.4.0" "dav1d/dav1d.h" dav1d_version
|
||||
enabled libdavs2 && require_pkg_config libdavs2 "davs2 >= 1.6.0" davs2.h davs2_decoder_open
|
||||
enabled libdc1394 && require_pkg_config libdc1394 libdc1394-2 dc1394/dc1394.h dc1394_new
|
||||
enabled libdrm && require_pkg_config libdrm libdrm xf86drm.h drmGetVersion
|
||||
@ -6209,6 +6308,7 @@ enabled fontconfig && enable libfontconfig
|
||||
enabled libfontconfig && require_pkg_config libfontconfig fontconfig "fontconfig/fontconfig.h" FcInit
|
||||
enabled libfreetype && require_pkg_config libfreetype freetype2 "ft2build.h FT_FREETYPE_H" FT_Init_FreeType
|
||||
enabled libfribidi && require_pkg_config libfribidi fribidi fribidi.h fribidi_version_info
|
||||
enabled libglslang && require_cpp libglslang glslang/SPIRV/GlslangToSpv.h "glslang::TIntermediate*" -lglslang -lOSDependent -lHLSL -lOGLCompiler -lSPVRemapper -lSPIRV -lSPIRV-Tools-opt -lSPIRV-Tools -lpthread -lstdc++
|
||||
enabled libgme && { check_pkg_config libgme libgme gme/gme.h gme_new_emu ||
|
||||
require libgme gme/gme.h gme_new_emu -lgme -lstdc++; }
|
||||
enabled libgsm && { for gsm_hdr in "gsm.h" "gsm/gsm.h"; do
|
||||
@ -6226,10 +6326,14 @@ enabled liblensfun && require_pkg_config liblensfun lensfun lensfun.h lf_
|
||||
# can find the libraries and headers through other means.
|
||||
enabled libmfx && { check_pkg_config libmfx libmfx "mfx/mfxvideo.h" MFXInit ||
|
||||
{ require libmfx "mfx/mfxvideo.h" MFXInit "-llibmfx $advapi32_extralibs" && warn "using libmfx without pkg-config"; } }
|
||||
if enabled libmfx; then
|
||||
check_cc MFX_CODEC_VP9 "mfx/mfxvp9.h mfx/mfxstructures.h" "MFX_CODEC_VP9"
|
||||
fi
|
||||
|
||||
enabled libmodplug && require_pkg_config libmodplug libmodplug libmodplug/modplug.h ModPlug_Load
|
||||
enabled libmp3lame && require "libmp3lame >= 3.98.3" lame/lame.h lame_set_VBR_quality -lmp3lame $libm_extralibs
|
||||
enabled libmysofa && { check_pkg_config libmysofa libmysofa mysofa.h mysofa_load ||
|
||||
require libmysofa mysofa.h mysofa_load -lmysofa $zlib_extralibs; }
|
||||
enabled libmysofa && { check_pkg_config libmysofa libmysofa mysofa.h mysofa_neighborhood_init_withstepdefine ||
|
||||
require libmysofa mysofa.h mysofa_neighborhood_init_withstepdefine -lmysofa $zlib_extralibs; }
|
||||
enabled libnpp && { check_lib libnpp npp.h nppGetLibVersion -lnppig -lnppicc -lnppc -lnppidei ||
|
||||
check_lib libnpp npp.h nppGetLibVersion -lnppi -lnppc -lnppidei ||
|
||||
die "ERROR: libnpp not found"; }
|
||||
@ -6252,6 +6356,8 @@ enabled libopus && {
|
||||
}
|
||||
}
|
||||
enabled libpulse && require_pkg_config libpulse libpulse pulse/pulseaudio.h pa_context_new
|
||||
enabled librabbitmq && require_pkg_config librabbitmq "librabbitmq >= 0.7.1" amqp.h amqp_new_connection
|
||||
enabled librav1e && require_pkg_config librav1e "rav1e >= 0.1.0" rav1e.h rav1e_context_new
|
||||
enabled librsvg && require_pkg_config librsvg librsvg-2.0 librsvg-2.0/librsvg/rsvg.h rsvg_handle_render_cairo
|
||||
enabled librtmp && require_pkg_config librtmp librtmp librtmp/rtmp.h RTMP_Socket
|
||||
enabled librubberband && require_pkg_config librubberband "rubberband >= 1.8.1" rubberband/rubberband-c.h rubberband_new -lstdc++ && append librubberband_extralibs "-lstdc++"
|
||||
@ -6309,12 +6415,12 @@ enabled libx264 && { check_pkg_config libx264 x264 "stdint.h x264.h" x
|
||||
require_cpp_condition libx264 x264.h "X264_BUILD >= 118" &&
|
||||
check_cpp_condition libx262 x264.h "X264_MPEG2"
|
||||
enabled libx265 && require_pkg_config libx265 x265 x265.h x265_api_get &&
|
||||
require_cpp_condition libx265 x265.h "X265_BUILD >= 68"
|
||||
require_cpp_condition libx265 x265.h "X265_BUILD >= 70"
|
||||
enabled libxavs && require libxavs "stdint.h xavs.h" xavs_encoder_encode "-lxavs $pthreads_extralibs $libm_extralibs"
|
||||
enabled libxavs2 && require_pkg_config libxavs2 "xavs2 >= 1.3.0" "stdint.h xavs2.h" xavs2_api_get
|
||||
enabled libxvid && require libxvid xvid.h xvid_global -lxvidcore
|
||||
enabled libzimg && require_pkg_config libzimg "zimg >= 2.7.0" zimg.h zimg_get_api_version
|
||||
enabled libzmq && require_pkg_config libzmq libzmq zmq.h zmq_ctx_new
|
||||
enabled libzmq && require_pkg_config libzmq "libzmq >= 4.2.1" zmq.h zmq_ctx_new
|
||||
enabled libzvbi && require_pkg_config libzvbi zvbi-0.2 libzvbi.h vbi_decoder_new &&
|
||||
{ test_cpp_condition libzvbi.h "VBI_VERSION_MAJOR > 0 || VBI_VERSION_MINOR > 2 || VBI_VERSION_MINOR == 2 && VBI_VERSION_MICRO >= 28" ||
|
||||
enabled gpl || die "ERROR: libzvbi requires version 0.2.28 or --enable-gpl."; }
|
||||
@ -6349,12 +6455,16 @@ enabled opengl && { check_lib opengl GL/glx.h glXGetProcAddress "-lGL
|
||||
check_lib opengl ES2/gl.h glGetError "-isysroot=${sysroot} -Wl,-framework,OpenGLES" ||
|
||||
die "ERROR: opengl not found."
|
||||
}
|
||||
enabled omx_rpi && { test_code cc OMX_Core.h OMX_IndexConfigBrcmVideoRequestIFrame ||
|
||||
{ ! enabled cross_compile &&
|
||||
add_cflags -isystem/opt/vc/include/IL &&
|
||||
test_code cc OMX_Core.h OMX_IndexConfigBrcmVideoRequestIFrame; } ||
|
||||
die "ERROR: OpenMAX IL headers from raspberrypi/firmware not found"; } &&
|
||||
enable omx
|
||||
enabled omx && require_headers OMX_Core.h
|
||||
enabled omx_rpi && { check_headers OMX_Core.h ||
|
||||
{ ! enabled cross_compile && add_cflags -isystem/opt/vc/include/IL && check_headers OMX_Core.h ; } ||
|
||||
die "ERROR: OpenMAX IL headers not found"; } && enable omx
|
||||
enabled openssl && { check_pkg_config openssl openssl openssl/ssl.h OPENSSL_init_ssl ||
|
||||
check_pkg_config openssl openssl openssl/ssl.h SSL_library_init ||
|
||||
check_lib openssl openssl/ssl.h OPENSSL_init_ssl -lssl -lcrypto ||
|
||||
check_lib openssl openssl/ssl.h SSL_library_init -lssl -lcrypto ||
|
||||
check_lib openssl openssl/ssl.h SSL_library_init -lssl32 -leay32 ||
|
||||
check_lib openssl openssl/ssl.h SSL_library_init -lssl -lcrypto -lws2_32 -lgdi32 ||
|
||||
@ -6430,19 +6540,21 @@ pod2man --help > /dev/null 2>&1 && enable pod2man || disable pod2man
|
||||
rsync --help 2> /dev/null | grep -q 'contimeout' && enable rsync_contimeout || disable rsync_contimeout
|
||||
|
||||
# check V4L2 codecs available in the API
|
||||
check_headers linux/fb.h
|
||||
check_headers linux/videodev2.h
|
||||
test_code cc linux/videodev2.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_sanitized struct_v4l2_frmivalenum_discrete
|
||||
check_cc v4l2_m2m linux/videodev2.h "int i = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_VIDEO_M2M | V4L2_BUF_FLAG_LAST;"
|
||||
check_cc vc1_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_VC1_ANNEX_G;"
|
||||
check_cc mpeg1_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG1;"
|
||||
check_cc mpeg2_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG2;"
|
||||
check_cc mpeg4_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG4;"
|
||||
check_cc hevc_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_HEVC;"
|
||||
check_cc h263_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_H263;"
|
||||
check_cc h264_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_H264;"
|
||||
check_cc vp8_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_VP8;"
|
||||
check_cc vp9_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_VP9;"
|
||||
if enabled v4l2_m2m; then
|
||||
check_headers linux/fb.h
|
||||
check_headers linux/videodev2.h
|
||||
test_code cc linux/videodev2.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_sanitized struct_v4l2_frmivalenum_discrete
|
||||
check_cc v4l2_m2m linux/videodev2.h "int i = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_VIDEO_M2M | V4L2_BUF_FLAG_LAST;"
|
||||
check_cc vc1_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_VC1_ANNEX_G;"
|
||||
check_cc mpeg1_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG1;"
|
||||
check_cc mpeg2_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG2;"
|
||||
check_cc mpeg4_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG4;"
|
||||
check_cc hevc_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_HEVC;"
|
||||
check_cc h263_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_H263;"
|
||||
check_cc h264_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_H264;"
|
||||
check_cc vp8_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_VP8;"
|
||||
check_cc vp9_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_VP9;"
|
||||
fi
|
||||
|
||||
check_headers sys/videoio.h
|
||||
test_code cc sys/videoio.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_sanitized struct_v4l2_frmivalenum_discrete
|
||||
@ -6470,8 +6582,8 @@ else
|
||||
EOF
|
||||
fi
|
||||
|
||||
enabled alsa && check_pkg_config alsa alsa "alsa/asoundlib.h" snd_pcm_htimestamp ||
|
||||
check_lib alsa alsa/asoundlib.h snd_pcm_htimestamp -lasound
|
||||
enabled alsa && { check_pkg_config alsa alsa "alsa/asoundlib.h" snd_pcm_htimestamp ||
|
||||
check_lib alsa alsa/asoundlib.h snd_pcm_htimestamp -lasound; }
|
||||
|
||||
enabled libjack &&
|
||||
require_pkg_config libjack jack jack/jack.h jack_port_get_latency_range
|
||||
@ -6526,6 +6638,7 @@ if enabled vaapi; then
|
||||
|
||||
check_type "va/va.h va/va_dec_hevc.h" "VAPictureParameterBufferHEVC"
|
||||
check_struct "va/va.h" "VADecPictureParameterBufferVP9" bit_depth
|
||||
check_type "va/va.h va/va_vpp.h" "VAProcFilterParameterBufferHDRToneMapping"
|
||||
check_struct "va/va.h va/va_vpp.h" "VAProcPipelineCaps" rotation_flags
|
||||
check_type "va/va.h va/va_enc_hevc.h" "VAEncPictureParameterBufferHEVC"
|
||||
check_type "va/va.h va/va_enc_jpeg.h" "VAEncPictureParameterBufferJPEG"
|
||||
@ -6567,6 +6680,9 @@ enabled vdpau &&
|
||||
|
||||
enabled crystalhd && check_lib crystalhd "stdint.h libcrystalhd/libcrystalhd_if.h" DtsCrystalHDVersion -lcrystalhd
|
||||
|
||||
enabled vulkan &&
|
||||
require_pkg_config vulkan "vulkan >= 1.1.97" "vulkan/vulkan.h" vkCreateInstance
|
||||
|
||||
if enabled x86; then
|
||||
case $target_os in
|
||||
mingw32*|mingw64*|win32|win64|linux|cygwin*)
|
||||
@ -6575,7 +6691,7 @@ if enabled x86; then
|
||||
disable ffnvcodec cuvid nvdec nvenc
|
||||
;;
|
||||
esac
|
||||
elif enabled ppc64 && ! enabled bigendian; then
|
||||
elif enabled_any aarch64 ppc64 && ! enabled bigendian; then
|
||||
case $target_os in
|
||||
linux)
|
||||
;;
|
||||
@ -6599,7 +6715,7 @@ EOF
|
||||
|
||||
enabled amf &&
|
||||
check_cpp_condition amf "AMF/core/Version.h" \
|
||||
"(AMF_VERSION_MAJOR << 48 | AMF_VERSION_MINOR << 32 | AMF_VERSION_RELEASE << 16 | AMF_VERSION_BUILD_NUM) >= 0x0001000400040001"
|
||||
"(AMF_VERSION_MAJOR << 48 | AMF_VERSION_MINOR << 32 | AMF_VERSION_RELEASE << 16 | AMF_VERSION_BUILD_NUM) >= 0x0001000400090000"
|
||||
|
||||
# Funny iconv installations are not unusual, so check it after all flags have been set
|
||||
if enabled libc_iconv; then
|
||||
@ -7397,7 +7513,7 @@ cat > $TMPH <<EOF
|
||||
#define FFMPEG_CONFIG_H
|
||||
#define FFMPEG_CONFIGURATION "$(c_escape $FFMPEG_CONFIGURATION)"
|
||||
#define FFMPEG_LICENSE "$(c_escape $license)"
|
||||
#define CONFIG_THIS_YEAR 2019
|
||||
#define CONFIG_THIS_YEAR 2020
|
||||
#define FFMPEG_DATADIR "$(eval c_escape $datadir)"
|
||||
#define AVCONV_DATADIR "$(eval c_escape $datadir)"
|
||||
#define CC_IDENT "$(c_escape ${cc_ident:-Unknown compiler})"
|
||||
|
6
debian/changelog
vendored
@ -1,3 +1,9 @@
|
||||
jellyfin-ffmpeg (4.3.1-1) unstable; urgency=medium
|
||||
|
||||
* New upstream version 4.3.1
|
||||
|
||||
-- nyanmisaka <nst799610810@gmail.com> Wed, 15 Jul 2020 18:24:21 +0800
|
||||
|
||||
jellyfin-ffmpeg (4.2.1-7) unstable; urgency=medium
|
||||
|
||||
* Integrate free libva and intel-vaapi-driver with MIT license in the deb
|
||||
|
2
debian/control
vendored
@ -5,7 +5,7 @@ Maintainer: Jellyfin Packaging Team <packaging@jellyfin.org>
|
||||
Uploaders: Jellyfin Packaging Team <packaging@jellyfin.org>
|
||||
Rules-Requires-Root: no
|
||||
Homepage: https://ffmpeg.org/
|
||||
Standards-Version: 4.2.1
|
||||
Standards-Version: 4.3.1
|
||||
Vcs-Git: https://github.com/jellyfin/jellyfin-ffmpeg.git
|
||||
Vcs-Browser: https://github.com/jellyfin/jellyfin-ffmpeg
|
||||
Build-Depends:
|
||||
|
9
doc/.gitignore
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
/*.1
|
||||
/*.3
|
||||
/*.html
|
||||
/*.pod
|
||||
/config.texi
|
||||
/avoptions_codec.texi
|
||||
/avoptions_format.texi
|
||||
/fate.txt
|
||||
/print_options
|
117
doc/APIchanges
117
doc/APIchanges
@ -15,6 +15,123 @@ libavutil: 2017-10-21
|
||||
|
||||
API changes, most recent first:
|
||||
|
||||
2020-06-05 - ec39c2276a - lavu 56.50.100 - buffer.h
|
||||
Passing NULL as alloc argument to av_buffer_pool_init2() is now allowed.
|
||||
|
||||
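A hedged illustration of the relaxed contract (helper names and sizes are arbitrary): the pool keeps the default buffer allocator by passing NULL for alloc while still providing an opaque pointer and a pool_free callback.

#include "libavutil/buffer.h"
#include "libavutil/mem.h"

static void my_pool_free(void *opaque)
{
    av_free(opaque);                    /* runs once when the pool is torn down */
}

static AVBufferPool *make_pool(void)
{
    /* alloc == NULL: fall back to the default AVBuffer allocator */
    return av_buffer_pool_init2(1 << 16, av_malloc(64), NULL, my_pool_free);
}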
2020-05-27 - ba6cada92e - lavc 58.88.100 - avcodec.h codec.h
|
||||
Move AVCodec-related public API to new header codec.h.
|
||||
|
||||
2020-05-23 - 064b875e89 - lavu 56.49.100 - video_enc_params.h
|
||||
Add AV_VIDEO_ENC_PARAMS_H264.
|
||||
|
||||
2020-05-23 - 2e08b39444 - lavu 56.48.100 - hwcontext.h
|
||||
Add av_hwdevice_ctx_create_derived_opts.
|
||||
|
||||
2020-05-23 - 6b65c4ec54 - lavu 56.47.100 - rational.h
|
||||
Add av_gcd_q().
|
||||
|
||||
2020-05-22 - af9e622776 - lavu 56.46.101 - opt.h
|
||||
Add AV_OPT_FLAG_CHILD_CONSTS.
|
||||
|
||||
2020-05-22 - 9d443c3e68 - lavc 58.87.100 - avcodec.h codec_par.h
|
||||
Move AVBitstreamFilter-related public API to new header bsf.h.
|
||||
Move AVCodecParameters-related public API to new header codec_par.h.
|
||||
|
||||
2020-05-21 - 13b1bbff0b - lavc 58.86.101 - avcodec.h
|
||||
Deprecated AV_CODEC_CAP_INTRA_ONLY and AV_CODEC_CAP_LOSSLESS.
|
||||
|
||||
2020-05-17 - 84af196c65 - lavu 56.46.100 - common.h
|
||||
Add av_sat_add64() and av_sat_sub64()
|
||||
|
||||
2020-05-12 - 991d417692 - lavu 56.45.100 - video_enc_params.h
|
||||
lavc 58.84.100 - avcodec.h
|
||||
Add a new API for exporting video encoding information.
|
||||
Replaces the deprecated API for exporting QP tables from decoders.
|
||||
Add AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS to request this information from
|
||||
decoders.
|
||||
|
||||
2020-05-10 - dccd07f66d - lavu 56.44.100 - hwcontext_vulkan.h
|
||||
Add enabled_inst_extensions, num_enabled_inst_extensions, enabled_dev_extensions
|
||||
and num_enabled_dev_extensions fields to AVVulkanDeviceContext
|
||||
|
||||
2020-04-22 - 0e1db79e37 - lavc 58.81.100 - packet.h
|
||||
- lavu 56.43.100 - dovi_meta.h
|
||||
Add AV_PKT_DATA_DOVI_CONF and AVDOVIDecoderConfigurationRecord.
|
||||
|
||||
2020-04-15 - 22b25b3ea5 - lavc 58.79.100 - avcodec.h
|
||||
Add formal support for calling avcodec_flush_buffers() on encoders.
|
||||
Encoders that set the cap AV_CODEC_CAP_ENCODER_FLUSH will be flushed.
|
||||
For all other encoders, the call is now a no-op rather than undefined
|
||||
behaviour.
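A sketch of the intended call pattern, assuming enc_ctx is an already opened encoder context:

    #include <libavcodec/avcodec.h>

    /* Reset an encoder between segments; encoders without
     * AV_CODEC_CAP_ENCODER_FLUSH now simply treat this as a no-op. */
    static void flush_encoder(AVCodecContext *enc_ctx)
    {
        avcodec_flush_buffers(enc_ctx);
    }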
|
||||
|
||||
2020-04-10 - 672946c7fe - lavc 58.78.100 - avcodec.h codec_desc.h codec_id.h packet.h
|
||||
Move AVCodecDesc-related public API to new header codec_desc.h.
|
||||
Move AVCodecID enum to new header codec_id.h.
|
||||
Move AVPacket-related public API to new header packet.h.
|
||||
|
||||
2020-03-29 - 4cb0dda555 - lavf 58.42.100 - avformat.h
|
||||
av_read_frame() now guarantees to handle uninitialized input packets
|
||||
and to return refcounted packets on success.
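In practice this means a read loop no longer needs av_init_packet(); a minimal sketch,
assuming fmt_ctx is an already opened AVFormatContext:

    #include <libavformat/avformat.h>

    static void drain_input(AVFormatContext *fmt_ctx)
    {
        AVPacket pkt; /* no prior initialization needed as of lavf 58.42.100 */

        while (av_read_frame(fmt_ctx, &pkt) >= 0) {
            /* pkt is refcounted here; consume it, then drop the reference */
            av_packet_unref(&pkt);
        }
    }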
|
||||
|
||||
2020-03-27 - c52ec0367d - lavc 58.77.100 - avcodec.h
|
||||
av_packet_ref() now guarantees to return the destination packet
|
||||
in a blank state on error.
|
||||
|
||||
2020-03-10 - 05d27f342b - lavc 58.75.100 - avcodec.h
|
||||
Add AV_PKT_DATA_ICC_PROFILE.
|
||||
|
||||
2020-02-21 - d005a7cdfd - lavc 58.73.101 - avcodec.h
|
||||
Add AV_CODEC_EXPORT_DATA_PRFT.
|
||||
|
||||
2020-02-21 - c666689491 - lavc 58.73.100 - avcodec.h
|
||||
Add AVCodecContext.export_side_data and AV_CODEC_EXPORT_DATA_MVS.
|
||||
|
||||
2020-02-13 - e8f054b095 - lavu 56.41.100 - tx.h
|
||||
Add AV_TX_INT32_FFT and AV_TX_INT32_MDCT
|
||||
|
||||
2020-02-12 - 3182114f88 - lavu 56.40.100 - log.h
|
||||
Add av_log_once().
|
||||
|
||||
2020-02-04 - a88449ffb2 - lavu 56.39.100 - hwcontext.h
|
||||
Add AV_PIX_FMT_VULKAN
|
||||
Add AV_HWDEVICE_TYPE_VULKAN and implementation.
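A minimal device-creation sketch, assuming a Vulkan-capable FFmpeg build:

    #include <libavutil/hwcontext.h>

    static AVBufferRef *create_vulkan_device(void)
    {
        AVBufferRef *dev = NULL;
        /* NULL device string and options: let lavu pick a default Vulkan device */
        if (av_hwdevice_ctx_create(&dev, AV_HWDEVICE_TYPE_VULKAN, NULL, NULL, 0) < 0)
            return NULL;
        return dev;
    }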
|
||||
|
||||
2020-01-30 - 27529eeb27 - lavf 58.37.100 - avio.h
|
||||
Add avio_protocol_get_class().
|
||||
|
||||
2020-01-15 - 717b2074ec - lavc 58.66.100 - avcodec.h
|
||||
Add AV_PKT_DATA_PRFT and AVProducerReferenceTime.
|
||||
|
||||
2019-12-27 - 45259a0ee4 - lavu 56.38.100 - eval.h
|
||||
Add av_expr_count_func().
|
||||
|
||||
2019-12-26 - 16685114d5 - lavu 56.37.100 - buffer.h
|
||||
Add av_buffer_pool_buffer_get_opaque().
|
||||
|
||||
2019-11-17 - 1c23abc88f - lavu 56.36.100 - eval API
|
||||
Add av_expr_count_vars().
|
||||
|
||||
2019-10-14 - f3746d31f9 - lavu 56.35.101 - opt.h
|
||||
Add AV_OPT_FLAG_RUNTIME_PARAM.
|
||||
|
||||
2019-09-25 - f8406ab4b9 - lavc 58.59.100 - avcodec.h
|
||||
Add max_samples
|
||||
|
||||
2019-09-04 - 2a9d461abc - lavu 56.35.100 - hwcontext_videotoolbox.h
|
||||
Add av_map_videotoolbox_format_from_pixfmt2() for full range pixfmt
|
||||
|
||||
2019-09-01 - 8821d1f56e - lavu 56.34.100 - pixfmt.h
|
||||
Add EBU Tech. 3213-E AVColorPrimaries value
|
||||
|
||||
2019-08-17 - 95fa73a2b4 - lavf 58.31.101 - avio.h
|
||||
4K limit removed from avio_printf.
|
||||
|
||||
2019-08-17 - a82f8f2f10 - lavf 58.31.100 - avio.h
|
||||
Add avio_print_string_array and avio_print.
|
||||
|
||||
2019-07-27 - 42e2319ba9 - lavu 56.33.100 - tx.h
|
||||
Add AV_TX_DOUBLE_FFT and AV_TX_DOUBLE_MDCT
|
||||
|
||||
-------- 8< --------- FFmpeg 4.2 was cut here -------- 8< ---------
|
||||
|
||||
2019-06-21 - a30e44098a - lavu 56.30.100 - frame.h
|
||||
|
@ -38,7 +38,7 @@ PROJECT_NAME = FFmpeg
|
||||
# could be handy for archiving the generated documentation or if some version
|
||||
# control system is used.
|
||||
|
||||
PROJECT_NUMBER = 4.2.1
|
||||
PROJECT_NUMBER = 4.3.1
|
||||
|
||||
# Using the PROJECT_BRIEF tag one can provide an optional one line description
|
||||
# for a project that appears at the top of each page and should give viewer a
|
||||
|
@ -224,6 +224,10 @@ Insert or remove AUD NAL units in all access units of the stream.
|
||||
@item sample_aspect_ratio
|
||||
Set the sample aspect ratio of the stream in the VUI parameters.
|
||||
|
||||
@item overscan_appropriate_flag
|
||||
Set whether the stream is suitable for display using overscan
|
||||
or not (see H.264 section E.2.1).
|
||||
|
||||
@item video_format
|
||||
@item video_full_range_flag
|
||||
Set the video format in the stream (see H.264 section E.2.1 and
|
||||
@ -544,6 +548,36 @@ ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
|
||||
@section null
|
||||
This bitstream filter passes the packets through unchanged.
|
||||
|
||||
@section pcm_rechunk
|
||||
|
||||
Repacketize PCM audio to a fixed number of samples per packet or a fixed packet
|
||||
rate per second. This is similar to the @ref{asetnsamples,,asetnsamples audio
|
||||
filter,ffmpeg-filters} but works on audio packets instead of audio frames.
|
||||
|
||||
@table @option
|
||||
@item nb_out_samples, n
|
||||
Set the number of samples per each output audio packet. The number is intended
|
||||
as the number of samples @emph{per each channel}. Default value is 1024.
|
||||
|
||||
@item pad, p
|
||||
If set to 1, the filter will pad the last audio packet with silence, so that it
|
||||
will contain the same number of samples (or roughly the same number of samples,
|
||||
see @option{frame_rate}) as the previous ones. Default value is 1.
|
||||
|
||||
@item frame_rate, r
|
||||
This option makes the filter output a fixed number of packets per second instead
|
||||
of a fixed number of samples per packet. If the audio sample rate is not
|
||||
divisible by the frame rate then the number of samples will not be constant but
|
||||
will vary slightly so that each packet will start as close to the frame
|
||||
boundary as possible. Using this option has precedence over @option{nb_out_samples}.
|
||||
@end table
|
||||
|
||||
You can generate the well known 1602-1601-1602-1601-1602 pattern of 48kHz audio
|
||||
for NTSC frame rate using the @option{frame_rate} option.
|
||||
@example
|
||||
ffmpeg -f lavfi -i sine=r=48000:d=1 -c pcm_s16le -bsf pcm_rechunk=r=30000/1001 -f framecrc -
|
||||
@end example
|
||||
|
||||
@section prores_metadata
|
||||
|
||||
Modify color property metadata embedded in prores stream.
|
||||
@ -585,6 +619,10 @@ Keep the same transfer characteristics property (default).
|
||||
@item unknown
|
||||
@item bt709
|
||||
BT 601, BT 709, BT 2020
|
||||
@item smpte2084
|
||||
SMPTE ST 2084
|
||||
@item arib-std-b67
|
||||
ARIB STD-B67
|
||||
@end table
|
||||
|
||||
|
||||
@ -594,7 +632,7 @@ Available values are:
|
||||
|
||||
@table @samp
|
||||
@item auto
|
||||
Keep the same transfer characteristics property (default).
|
||||
Keep the same colorspace property (default).
|
||||
|
||||
@item unknown
|
||||
@item bt709
|
||||
@ -610,6 +648,11 @@ Set Rec709 colorspace for each frame of the file
|
||||
ffmpeg -i INPUT -c copy -bsf:v prores_metadata=color_primaries=bt709:color_trc=bt709:colorspace=bt709 output.mov
|
||||
@end example
|
||||
|
||||
Set Hybrid Log-Gamma parameters for each frame of the file
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf:v prores_metadata=color_primaries=bt2020:color_trc=arib-std-b67:colorspace=bt2020nc output.mov
|
||||
@end example
|
||||
|
||||
@section remove_extra
|
||||
|
||||
Remove extradata from packets.
|
||||
@ -659,7 +702,9 @@ Modify metadata embedded in a VP9 stream.
|
||||
|
||||
@table @option
|
||||
@item color_space
|
||||
Set the color space value in the frame header.
|
||||
Set the color space value in the frame header. Note that any frame
|
||||
set to RGB will be implicitly set to PC range and that RGB is
|
||||
incompatible with profiles 0 and 2.
|
||||
@table @samp
|
||||
@item unknown
|
||||
@item bt601
|
||||
@ -671,8 +716,8 @@ Set the color space value in the frame header.
|
||||
@end table
|
||||
|
||||
@item color_range
|
||||
Set the color range value in the frame header. Note that this cannot
|
||||
be set in RGB streams.
|
||||
Set the color range value in the frame header. Note that any value
|
||||
imposed by the color space will take precedence over this value.
|
||||
@table @samp
|
||||
@item tv
|
||||
@item pc
|
||||
|
@ -48,6 +48,8 @@ config
|
||||
tools/target_dec_<decoder>_fuzzer
|
||||
Build fuzzer to fuzz the specified decoder.
|
||||
|
||||
tools/target_bsf_<filter>_fuzzer
|
||||
Build fuzzer to fuzz the specified bitstream filter.
|
||||
|
||||
Useful standard make commands:
|
||||
make -t <target>
|
||||
|
@ -55,6 +55,7 @@ Do not draw edges.
|
||||
@item psnr
|
||||
Set error[?] variables during encoding.
|
||||
@item truncated
|
||||
Input bitstream might be randomly truncated.
|
||||
@item drop_changed
|
||||
Don't output frames whose parameters differ from first decoded frame in stream.
|
||||
Error AVERROR_INPUT_CHANGED is returned when a frame is dropped.
|
||||
@ -79,6 +80,8 @@ Deprecated, use mpegvideo private options instead.
|
||||
Apply interlaced motion estimation.
|
||||
@item cgop
|
||||
Use closed gop.
|
||||
@item output_corrupt
|
||||
Output even potentially corrupted frames.
|
||||
@end table
|
||||
|
||||
@item me_method @var{integer} (@emph{encoding,video})
|
||||
@ -643,6 +646,24 @@ noise preserving sum of squared differences
|
||||
|
||||
@item dia_size @var{integer} (@emph{encoding,video})
|
||||
Set diamond type & size for motion estimation.
|
||||
@table @samp
|
||||
@item (1024, INT_MAX)
|
||||
full motion estimation (slowest)
|
||||
@item (768, 1024]
|
||||
umh motion estimation
|
||||
@item (512, 768]
|
||||
hex motion estimation
|
||||
@item (256, 512]
|
||||
l2s diamond motion estimation
|
||||
@item [2,256]
|
||||
var diamond motion estimation
|
||||
@item (-1, 2)
|
||||
small diamond motion estimation
|
||||
@item -1
|
||||
funny diamond motion estimation
|
||||
@item (INT_MIN, -1)
|
||||
sab diamond motion estimation
|
||||
@end table
|
||||
|
||||
@item last_pred @var{integer} (@emph{encoding,video})
|
||||
Set amount of motion predictors from the previous frame.
|
||||
@ -760,14 +781,12 @@ Set noise reduction.
|
||||
Set number of bits which should be loaded into the rc buffer before
|
||||
decoding starts.
|
||||
|
||||
@item flags2 @var{flags} (@emph{decoding/encoding,audio,video})
|
||||
@item flags2 @var{flags} (@emph{decoding/encoding,audio,video,subtitles})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item fast
|
||||
Allow non spec compliant speedup tricks.
|
||||
@item sgop
|
||||
Deprecated, use mpegvideo private options instead.
|
||||
@item noout
|
||||
Skip bitstream encoding.
|
||||
@item ignorecrop
|
||||
@ -781,6 +800,22 @@ Show all frames before the first keyframe.
|
||||
@item export_mvs
|
||||
Export motion vectors into frame side-data (see @code{AV_FRAME_DATA_MOTION_VECTORS})
|
||||
for codecs that support it. See also @file{doc/examples/export_mvs.c}.
|
||||
@item skip_manual
|
||||
Do not skip samples and export skip information as frame side data.
|
||||
@item ass_ro_flush_noop
|
||||
Do not reset ASS ReadOrder field on flush.
|
||||
@end table
|
||||
|
||||
@item export_side_data @var{flags} (@emph{decoding/encoding,audio,video,subtitles})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item mvs
|
||||
Export motion vectors into frame side-data (see @code{AV_FRAME_DATA_MOTION_VECTORS})
|
||||
for codecs that support it. See also @file{doc/examples/export_mvs.c}.
|
||||
@item prft
|
||||
Export encoder Producer Reference Time into packet side-data (see @code{AV_PKT_DATA_PRFT})
|
||||
for codecs that support it.
|
||||
@end table
|
||||
|
||||
@item error @var{integer} (@emph{encoding,video})
|
||||
@ -820,49 +855,8 @@ Set number of macroblock rows at the bottom which are skipped.
|
||||
|
||||
@item profile @var{integer} (@emph{encoding,audio,video})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item unknown
|
||||
|
||||
@item aac_main
|
||||
|
||||
@item aac_low
|
||||
|
||||
@item aac_ssr
|
||||
|
||||
@item aac_ltp
|
||||
|
||||
@item aac_he
|
||||
|
||||
@item aac_he_v2
|
||||
|
||||
@item aac_ld
|
||||
|
||||
@item aac_eld
|
||||
|
||||
@item mpeg2_aac_low
|
||||
|
||||
@item mpeg2_aac_he
|
||||
|
||||
@item mpeg4_sp
|
||||
|
||||
@item mpeg4_core
|
||||
|
||||
@item mpeg4_main
|
||||
|
||||
@item mpeg4_asp
|
||||
|
||||
@item dts
|
||||
|
||||
@item dts_es
|
||||
|
||||
@item dts_96_24
|
||||
|
||||
@item dts_hd_hra
|
||||
|
||||
@item dts_hd_ma
|
||||
|
||||
@end table
|
||||
Set encoder codec profile. Default value is @samp{unknown}. Encoder specific
|
||||
profiles are documented in the relevant encoder documentation.
|
||||
|
||||
@item level @var{integer} (@emph{encoding,audio,video})
|
||||
|
||||
|
@ -57,7 +57,7 @@ You need to explicitly configure the build with @code{--enable-libdav1d}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following option is supported by the libdav1d wrapper.
|
||||
The following options are supported by the libdav1d wrapper.
|
||||
|
||||
@table @option
|
||||
|
||||
@ -68,8 +68,15 @@ Set amount of frame threads to use during decoding. The default value is 0 (auto
|
||||
Set amount of tile threads to use during decoding. The default value is 0 (autodetect).
|
||||
|
||||
@item filmgrain
|
||||
Apply film grain to the decoded video if present in the bitstream. The default value
|
||||
is true.
|
||||
Apply film grain to the decoded video if present in the bitstream. Defaults to the
|
||||
internal default of the library.
|
||||
|
||||
@item oppoint
|
||||
Select an operating point of a scalable AV1 bitstream (0 - 31). Defaults to the
|
||||
internal default of the library.
|
||||
|
||||
@item alllayers
|
||||
Output all spatial layers of a scalable AV1 bitstream. The default value is false.
|
||||
|
||||
@end table
|
||||
|
||||
@ -280,7 +287,7 @@ palette is stored in the IFO file, and therefore not available when reading
|
||||
from dumped VOB files.
|
||||
|
||||
The format for this option is a string containing 16 24-bits hexadecimal
|
||||
numbers (without 0x prefix) separated by comas, for example @code{0d00ee,
|
||||
numbers (without 0x prefix) separated by commas, for example @code{0d00ee,
|
||||
ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
|
||||
7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b}.
|
||||
|
||||
@ -309,6 +316,11 @@ List of teletext page numbers to decode. Pages that do not match the specified
|
||||
list are dropped. You may use the special @code{*} string to match all pages,
|
||||
or @code{subtitle} to match all subtitle pages.
|
||||
Default value is *.
|
||||
@item txt_default_region
|
||||
Set default character set used for decoding, a value between 0 and 87 (see
|
||||
ETS 300 706, Section 15, Table 32). Default value is -1, which does not
|
||||
override the libzvbi default. This option is needed for some legacy level 1.0
|
||||
transmissions which cannot signal the proper charset.
|
||||
@item txt_chop_top
|
||||
Discards the top teletext line. Default value is 1.
|
||||
@item txt_format
|
||||
|
@ -331,6 +331,10 @@ segment index to start live streams at (negative values are from the end).
|
||||
Maximum number of times an insufficient list is attempted to be reloaded.
|
||||
Default value is 1000.
|
||||
|
||||
@item m3u8_hold_counters
|
||||
The maximum number of times to load m3u8 when it refreshes without new segments.
|
||||
Default value is 1000.
|
||||
|
||||
@item http_persistent
|
||||
Use persistent HTTP connections. Applicable only for HTTP streams.
|
||||
Enabled by default.
|
||||
@ -338,6 +342,10 @@ Enabled by default.
|
||||
@item http_multiple
|
||||
Use multiple HTTP connections for downloading HTTP segments.
|
||||
Enabled by default for HTTP/1.1 servers.
|
||||
|
||||
@item http_seekable
|
||||
Use HTTP partial requests for downloading HTTP segments.
|
||||
0 = disable, 1 = enable, -1 = auto, Default is auto.
|
||||
@end table
|
||||
|
||||
@section image2
|
||||
@ -448,6 +456,17 @@ nanosecond precision.
|
||||
@item video_size
|
||||
Set the video size of the images to read. If not specified the video
|
||||
size is guessed from the first image file in the sequence.
|
||||
@item export_path_metadata
|
||||
If set to 1, will add two extra fields to the metadata found in input, making them
|
||||
also available for other filters (see @var{drawtext} filter for examples). Default
|
||||
value is 0. The extra fields are described below:
|
||||
@table @option
|
||||
@item lavf.image2dec.source_path
|
||||
Corresponds to the full path to the input file being read.
|
||||
@item lavf.image2dec.source_basename
|
||||
Corresponds to the name of the file being read.
|
||||
@end table
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@ -585,9 +604,13 @@ Set the sample rate for libopenmpt to output.
|
||||
Range is from 1000 to INT_MAX. The value default is 48000.
|
||||
@end table
|
||||
|
||||
@section mov/mp4/3gp/QuickTime
|
||||
@section mov/mp4/3gp
|
||||
|
||||
QuickTime / MP4 demuxer.
|
||||
Demuxer for Quicktime File Format & ISO/IEC Base Media File Format (ISO/IEC 14496-12 or MPEG-4 Part 12, ISO/IEC 15444-12 or JPEG 2000 Part 12).
|
||||
|
||||
Registered extensions: mov, mp4, m4a, 3gp, 3g2, mj2, psp, m4b, ism, ismv, isma, f4v
|
||||
|
||||
@subsection Options
|
||||
|
||||
This demuxer accepts the following options:
|
||||
@table @option
|
||||
@ -598,10 +621,73 @@ Enabling this can theoretically leak information in some use cases.
|
||||
@item use_absolute_path
|
||||
Allows loading of external tracks via absolute paths, disabled by default.
|
||||
Enabling this poses a security risk. It should only be enabled if the source
|
||||
is known to be non malicious.
|
||||
is known to be non-malicious.
|
||||
|
||||
@item seek_streams_individually
|
||||
When seeking, identify the closest point in each stream individually and demux packets in
|
||||
that stream from identified point. This can lead to a different sequence of packets compared
|
||||
to demuxing linearly from the beginning. Default is true.
|
||||
|
||||
@item ignore_editlist
|
||||
Ignore any edit list atoms. The demuxer, by default, modifies the stream index to reflect the
|
||||
timeline described by the edit list. Default is false.
|
||||
|
||||
@item advanced_editlist
|
||||
Modify the stream index to reflect the timeline described by the edit list. @code{ignore_editlist}
|
||||
must be set to false for this option to be effective.
|
||||
If both @code{ignore_editlist} and this option are set to false, then only the
|
||||
start of the stream index is modified to reflect initial dwell time or starting timestamp
|
||||
described by the edit list. Default is true.
|
||||
|
||||
@item ignore_chapters
|
||||
Don't parse chapters. This includes GoPro 'HiLight' tags/moments. Note that chapters are
|
||||
only parsed when input is seekable. Default is false.
|
||||
|
||||
@item use_mfra_for
|
||||
For seekable fragmented input, set fragment's starting timestamp from media fragment random access box, if present.
|
||||
|
||||
Following options are available:
|
||||
@table @samp
|
||||
@item auto
|
||||
Auto-detect whether to set mfra timestamps as PTS or DTS @emph{(default)}
|
||||
|
||||
@item dts
|
||||
Set mfra timestamps as DTS
|
||||
|
||||
@item pts
|
||||
Set mfra timestamps as PTS
|
||||
|
||||
@item 0
|
||||
Don't use mfra box to set timestamps
|
||||
@end table
|
||||
|
||||
@item export_all
|
||||
Export unrecognized boxes within the @var{udta} box as metadata entries. The first four
|
||||
characters of the box type are set as the key. Default is false.
|
||||
|
||||
@item export_xmp
|
||||
Export entire contents of @var{XMP_} box and @var{uuid} box as a string with key @code{xmp}. Note that
|
||||
if @code{export_all} is set and this option isn't, the contents of @var{XMP_} box are still exported
|
||||
but with key @code{XMP_}. Default is false.
|
||||
|
||||
@item activation_bytes
|
||||
4-byte key required to decrypt Audible AAX and AAX+ files. See Audible AAX subsection below.
|
||||
|
||||
@item audible_fixed_key
|
||||
Fixed key used for handling Audible AAX/AAX+ files. It has been pre-set so should not be necessary to
|
||||
specify.
|
||||
|
||||
@item decryption_key
|
||||
16-byte key, in hex, to decrypt files encrypted using ISO Common Encryption (CENC/AES-128 CTR; ISO/IEC 23001-7).
|
||||
@end table
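For example, a file encrypted with Common Encryption could be decrypted while stream
copying by passing the key as an input option (the key below is only a placeholder):
@example
ffmpeg -decryption_key 00112233445566778899aabbccddeeff -i encrypted.mp4 -c copy decrypted.mp4
@end example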
|
||||
|
||||
@subsection Audible AAX
|
||||
|
||||
Audible AAX files are encrypted M4B files, and they can be decrypted by specifying a 4 byte activation secret.
|
||||
@example
|
||||
ffmpeg -activation_bytes 1CEB00DA -i test.aax -vn -c:a copy output.mp4
|
||||
@end example
|
||||
|
||||
@section mpegts
|
||||
|
||||
MPEG-2 transport stream demuxer.
|
||||
|
@ -131,6 +131,9 @@ compound literals (@samp{x = (struct s) @{ 17, 23 @};}).
|
||||
@item
|
||||
for loops with variable definition (@samp{for (int i = 0; i < 8; i++)});
|
||||
|
||||
@item
|
||||
Variadic macros (@samp{#define ARRAY(nb, ...) (int[nb + 1])@{ nb, __VA_ARGS__ @}});
|
||||
|
||||
@item
|
||||
Implementation defined behavior for signed integers is assumed to match the
|
||||
expected behavior for two's complement. Non representable values in integer
|
||||
@ -622,7 +625,7 @@ If the patch fixes a bug, did you provide a verbose analysis of the bug?
|
||||
If the patch fixes a bug, did you provide enough information, including
|
||||
a sample, so the bug can be reproduced and the fix can be verified?
|
||||
Note please do not attach samples >100k to mails but rather provide a
|
||||
URL, you can upload to ftp://upload.ffmpeg.org.
|
||||
URL, you can upload to @url{https://streams.videolan.org/upload/}.
|
||||
|
||||
@item
|
||||
Did you provide a verbose summary about what the patch does change?
|
||||
|
1
doc/doxy/.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
/html/
|
@ -30,11 +30,7 @@ follows.
|
||||
|
||||
Advanced Audio Coding (AAC) encoder.
|
||||
|
||||
This encoder is the default AAC encoder, natively implemented into FFmpeg. Its
|
||||
quality is on par or better than libfdk_aac at the default bitrate of 128kbps.
|
||||
This encoder also implements more options, profiles and samplerates than
|
||||
other encoders (with only the AAC-HE profile pending to be implemented) so this
|
||||
encoder has become the default and is the recommended choice.
|
||||
This encoder is the default AAC encoder, natively implemented into FFmpeg.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@ -651,10 +647,7 @@ configuration. You need to explicitly configure the build with
|
||||
so if you allow the use of GPL, you should configure with
|
||||
@code{--enable-gpl --enable-nonfree --enable-libfdk-aac}.
|
||||
|
||||
This encoder is considered to produce output on par or worse at 128kbps to the
|
||||
@ref{aacenc,,the native FFmpeg AAC encoder} but can often produce better
|
||||
sounding audio at identical or lower bitrates and has support for the
|
||||
AAC-HE profiles.
|
||||
This encoder has support for the AAC-HE profiles.
|
||||
|
||||
VBR encoding, enabled through the @option{vbr} or @option{flags
|
||||
+qscale} options, is experimental and only works with some
|
||||
@ -1378,6 +1371,49 @@ makes it possible to store non-rgb pix_fmts.
|
||||
|
||||
@end table
|
||||
|
||||
@section librav1e
|
||||
|
||||
rav1e AV1 encoder wrapper.
|
||||
|
||||
Requires the presence of the rav1e headers and library during configuration.
|
||||
You need to explicitly configure the build with @code{--enable-librav1e}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item qmax
|
||||
Sets the maximum quantizer to use when using bitrate mode.
|
||||
|
||||
@item qmin
|
||||
Sets the minimum quantizer to use when using bitrate mode.
|
||||
|
||||
@item qp
|
||||
Uses quantizer mode to encode at the given quantizer (0-255).
|
||||
|
||||
@item speed
|
||||
Selects the speed preset (0-10) to encode with.
|
||||
|
||||
@item tiles
|
||||
Selects how many tiles to encode with.
|
||||
|
||||
@item tile-rows
|
||||
Selects how many rows of tiles to encode with.
|
||||
|
||||
@item tile-columns
|
||||
Selects how many columns of tiles to encode with.
|
||||
|
||||
@item rav1e-params
|
||||
Set rav1e options using a list of @var{key}=@var{value} pairs separated
|
||||
by ":". See @command{rav1e --help} for a list of options.
|
||||
|
||||
For example to specify librav1e encoding options with @option{-rav1e-params}:
|
||||
|
||||
@example
|
||||
ffmpeg -i input -c:v librav1e -b:v 500K -rav1e-params speed=5:low_latency=true output.mp4
|
||||
@end example
|
||||
|
||||
@end table
|
||||
|
||||
@section libaom-av1
|
||||
|
||||
libaom AV1 encoder wrapper.
|
||||
@ -1465,6 +1501,15 @@ Complexity-based.
|
||||
Cyclic refresh.
|
||||
@end table
|
||||
|
||||
@item tune (@emph{tune})
|
||||
Set the distortion metric the encoder is tuned with. Default is @code{psnr}.
|
||||
|
||||
@table @samp
|
||||
@item psnr (@emph{0})
|
||||
|
||||
@item ssim (@emph{1})
|
||||
@end table
|
||||
|
||||
@item lag-in-frames
|
||||
Set the maximum number of frames which the encoder may keep in flight
|
||||
at any one time for lookahead purposes. Defaults to the internal
|
||||
@ -1544,6 +1589,9 @@ Enable row based multi-threading. Disabled by default.
|
||||
Enable Constrained Directional Enhancement Filter. The libaom-av1
|
||||
encoder enables CDEF by default.
|
||||
|
||||
@item enable-restoration (@emph{boolean})
|
||||
Enable Loop Restoration Filter. Default is true for libaom-av1.
|
||||
|
||||
@item enable-global-motion (@emph{boolean})
|
||||
Enable the use of global motion for block prediction. Default is true.
|
||||
|
||||
@ -1842,16 +1890,14 @@ Enable error resiliency features.
|
||||
Increase sharpness at the expense of lower PSNR.
|
||||
The valid range is [0, 7].
|
||||
|
||||
@item VP8-specific options
|
||||
@table @option
|
||||
@item ts-parameters
|
||||
Sets the temporal scalability configuration using a :-separated list of
|
||||
key=value pairs. For example, to specify temporal scalability parameters
|
||||
with @code{ffmpeg}:
|
||||
@example
|
||||
ffmpeg -i INPUT -c:v libvpx -ts-parameters ts_number_layers=3:\
|
||||
ts_target_bitrate=250000,500000,1000000:ts_rate_decimator=4,2,1:\
|
||||
ts_periodicity=4:ts_layer_id=0,2,1,2 OUTPUT
|
||||
ts_target_bitrate=250,500,1000:ts_rate_decimator=4,2,1:\
|
||||
ts_periodicity=4:ts_layer_id=0,2,1,2:ts_layering_mode=3 OUTPUT
|
||||
@end example
|
||||
Below is a brief explanation of each of the parameters, please
|
||||
refer to @code{struct vpx_codec_enc_cfg} in @code{vpx/vpx_encoder.h} for more
|
||||
@ -1860,13 +1906,38 @@ details.
|
||||
@item ts_number_layers
|
||||
Number of temporal coding layers.
|
||||
@item ts_target_bitrate
|
||||
Target bitrate for each temporal layer.
|
||||
Target bitrate for each temporal layer (in kbps).
|
||||
(bitrate should be inclusive of the lower temporal layer).
|
||||
@item ts_rate_decimator
|
||||
Frame rate decimation factor for each temporal layer.
|
||||
@item ts_periodicity
|
||||
Length of the sequence defining frame temporal layer membership.
|
||||
@item ts_layer_id
|
||||
Template defining the membership of frames to temporal layers.
|
||||
@item ts_layering_mode
|
||||
(optional) Selecting the temporal structure from a set of pre-defined temporal layering modes.
|
||||
Currently supports the following options.
|
||||
@table @option
|
||||
@item 0
|
||||
No temporal layering flags are provided internally,
|
||||
relies on flags being passed in using @code{metadata} field in @code{AVFrame}
|
||||
with following keys.
|
||||
@table @option
|
||||
@item vp8-flags
|
||||
Sets the flags passed into the encoder to indicate the referencing scheme for
|
||||
the current frame.
|
||||
Refer to function @code{vpx_codec_encode} in @code{vpx/vpx_encoder.h} for more
|
||||
details.
|
||||
@item temporal_id
|
||||
Explicitly sets the temporal id of the current frame to encode.
|
||||
@end table
|
||||
@item 2
|
||||
Two temporal layers. 0-1...
|
||||
@item 3
|
||||
Three temporal layers. 0-2-1-2...; with single reference frame.
|
||||
@item 4
|
||||
Same as option "3", except there is a dependency between
|
||||
the two temporal layer 2 frames within the temporal period.
|
||||
@end table
|
||||
@end table
|
||||
|
||||
@ -2371,6 +2442,20 @@ during configuration. You need to explicitly configure the build with
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item b
|
||||
Sets target video bitrate.
|
||||
|
||||
@item bf
|
||||
|
||||
@item g
|
||||
Set the GOP size.
|
||||
|
||||
@item keyint_min
|
||||
Minimum GOP size.
|
||||
|
||||
@item refs
|
||||
Number of reference frames each P-frame can use. The range is from @var{1-16}.
|
||||
|
||||
@item preset
|
||||
Set the x265 preset.
|
||||
|
||||
@ -2383,6 +2468,28 @@ Set profile restrictions.
|
||||
@item crf
|
||||
Set the quality for constant quality mode.
|
||||
|
||||
@item qp
|
||||
Set constant quantization rate control method parameter.
|
||||
|
||||
@item qmin
|
||||
Minimum quantizer scale.
|
||||
|
||||
@item qmax
|
||||
Maximum quantizer scale.
|
||||
|
||||
@item qdiff
|
||||
Maximum difference between quantizer scales.
|
||||
|
||||
@item qblur
|
||||
Quantizer curve blur
|
||||
|
||||
@item qcomp
|
||||
Quantizer curve compression factor
|
||||
|
||||
@item i_qfactor
|
||||
|
||||
@item b_qfactor
|
||||
|
||||
@item forced-idr
|
||||
Normally, when forcing a I-frame type, the encoder can select any type
|
||||
of I-frame. This option forces it to choose an IDR-frame.
|
||||
@ -2618,6 +2725,14 @@ fastest.
|
||||
|
||||
@end table
|
||||
|
||||
@section MediaFoundation
|
||||
|
||||
This provides wrappers to encoders (both audio and video) in the
|
||||
MediaFoundation framework. It can access both SW and HW encoders.
|
||||
Video encoders can take input in either of nv12 or yuv420p form
|
||||
(some encoders support both, some support only either - in practice,
|
||||
nv12 is the safer choice, especially among HW encoders).
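For instance, assuming a Windows build with MediaFoundation support and the
@code{h264_mf} wrapper available, a simple encode could look like:
@example
ffmpeg -i INPUT -c:v h264_mf -b:v 2M -c:a aac OUTPUT.mp4
@end example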
|
||||
|
||||
@section mpeg2
|
||||
|
||||
MPEG-2 video encoder.
|
||||
@ -2625,6 +2740,20 @@ MPEG-2 video encoder.
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item profile @var{integer}
|
||||
Select the mpeg2 profile to encode:
|
||||
|
||||
@table @samp
|
||||
@item 422
|
||||
@item main
|
||||
@item ss
|
||||
Spatially Scalable
|
||||
@item snr
|
||||
SNR Scalable
|
||||
@item high
|
||||
@item simple
|
||||
@end table
|
||||
|
||||
@item seq_disp_ext @var{integer}
|
||||
Specifies if the encoder should write a sequence_display_extension to the
|
||||
output.
|
||||
@ -2733,7 +2862,7 @@ recommended value) and do not set a size constraint.
|
||||
|
||||
@section QSV encoders
|
||||
|
||||
The family of Intel QuickSync Video encoders (MPEG-2, H.264 and HEVC)
|
||||
The family of Intel QuickSync Video encoders (MPEG-2, H.264, HEVC, JPEG/MJPEG and VP9)
|
||||
|
||||
The ratecontrol method is selected as follows:
|
||||
|
||||
@ -3116,6 +3245,14 @@ and they can also be used in Matroska files.
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item palette
|
||||
Specify the global palette used by the bitmaps.
|
||||
|
||||
The format for this option is a string containing 16 24-bits hexadecimal
|
||||
numbers (without 0x prefix) separated by commas, for example @code{0d00ee,
|
||||
ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
|
||||
7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b}.
|
||||
|
||||
@item even_rows_fix
|
||||
When set to 1, enable a work-around that makes the number of pixel rows
|
||||
even in all subtitles. This fixes a problem with some players that
|
||||
|
24
doc/examples/.gitignore
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
/avio_list_dir
|
||||
/avio_reading
|
||||
/decode_audio
|
||||
/decode_video
|
||||
/demuxing_decoding
|
||||
/encode_audio
|
||||
/encode_video
|
||||
/extract_mvs
|
||||
/filter_audio
|
||||
/filtering_audio
|
||||
/filtering_video
|
||||
/http_multiclient
|
||||
/hw_decode
|
||||
/metadata
|
||||
/muxing
|
||||
/pc-uninstalled
|
||||
/qsvdec
|
||||
/remuxing
|
||||
/resampling_audio
|
||||
/scaling_video
|
||||
/transcode_aac
|
||||
/transcoding
|
||||
/vaapi_encode
|
||||
/vaapi_transcode
|
@ -1,4 +1,4 @@
|
||||
EXAMPLES-$(CONFIG_AVIO_DIR_CMD_EXAMPLE) += avio_dir_cmd
|
||||
EXAMPLES-$(CONFIG_AVIO_LIST_DIR_EXAMPLE) += avio_list_dir
|
||||
EXAMPLES-$(CONFIG_AVIO_READING_EXAMPLE) += avio_reading
|
||||
EXAMPLES-$(CONFIG_DECODE_AUDIO_EXAMPLE) += decode_audio
|
||||
EXAMPLES-$(CONFIG_DECODE_VIDEO_EXAMPLE) += decode_video
|
||||
|
@ -11,7 +11,7 @@ CFLAGS += -Wall -g
|
||||
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
|
||||
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
|
||||
|
||||
EXAMPLES= avio_dir_cmd \
|
||||
EXAMPLES= avio_list_dir \
|
||||
avio_reading \
|
||||
decode_audio \
|
||||
decode_video \
|
||||
|
@ -102,38 +102,15 @@ static int list_op(const char *input_dir)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int del_op(const char *url)
|
||||
{
|
||||
int ret = avpriv_io_delete(url);
|
||||
if (ret < 0)
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot delete '%s': %s.\n", url, av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int move_op(const char *src, const char *dst)
|
||||
{
|
||||
int ret = avpriv_io_move(src, dst);
|
||||
if (ret < 0)
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot move '%s' into '%s': %s.\n", src, dst, av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static void usage(const char *program_name)
|
||||
{
|
||||
fprintf(stderr, "usage: %s OPERATION entry1 [entry2]\n"
|
||||
"API example program to show how to manipulate resources "
|
||||
"accessed through AVIOContext.\n"
|
||||
"OPERATIONS:\n"
|
||||
"list list content of the directory\n"
|
||||
"move rename content in directory\n"
|
||||
"del delete content in directory\n",
|
||||
program_name);
|
||||
fprintf(stderr, "usage: %s input_dir\n"
|
||||
"API example program to show how to list files in directory "
|
||||
"accessed through AVIOContext.\n", program_name);
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
const char *op = NULL;
|
||||
int ret;
|
||||
|
||||
av_log_set_level(AV_LOG_DEBUG);
|
||||
@ -145,32 +122,7 @@ int main(int argc, char *argv[])
|
||||
|
||||
avformat_network_init();
|
||||
|
||||
op = argv[1];
|
||||
if (strcmp(op, "list") == 0) {
|
||||
if (argc < 3) {
|
||||
av_log(NULL, AV_LOG_INFO, "Missing argument for list operation.\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
ret = list_op(argv[2]);
|
||||
}
|
||||
} else if (strcmp(op, "del") == 0) {
|
||||
if (argc < 3) {
|
||||
av_log(NULL, AV_LOG_INFO, "Missing argument for del operation.\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
ret = del_op(argv[2]);
|
||||
}
|
||||
} else if (strcmp(op, "move") == 0) {
|
||||
if (argc < 4) {
|
||||
av_log(NULL, AV_LOG_INFO, "Missing argument for move operation.\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
ret = move_op(argv[2], argv[3]);
|
||||
}
|
||||
} else {
|
||||
av_log(NULL, AV_LOG_INFO, "Invalid operation %s\n", op);
|
||||
ret = AVERROR(EINVAL);
|
||||
}
|
||||
ret = list_op(argv[1]);
|
||||
|
||||
avformat_network_deinit();
|
||||
|
@ -39,6 +39,35 @@
|
||||
#define AUDIO_INBUF_SIZE 20480
|
||||
#define AUDIO_REFILL_THRESH 4096
|
||||
|
||||
static int get_format_from_sample_fmt(const char **fmt,
|
||||
enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
int i;
|
||||
struct sample_fmt_entry {
|
||||
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
|
||||
} sample_fmt_entries[] = {
|
||||
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
|
||||
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
|
||||
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
|
||||
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
|
||||
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
|
||||
};
|
||||
*fmt = NULL;
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
|
||||
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
|
||||
if (sample_fmt == entry->sample_fmt) {
|
||||
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr,
|
||||
"sample format %s is not supported as output format\n",
|
||||
av_get_sample_fmt_name(sample_fmt));
|
||||
return -1;
|
||||
}
|
||||
|
||||
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame,
|
||||
FILE *outfile)
|
||||
{
|
||||
@ -86,6 +115,9 @@ int main(int argc, char **argv)
|
||||
size_t data_size;
|
||||
AVPacket *pkt;
|
||||
AVFrame *decoded_frame = NULL;
|
||||
enum AVSampleFormat sfmt;
|
||||
int n_channels = 0;
|
||||
const char *fmt;
|
||||
|
||||
if (argc <= 2) {
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
@ -172,6 +204,26 @@ int main(int argc, char **argv)
|
||||
pkt->size = 0;
|
||||
decode(c, pkt, decoded_frame, outfile);
|
||||
|
||||
/* print output pcm information, because there is no metadata of pcm */
|
||||
sfmt = c->sample_fmt;
|
||||
|
||||
if (av_sample_fmt_is_planar(sfmt)) {
|
||||
const char *packed = av_get_sample_fmt_name(sfmt);
|
||||
printf("Warning: the sample format the decoder produced is planar "
|
||||
"(%s). This example will output the first channel only.\n",
|
||||
packed ? packed : "?");
|
||||
sfmt = av_get_packed_sample_fmt(sfmt);
|
||||
}
|
||||
|
||||
n_channels = c->channels;
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
|
||||
goto end;
|
||||
|
||||
printf("Play the output audio file with the command:\n"
|
||||
"ffplay -f %s -ac %d -ar %d %s\n",
|
||||
fmt, n_channels, c->sample_rate,
|
||||
outfilename);
|
||||
end:
|
||||
fclose(outfile);
|
||||
fclose(f);
|
||||
|
||||
|
@ -95,7 +95,8 @@ int main(int argc, char **argv)
|
||||
AVPacket *pkt;
|
||||
|
||||
if (argc <= 2) {
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n"
|
||||
"And check your input file is encoded by mpeg1video please.\n", argv[0]);
|
||||
exit(0);
|
||||
}
|
||||
filename = argv[1];
|
||||
|
@ -55,95 +55,93 @@ static AVPacket pkt;
|
||||
static int video_frame_count = 0;
|
||||
static int audio_frame_count = 0;
|
||||
|
||||
/* Enable or disable frame reference counting. You are not supposed to support
|
||||
* both paths in your application but pick the one most appropriate to your
|
||||
* needs. Look for the use of refcount in this example to see what are the
|
||||
* differences of API usage between them. */
|
||||
static int refcount = 0;
|
||||
|
||||
static int decode_packet(int *got_frame, int cached)
|
||||
static int output_video_frame(AVFrame *frame)
|
||||
{
|
||||
int ret = 0;
|
||||
int decoded = pkt.size;
|
||||
|
||||
*got_frame = 0;
|
||||
|
||||
if (pkt.stream_index == video_stream_idx) {
|
||||
/* decode video frame */
|
||||
ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (*got_frame) {
|
||||
|
||||
if (frame->width != width || frame->height != height ||
|
||||
frame->format != pix_fmt) {
|
||||
/* To handle this change, one could call av_image_alloc again and
|
||||
* decode the following frames into another rawvideo file. */
|
||||
fprintf(stderr, "Error: Width, height and pixel format have to be "
|
||||
"constant in a rawvideo file, but the width, height or "
|
||||
"pixel format of the input video changed:\n"
|
||||
"old: width = %d, height = %d, format = %s\n"
|
||||
"new: width = %d, height = %d, format = %s\n",
|
||||
width, height, av_get_pix_fmt_name(pix_fmt),
|
||||
frame->width, frame->height,
|
||||
av_get_pix_fmt_name(frame->format));
|
||||
return -1;
|
||||
}
|
||||
|
||||
printf("video_frame%s n:%d coded_n:%d\n",
|
||||
cached ? "(cached)" : "",
|
||||
video_frame_count++, frame->coded_picture_number);
|
||||
|
||||
/* copy decoded frame to destination buffer:
|
||||
* this is required since rawvideo expects non aligned data */
|
||||
av_image_copy(video_dst_data, video_dst_linesize,
|
||||
(const uint8_t **)(frame->data), frame->linesize,
|
||||
pix_fmt, width, height);
|
||||
|
||||
/* write to rawvideo file */
|
||||
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
|
||||
}
|
||||
} else if (pkt.stream_index == audio_stream_idx) {
|
||||
/* decode audio frame */
|
||||
ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
/* Some audio decoders decode only part of the packet, and have to be
|
||||
* called again with the remainder of the packet data.
|
||||
* Sample: fate-suite/lossless-audio/luckynight-partial.shn
|
||||
* Also, some decoders might over-read the packet. */
|
||||
decoded = FFMIN(ret, pkt.size);
|
||||
|
||||
if (*got_frame) {
|
||||
size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
|
||||
printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
|
||||
cached ? "(cached)" : "",
|
||||
audio_frame_count++, frame->nb_samples,
|
||||
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
|
||||
|
||||
/* Write the raw audio data samples of the first plane. This works
|
||||
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
|
||||
* most audio decoders output planar audio, which uses a separate
|
||||
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
|
||||
* In other words, this code will write only the first audio channel
|
||||
* in these cases.
|
||||
* You should use libswresample or libavfilter to convert the frame
|
||||
* to packed data. */
|
||||
fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
|
||||
}
|
||||
if (frame->width != width || frame->height != height ||
|
||||
frame->format != pix_fmt) {
|
||||
/* To handle this change, one could call av_image_alloc again and
|
||||
* decode the following frames into another rawvideo file. */
|
||||
fprintf(stderr, "Error: Width, height and pixel format have to be "
|
||||
"constant in a rawvideo file, but the width, height or "
|
||||
"pixel format of the input video changed:\n"
|
||||
"old: width = %d, height = %d, format = %s\n"
|
||||
"new: width = %d, height = %d, format = %s\n",
|
||||
width, height, av_get_pix_fmt_name(pix_fmt),
|
||||
frame->width, frame->height,
|
||||
av_get_pix_fmt_name(frame->format));
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* If we use frame reference counting, we own the data and need
|
||||
* to de-reference it when we don't use it anymore */
|
||||
if (*got_frame && refcount)
|
||||
av_frame_unref(frame);
|
||||
printf("video_frame n:%d coded_n:%d\n",
|
||||
video_frame_count++, frame->coded_picture_number);
|
||||
|
||||
return decoded;
|
||||
/* copy decoded frame to destination buffer:
|
||||
* this is required since rawvideo expects non aligned data */
|
||||
av_image_copy(video_dst_data, video_dst_linesize,
|
||||
(const uint8_t **)(frame->data), frame->linesize,
|
||||
pix_fmt, width, height);
|
||||
|
||||
/* write to rawvideo file */
|
||||
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int output_audio_frame(AVFrame *frame)
|
||||
{
|
||||
size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
|
||||
printf("audio_frame n:%d nb_samples:%d pts:%s\n",
|
||||
audio_frame_count++, frame->nb_samples,
|
||||
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
|
||||
|
||||
/* Write the raw audio data samples of the first plane. This works
|
||||
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
|
||||
* most audio decoders output planar audio, which uses a separate
|
||||
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
|
||||
* In other words, this code will write only the first audio channel
|
||||
* in these cases.
|
||||
* You should use libswresample or libavfilter to convert the frame
|
||||
* to packed data. */
|
||||
fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int decode_packet(AVCodecContext *dec, const AVPacket *pkt)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
// submit the packet to the decoder
|
||||
ret = avcodec_send_packet(dec, pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error submitting a packet for decoding (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
// get all the available frames from the decoder
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_frame(dec, frame);
|
||||
if (ret < 0) {
|
||||
// those two return values are special and mean there is no output
|
||||
// frame available, but there were no errors during decoding
|
||||
if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
|
||||
return 0;
|
||||
|
||||
fprintf(stderr, "Error during decoding (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
// write the frame data to output file
|
||||
if (dec->codec->type == AVMEDIA_TYPE_VIDEO)
|
||||
ret = output_video_frame(frame);
|
||||
else
|
||||
ret = output_audio_frame(frame);
|
||||
|
||||
av_frame_unref(frame);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int open_codec_context(int *stream_idx,
|
||||
@ -186,8 +184,7 @@ static int open_codec_context(int *stream_idx,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Init the decoders, with or without reference counting */
|
||||
av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0);
|
||||
/* Init the decoders */
|
||||
if ((ret = avcodec_open2(*dec_ctx, dec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
@ -230,24 +227,17 @@ static int get_format_from_sample_fmt(const char **fmt,
|
||||
|
||||
int main (int argc, char **argv)
|
||||
{
|
||||
int ret = 0, got_frame;
|
||||
int ret = 0;
|
||||
|
||||
if (argc != 4 && argc != 5) {
|
||||
fprintf(stderr, "usage: %s [-refcount] input_file video_output_file audio_output_file\n"
|
||||
if (argc != 4) {
|
||||
fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
|
||||
"API example program to show how to read frames from an input file.\n"
|
||||
"This program reads frames from a file, decodes them, and writes decoded\n"
|
||||
"video frames to a rawvideo file named video_output_file, and decoded\n"
|
||||
"audio frames to a rawaudio file named audio_output_file.\n\n"
|
||||
"If the -refcount option is specified, the program use the\n"
|
||||
"reference counting frame system which allows keeping a copy of\n"
|
||||
"the data for longer than one decode call.\n"
|
||||
"\n", argv[0]);
|
||||
"audio frames to a rawaudio file named audio_output_file.\n",
|
||||
argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
if (argc == 5 && !strcmp(argv[1], "-refcount")) {
|
||||
refcount = 1;
|
||||
argv++;
|
||||
}
|
||||
src_filename = argv[1];
|
||||
video_dst_filename = argv[2];
|
||||
audio_dst_filename = argv[3];
|
||||
@ -325,23 +315,22 @@ int main (int argc, char **argv)
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
|
||||
AVPacket orig_pkt = pkt;
|
||||
do {
|
||||
ret = decode_packet(&got_frame, 0);
|
||||
if (ret < 0)
|
||||
break;
|
||||
pkt.data += ret;
|
||||
pkt.size -= ret;
|
||||
} while (pkt.size > 0);
|
||||
av_packet_unref(&orig_pkt);
|
||||
// check if the packet belongs to a stream we are interested in, otherwise
|
||||
// skip it
|
||||
if (pkt.stream_index == video_stream_idx)
|
||||
ret = decode_packet(video_dec_ctx, &pkt);
|
||||
else if (pkt.stream_index == audio_stream_idx)
|
||||
ret = decode_packet(audio_dec_ctx, &pkt);
|
||||
av_packet_unref(&pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
|
||||
/* flush cached frames */
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
do {
|
||||
decode_packet(&got_frame, 1);
|
||||
} while (got_frame);
|
||||
/* flush the decoders */
|
||||
if (video_dec_ctx)
|
||||
decode_packet(video_dec_ctx, NULL);
|
||||
if (audio_dec_ctx)
|
||||
decode_packet(audio_dec_ctx, NULL);
|
||||
|
||||
printf("Demuxing succeeded.\n");
|
||||
|
||||
|
@ -145,7 +145,7 @@ int main(int argc, char **argv)
|
||||
frame->width = c->width;
|
||||
frame->height = c->height;
|
||||
|
||||
ret = av_frame_get_buffer(frame, 32);
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate the video frame data\n");
|
||||
exit(1);
|
||||
@ -186,7 +186,8 @@ int main(int argc, char **argv)
|
||||
encode(c, NULL, pkt, f);
|
||||
|
||||
/* add sequence end code to have a real MPEG file */
|
||||
fwrite(endcode, 1, sizeof(endcode), f);
|
||||
if (codec->id == AV_CODEC_ID_MPEG1VIDEO || codec->id == AV_CODEC_ID_MPEG2VIDEO)
|
||||
fwrite(endcode, 1, sizeof(endcode), f);
|
||||
fclose(f);
|
||||
|
||||
avcodec_free_context(&c);
|
||||
|
@ -78,15 +78,45 @@ static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
|
||||
pkt->stream_index);
|
||||
}
|
||||
|
||||
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
|
||||
static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
|
||||
AVStream *st, AVFrame *frame)
|
||||
{
|
||||
/* rescale output packet timestamp values from codec to stream timebase */
|
||||
av_packet_rescale_ts(pkt, *time_base, st->time_base);
|
||||
pkt->stream_index = st->index;
|
||||
int ret;
|
||||
|
||||
/* Write the compressed frame to the media file. */
|
||||
log_packet(fmt_ctx, pkt);
|
||||
return av_interleaved_write_frame(fmt_ctx, pkt);
|
||||
// send the frame to the encoder
|
||||
ret = avcodec_send_frame(c, frame);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error sending a frame to the encoder: %s\n",
|
||||
av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
AVPacket pkt = { 0 };
|
||||
|
||||
ret = avcodec_receive_packet(c, &pkt);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
else if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding a frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* rescale output packet timestamp values from codec to stream timebase */
|
||||
av_packet_rescale_ts(&pkt, c->time_base, st->time_base);
|
||||
pkt.stream_index = st->index;
|
||||
|
||||
/* Write the compressed frame to the media file. */
|
||||
log_packet(fmt_ctx, &pkt);
|
||||
ret = av_interleaved_write_frame(fmt_ctx, &pkt);
|
||||
av_packet_unref(&pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return ret == AVERROR_EOF ? 1 : 0;
|
||||
}
|
||||
|
||||
/* Add an output stream. */
|
||||
@ -285,7 +315,7 @@ static AVFrame *get_audio_frame(OutputStream *ost)
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, ost->enc->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
|
||||
return NULL;
|
||||
|
||||
for (j = 0; j <frame->nb_samples; j++) {
|
||||
@ -309,13 +339,10 @@ static AVFrame *get_audio_frame(OutputStream *ost)
|
||||
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVPacket pkt = { 0 }; // data and size must be 0;
|
||||
AVFrame *frame;
|
||||
int ret;
|
||||
int got_packet;
|
||||
int dst_nb_samples;
|
||||
|
||||
av_init_packet(&pkt);
|
||||
c = ost->enc;
|
||||
|
||||
frame = get_audio_frame(ost);
|
||||
@ -349,22 +376,7 @@ static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
ost->samples_count += dst_nb_samples;
|
||||
}
|
||||
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_packet) {
|
||||
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing audio frame: %s\n",
|
||||
av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return (frame || got_packet) ? 0 : 1;
|
||||
return write_frame(oc, c, ost->st, frame);
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
@ -384,7 +396,7 @@ static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
|
||||
picture->height = height;
|
||||
|
||||
/* allocate the buffers for the frame data */
|
||||
ret = av_frame_get_buffer(picture, 32);
|
||||
ret = av_frame_get_buffer(picture, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate frame data.\n");
|
||||
exit(1);
|
||||
@ -464,7 +476,7 @@ static AVFrame *get_video_frame(OutputStream *ost)
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, c->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
|
||||
return NULL;
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
@ -506,37 +518,8 @@ static AVFrame *get_video_frame(OutputStream *ost)
|
||||
*/
|
||||
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c;
|
||||
AVFrame *frame;
|
||||
int got_packet = 0;
|
||||
AVPacket pkt = { 0 };
|
||||
return write_frame(oc, ost->enc, ost->st, get_video_frame(ost));
|
||||
|
||||
c = ost->enc;
|
||||
|
||||
frame = get_video_frame(ost);
|
||||
|
||||
av_init_packet(&pkt);
|
||||
|
||||
/* encode the image */
|
||||
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_packet) {
|
||||
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return (frame || got_packet) ? 0 : 1;
|
||||
}
|
||||
|
||||
static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
|
@ -172,7 +172,7 @@ int main(int argc, char *argv[])
|
||||
sw_frame->width = width;
|
||||
sw_frame->height = height;
|
||||
sw_frame->format = AV_PIX_FMT_NV12;
|
||||
if ((err = av_frame_get_buffer(sw_frame, 32)) < 0)
|
||||
if ((err = av_frame_get_buffer(sw_frame, 0)) < 0)
|
||||
goto close;
|
||||
if ((err = fread((uint8_t*)(sw_frame->data[0]), size, 1, fin)) <= 0)
|
||||
break;
|
||||
|
@ -149,6 +149,8 @@ the synchronisation of the samples directory.
|
||||
|
||||
@chapter Uploading new samples to the fate suite
|
||||
|
||||
If you need a sample uploaded, send a mail to samples-request.
|
||||
|
||||
This is for developers who have an account on the fate suite server.
|
||||
If you upload new samples, please make sure they are as small as possible;
space on each client, network bandwidth and so on benefit from smaller test cases.
|
||||
@ -157,6 +159,8 @@ practice generally do not replace, remove or overwrite files as it likely would
|
||||
break older checkouts or releases.
|
||||
Also all needed samples for a commit should be uploaded, ideally 24
|
||||
hours, before the push.
|
||||
If you need an account for frequently uploading samples or you wish to help
|
||||
others by doing that send a mail to ffmpeg-devel.
|
||||
|
||||
@example
|
||||
#First update your local samples copy:
|
||||
|
@ -879,12 +879,19 @@ Deprecated see -bsf
|
||||
|
||||
@item -force_key_frames[:@var{stream_specifier}] @var{time}[,@var{time}...] (@emph{output,per-stream})
|
||||
@item -force_key_frames[:@var{stream_specifier}] expr:@var{expr} (@emph{output,per-stream})
|
||||
Force key frames at the specified timestamps, more precisely at the first
|
||||
frames after each specified time.
|
||||
@item -force_key_frames[:@var{stream_specifier}] source (@emph{output,per-stream})
|
||||
|
||||
If the argument is prefixed with @code{expr:}, the string @var{expr}
|
||||
is interpreted like an expression and is evaluated for each frame. A
|
||||
key frame is forced in case the evaluation is non-zero.
|
||||
@var{force_key_frames} can take arguments of the following form:
|
||||
|
||||
@table @option
|
||||
|
||||
@item @var{time}[,@var{time}...]
|
||||
If the argument consists of timestamps, ffmpeg will round the specified times to the nearest
|
||||
output timestamp as per the encoder time base and force a keyframe at the first frame having
|
||||
timestamp equal or greater than the computed timestamp. Note that if the encoder time base is too
|
||||
coarse, then the keyframes may be forced on frames with timestamps lower than the specified time.
|
||||
The default encoder time base is the inverse of the output framerate but may be set otherwise
|
||||
via @code{-enc_time_base}.
|
||||
|
||||
If one of the times is "@code{chapters}[@var{delta}]", it is expanded into
|
||||
the time of the beginning of all chapters in the file, shifted by
|
||||
@ -898,6 +905,11 @@ before the beginning of every chapter:
|
||||
-force_key_frames 0:05:00,chapters-0.1
|
||||
@end example
|
||||
|
||||
@item expr:@var{expr}
|
||||
If the argument is prefixed with @code{expr:}, the string @var{expr}
|
||||
is interpreted like an expression and is evaluated for each frame. A
|
||||
key frame is forced in case the evaluation is non-zero.
|
||||
|
||||
The expression in @var{expr} can contain the following constants:
|
||||
@table @option
|
||||
@item n
|
||||
@ -925,6 +937,12 @@ starting from second 13:
|
||||
-force_key_frames expr:if(isnan(prev_forced_t),gte(t,13),gte(t,prev_forced_t+5))
|
||||
@end example
|
||||
|
||||
@item source
|
||||
If the argument is @code{source}, ffmpeg will force a key frame if
|
||||
the current frame being encoded is marked as a key frame in its source.
|
||||
|
||||
@end table
|
||||
|
||||
Note that forcing too many keyframes is very harmful for the lookahead
|
||||
algorithms of certain encoders: using fixed-GOP options or similar
|
||||
would be more efficient.
|
||||
@ -1011,6 +1029,35 @@ Choose the GPU device on the second platform supporting the @emph{cl_khr_fp16}
|
||||
extension.
|
||||
@end table
|
||||
|
||||
@item vulkan
|
||||
If @var{device} is an integer, it selects the device by its index in a
|
||||
system-dependent list of devices. If @var{device} is any other string, it
|
||||
selects the first device with a name containing that string as a substring.
|
||||
|
||||
The following options are recognized:
|
||||
@table @option
|
||||
@item debug
|
||||
If set to 1, enables the validation layer, if installed.
|
||||
@item linear_images
|
||||
If set to 1, images allocated by the hwcontext will be linear and locally mappable.
|
||||
@item instance_extensions
|
||||
A plus separated list of additional instance extensions to enable.
|
||||
@item device_extensions
|
||||
A plus separated list of additional device extensions to enable.
|
||||
@end table
|
||||
|
||||
Examples:
|
||||
@table @emph
|
||||
@item -init_hw_device vulkan:1
|
||||
Choose the second device on the system.
|
||||
|
||||
@item -init_hw_device vulkan:RADV
|
||||
Choose the first device with a name containing the string @emph{RADV}.
|
||||
|
||||
@item -init_hw_device vulkan:0,instance_extensions=VK_KHR_wayland_surface+VK_KHR_xcb_surface
|
||||
Choose the first device and enable the Wayland and XCB instance extensions.
|
||||
@end table
|
||||
|
||||
@end table
|
||||
|
||||
@item -init_hw_device @var{type}[=@var{name}]@@@var{source}
|
||||
@ -1383,7 +1430,7 @@ it will usually display as 0 if not supported.
|
||||
Show benchmarking information during the encode.
|
||||
Shows real, system and user time used in various steps (audio/video encode/decode).
|
||||
@item -timelimit @var{duration} (@emph{global})
|
||||
Exit after ffmpeg has been running for @var{duration} seconds.
|
||||
Exit after ffmpeg has been running for @var{duration} seconds in CPU user time.
|
||||
@item -dump (@emph{global})
|
||||
Dump each input packet to stderr.
|
||||
@item -hex (@emph{global})
|
||||
@ -1515,6 +1562,10 @@ Enable bitexact mode for (de)muxer and (de/en)coder
|
||||
Finish encoding when the shortest input stream ends.
|
||||
@item -dts_delta_threshold
|
||||
Timestamp discontinuity delta threshold.
|
||||
@item -dts_error_threshold @var{seconds}
|
||||
Timestamp error delta threshold. This threshold is used to discard crazy/damaged
timestamps. The default is 30 hours, which is arbitrarily picked and quite
conservative.
|
||||
@item -muxdelay @var{seconds} (@emph{output})
|
||||
Set the maximum demux-decode delay.
|
||||
@item -muxpreload @var{seconds} (@emph{output})
|
||||
@ -1670,6 +1721,8 @@ Stop and abort on various conditions. The following flags are available:
|
||||
@table @option
|
||||
@item empty_output
|
||||
No packets were passed to the muxer, the output is empty.
|
||||
@item empty_output_stream
|
||||
No packets were passed to the muxer in some of the output streams.
|
||||
@end table
|
||||
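As a hedged illustration (the file names here are placeholders, not taken from
elsewhere in this document), a conversion that should fail outright rather than
silently produce an empty file could be invoked as:
@example
# exit with an error instead of writing an empty output file
ffmpeg -abort_on empty_output -i in.mp4 -c copy out.mkv
@end example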
|
||||
@item -xerror (@emph{global})
|
||||
|
@ -133,8 +133,9 @@ This option has been deprecated in favor of private options, try -pixel_format.
|
||||
@item -stats
|
||||
Print several playback statistics, in particular show the stream
|
||||
duration, the codec parameters, the current position in the stream and
|
||||
the audio/video synchronisation drift. It is on by default, to
|
||||
explicitly disable it you need to specify @code{-nostats}.
|
||||
the audio/video synchronisation drift. It is shown by default, unless the
|
||||
log level is lower than @code{info}. Its display can be forced by manually
|
||||
specifying this option. To disable it, you need to specify @code{-nostats}.
|
||||
|
||||
@item -fast
|
||||
Non-spec-compliant optimizations.
|
||||
|
@ -226,6 +226,7 @@
|
||||
<xsd:attribute name="height" type="xsd:int"/>
|
||||
<xsd:attribute name="coded_width" type="xsd:int"/>
|
||||
<xsd:attribute name="coded_height" type="xsd:int"/>
|
||||
<xsd:attribute name="closed_captions" type="xsd:boolean"/>
|
||||
<xsd:attribute name="has_b_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="display_aspect_ratio" type="xsd:string"/>
|
||||
|
@ -236,13 +236,11 @@ ffmpeg [...] -loglevel +repeat
|
||||
By default the program logs to stderr. If coloring is supported by the
|
||||
terminal, colors are used to mark errors and warnings. Log coloring
|
||||
can be disabled setting the environment variable
|
||||
@env{AV_LOG_FORCE_NOCOLOR} or @env{NO_COLOR}, or can be forced setting
|
||||
@env{AV_LOG_FORCE_NOCOLOR}, or can be forced setting
|
||||
the environment variable @env{AV_LOG_FORCE_COLOR}.
|
||||
The use of the environment variable @env{NO_COLOR} is deprecated and
|
||||
will be dropped in a future FFmpeg version.
|
||||
|
||||
@item -report
|
||||
Dump full command line and console output to a file named
|
||||
Dump full command line and log output to a file named
|
||||
@code{@var{program}-@var{YYYYMMDD}-@var{HHMMSS}.log} in the current
|
||||
directory.
|
||||
This file can be useful for bug reports.
|
||||
|
3719 doc/filters.texi
File diff suppressed because it is too large
@ -27,6 +27,10 @@ stream information. A higher value will enable detecting more
|
||||
information in case it is dispersed into the stream, but will increase
|
||||
latency. Must be an integer not less than 32. It is 5000000 by default.
|
||||
|
||||
@item max_probe_packets @var{integer} (@emph{input})
|
||||
Set the maximum number of buffered packets when probing a codec.
|
||||
Default is 2500 packets.
|
||||
|
||||
@item packetsize @var{integer} (@emph{output})
|
||||
Set packet size.
|
||||
|
||||
@ -139,7 +143,7 @@ Consider things that a sane encoder should not do as an error.
|
||||
|
||||
@item max_interleave_delta @var{integer} (@emph{output})
|
||||
Set maximum buffering duration for interleaving. The duration is
|
||||
expressed in microseconds, and defaults to 1000000 (1 second).
|
||||
expressed in microseconds, and defaults to 10000000 (10 seconds).
|
||||
|
||||
To ensure all the streams are interleaved correctly, libavformat will
|
||||
wait until it has at least one packet for each stream before actually
|
||||
|
@ -27,29 +27,41 @@ enable it.
|
||||
|
||||
@section AMD AMF/VCE
|
||||
|
||||
FFmpeg can use the AMD Advanced Media Framework library under Windows
|
||||
for accelerated H.264 and HEVC encoding on hardware with Video Coding Engine (VCE).
|
||||
FFmpeg can use the AMD Advanced Media Framework library
|
||||
for accelerated H.264 and HEVC (Windows only) encoding on hardware with Video Coding Engine (VCE).
|
||||
|
||||
To enable support you must obtain the AMF framework header files from
|
||||
To enable support you must obtain the AMF framework header files (version 1.4.9 or later) from
|
||||
@url{https://github.com/GPUOpen-LibrariesAndSDKs/AMF.git}.
|
||||
|
||||
Create an @code{AMF/} directory in the system include path.
|
||||
Copy the contents of @code{AMF/amf/public/include/} into that directory.
|
||||
Then configure FFmpeg with @code{--enable-amf}.
|
||||
|
||||
Initialization of the AMF encoder is attempted in the following order:
1) via DX11 (Windows only)
2) via DX9 (Windows only)
3) via Vulkan
|
||||
|
||||
To use the H.264 (AMD VCE) encoder on Linux, amdgpu-pro version 19.20+ and the amf-amdgpu-pro
package are required (the amdgpu-pro archive contains it, but does not install it automatically).

This driver can be installed using the amdgpu-pro-install script from the official AMD driver archive.
|
||||
|
||||
@section AviSynth
|
||||
|
||||
FFmpeg can read AviSynth scripts as input. To enable support, pass
|
||||
@code{--enable-avisynth} to configure. The correct headers are
|
||||
included in compat/avisynth/, which allows the user to enable support
|
||||
without needing to search for these headers themselves.
|
||||
@code{--enable-avisynth} to configure after installing the headers
|
||||
provided by @url{https://github.com/AviSynth/AviSynthPlus, AviSynth+}.
|
||||
AviSynth+ can be configured to install only the headers by either
|
||||
passing @code{-DHEADERS_ONLY:bool=on} to the normal CMake-based build
|
||||
system, or by using the supplied @code{GNUmakefile}.
|
||||
|
||||
For Windows, supported AviSynth variants are
|
||||
@url{http://avisynth.nl, AviSynth 2.6 RC1 or higher} for 32-bit builds and
|
||||
@url{http://avisynth.nl/index.php/AviSynth+, AviSynth+ r1718 or higher} for 32-bit and 64-bit builds.
|
||||
|
||||
For Linux and OS X, the supported AviSynth variant is
|
||||
@url{https://github.com/avxsynth/avxsynth, AvxSynth}.
|
||||
For Linux, macOS, and BSD, the only supported AviSynth variant is
|
||||
@url{https://github.com/AviSynth/AviSynthPlus, AviSynth+}, starting with version 3.5.
|
||||
|
||||
@float NOTE
|
||||
In 2016, AviSynth+ added support for building with GCC. However, due to
|
||||
@ -67,10 +79,11 @@ GCC builds of AviSynth+ without any special flags.
|
||||
@end float
|
||||
|
||||
@float NOTE
|
||||
AviSynth and AvxSynth are loaded dynamically. Distributors can build FFmpeg
|
||||
with @code{--enable-avisynth}, and the binaries will work regardless of the
|
||||
end user having AviSynth or AvxSynth installed - they'll only need to be
|
||||
installed to use AviSynth scripts (obviously).
|
||||
AviSynth(+) is loaded dynamically. Distributors can build FFmpeg
|
||||
with @code{--enable-avisynth}, and the binaries will work regardless
|
||||
of the end user having AviSynth installed. If/when an end user
|
||||
would like to use AviSynth scripts, then they can install AviSynth(+)
|
||||
and FFmpeg will be able to find and use it to open scripts.
|
||||
@end float
|
||||
|
||||
@section Chromaprint
|
||||
@ -243,6 +256,13 @@ FFmpeg can use the OpenJPEG libraries for decoding/encoding J2K videos. Go to
|
||||
instructions. To enable using OpenJPEG in FFmpeg, pass @code{--enable-libopenjpeg} to
|
||||
@file{./configure}.
|
||||
|
||||
@section rav1e
|
||||
|
||||
FFmpeg can make use of rav1e (Rust AV1 Encoder) via its C bindings to encode videos.
|
||||
Go to @url{https://github.com/xiph/rav1e/} and follow the instructions to build
|
||||
the C library. To enable using rav1e in FFmpeg, pass @code{--enable-librav1e}
|
||||
to @file{./configure}.
|
||||
|
||||
@section TwoLAME
|
||||
|
||||
FFmpeg can make use of the TwoLAME library for MP2 encoding.
|
||||
@ -399,6 +419,9 @@ library:
|
||||
@tab Contains header with version and mode info, simplifying playback.
|
||||
@item CRI ADX @tab X @tab X
|
||||
@tab Audio-only format used in console video games.
|
||||
@item CRI AIX @tab @tab X
|
||||
@item CRI HCA @tab @tab X
|
||||
@tab Audio-only format used in console video games.
|
||||
@item Discworld II BMV @tab @tab X
|
||||
@item Interplay C93 @tab @tab X
|
||||
@tab Used in the game Cyberia from Interplay.
|
||||
@ -554,7 +577,6 @@ library:
|
||||
@item raw aptX @tab X @tab X
|
||||
@item raw aptX HD @tab X @tab X
|
||||
@item raw Chinese AVS video @tab X @tab X
|
||||
@item raw CRI ADX @tab X @tab X
|
||||
@item raw Dirac @tab X @tab X
|
||||
@item raw DNxHD @tab X @tab X
|
||||
@item raw DTS @tab X @tab X
|
||||
@ -797,11 +819,13 @@ following image formats are supported:
|
||||
@item Autodesk RLE @tab @tab X
|
||||
@tab fourcc: AASC
|
||||
@item AV1 @tab E @tab E
|
||||
@tab Supported through external libraries libaom and libdav1d
|
||||
@tab Supported through external libraries libaom, libdav1d and librav1e
|
||||
@item Avid 1:1 10-bit RGB Packer @tab X @tab X
|
||||
@tab fourcc: AVrp
|
||||
@item AVS (Audio Video Standard) video @tab @tab X
|
||||
@tab Video encoding used by the Creature Shock game.
|
||||
@item AVS2-P2/IEEE1857.4 @tab E @tab E
|
||||
@tab Supported through external libraries libxavs2 and libdavs2
|
||||
@item AYUV @tab X @tab X
|
||||
@tab Microsoft uncompressed packed 4:4:4:4
|
||||
@item Beam Software VB @tab @tab X
|
||||
@ -827,6 +851,8 @@ following image formats are supported:
|
||||
@tab Codec used in Delphine Software International games.
|
||||
@item Discworld II BMV Video @tab @tab X
|
||||
@item Canopus Lossless Codec @tab @tab X
|
||||
@item CDToons @tab @tab X
|
||||
@tab Codec used in various Broderbund games.
|
||||
@item Cinepak @tab @tab X
|
||||
@item Cirrus Logic AccuPak @tab X @tab X
|
||||
@tab fourcc: CLJR
|
||||
@ -1057,8 +1083,10 @@ following image formats are supported:
|
||||
@item AAC+ @tab E @tab IX
|
||||
@tab encoding supported through external library libfdk-aac
|
||||
@item AC-3 @tab IX @tab IX
|
||||
@item ACELP.KELVIN @tab @tab X
|
||||
@item ADPCM 4X Movie @tab @tab X
|
||||
@item ADPCM Yamaha AICA @tab @tab X
|
||||
@item ADPCM Argonaut Games @tab @tab X
|
||||
@item ADPCM CDROM XA @tab @tab X
|
||||
@item ADPCM Creative Technology @tab @tab X
|
||||
@tab 16 -> 4, 8 -> 4, 8 -> 3, 8 -> 2
|
||||
@ -1074,10 +1102,14 @@ following image formats are supported:
|
||||
@item ADPCM G.726 @tab X @tab X
|
||||
@item ADPCM IMA AMV @tab @tab X
|
||||
@tab Used in AMV files
|
||||
@item ADPCM IMA Cunning Developments @tab @tab X
|
||||
@item ADPCM IMA Electronic Arts EACS @tab @tab X
|
||||
@item ADPCM IMA Electronic Arts SEAD @tab @tab X
|
||||
@item ADPCM IMA Funcom @tab @tab X
|
||||
@item ADPCM IMA High Voltage Software ALP @tab @tab X
|
||||
@item ADPCM IMA QuickTime @tab X @tab X
|
||||
@item ADPCM IMA Simon & Schuster Interactive @tab X @tab X
|
||||
@item ADPCM IMA Ubisoft APM @tab @tab X
|
||||
@item ADPCM IMA Loki SDL MJPEG @tab @tab X
|
||||
@item ADPCM IMA WAV @tab X @tab X
|
||||
@item ADPCM IMA Westwood @tab @tab X
|
||||
@ -1107,6 +1139,7 @@ following image formats are supported:
|
||||
@item ADPCM Westwood Studios IMA @tab @tab X
|
||||
@tab Used in Westwood Studios games like Command and Conquer.
|
||||
@item ADPCM Yamaha @tab X @tab X
|
||||
@item ADPCM Zork @tab @tab X
|
||||
@item AMR-NB @tab E @tab X
|
||||
@tab encoding supported through external library libopencore-amrnb
|
||||
@item AMR-WB @tab E @tab X
|
||||
@ -1128,6 +1161,7 @@ following image formats are supported:
|
||||
@tab decoding supported through external library libcelt
|
||||
@item codec2 @tab E @tab E
|
||||
@tab en/decoding supported through external library libcodec2
|
||||
@item CRI HCA @tab @tab X
|
||||
@item Delphine Software International CIN audio @tab @tab X
|
||||
@tab Codec used in Delphine Software International games.
|
||||
@item Digital Speech Standard - Standard Play mode (DSS SP) @tab @tab X
|
||||
@ -1137,6 +1171,7 @@ following image formats are supported:
|
||||
@item DCA (DTS Coherent Acoustics) @tab X @tab X
|
||||
@tab supported extensions: XCh, XXCH, X96, XBR, XLL, LBR (partially)
|
||||
@item Dolby E @tab @tab X
|
||||
@item DPCM Gremlin @tab @tab X
|
||||
@item DPCM id RoQ @tab X @tab X
|
||||
@tab Used in Quake III, Jedi Knight 2 and other computer games.
|
||||
@item DPCM Interplay @tab @tab X
|
||||
@ -1148,6 +1183,7 @@ following image formats are supported:
|
||||
@item DPCM Sol @tab @tab X
|
||||
@item DPCM Xan @tab @tab X
|
||||
@tab Used in Origin's Wing Commander IV AVI files.
|
||||
@item DPCM Xilam DERF @tab @tab X
|
||||
@item DSD (Direct Stream Digital), least significant bit first @tab @tab X
|
||||
@item DSD (Direct Stream Digital), most significant bit first @tab @tab X
|
||||
@item DSD (Direct Stream Digital), least significant bit first, planar @tab @tab X
|
||||
@ -1214,7 +1250,6 @@ following image formats are supported:
|
||||
@item PCM unsigned 24-bit little-endian @tab X @tab X
|
||||
@item PCM unsigned 32-bit big-endian @tab X @tab X
|
||||
@item PCM unsigned 32-bit little-endian @tab X @tab X
|
||||
@item PCM Zork @tab @tab X
|
||||
@item QCELP / PureVoice @tab @tab X
|
||||
@item QDesign Music Codec 1 @tab @tab X
|
||||
@item QDesign Music Codec 2 @tab @tab X
|
||||
@ -1305,6 +1340,7 @@ performance on systems without hardware floating point support).
|
||||
|
||||
@multitable @columnfractions .4 .1
|
||||
@item Name @tab Support
|
||||
@item AMQP @tab E
|
||||
@item file @tab X
|
||||
@item FTP @tab X
|
||||
@item Gopher @tab X
|
||||
@ -1329,6 +1365,7 @@ performance on systems without hardware floating point support).
|
||||
@item TCP @tab X
|
||||
@item TLS @tab X
|
||||
@item UDP @tab X
|
||||
@item ZMQ @tab E
|
||||
@end multitable
|
||||
|
||||
@code{X} means that the protocol is supported.
|
||||
|
@ -277,8 +277,8 @@ audio track.
|
||||
|
||||
@item list_devices
|
||||
If set to @option{true}, print a list of devices and exit.
|
||||
Defaults to @option{false}. Alternatively you can use the @code{-sources}
|
||||
option of ffmpeg to list the available input devices.
|
||||
Defaults to @option{false}. This option is deprecated, please use the
|
||||
@code{-sources} option of ffmpeg to list the available input devices.
|
||||
|
||||
@item list_formats
|
||||
If set to @option{true}, print a list of supported formats and exit.
|
||||
@ -292,11 +292,6 @@ as @option{pal} (3 letters).
|
||||
Default behavior is autodetection of the input video format, if the hardware
|
||||
supports it.
|
||||
|
||||
@item bm_v210
|
||||
This is a deprecated option, you can use @option{raw_format} instead.
|
||||
If set to @samp{1}, video is captured in 10 bit v210 instead
|
||||
of uyvy422. Not all Blackmagic devices support this option.
|
||||
|
||||
@item raw_format
|
||||
Set the pixel format of the captured video.
|
||||
Available values are:
|
||||
@ -395,6 +390,14 @@ Either sync could go wrong by 1 frame or in a rarer case
|
||||
@option{timestamp_align} seconds.
|
||||
Defaults to @samp{0}.
|
||||
|
||||
@item wait_for_tc (@emph{bool})
|
||||
Drop frames until a frame with timecode is received. Sometimes serial timecode
isn't received with the first input frame. If that happens, the stored stream
timecode will be inaccurate. If this option is set to @option{true}, input frames
are dropped until a frame with timecode is received.
|
||||
Option @var{timecode_format} must be specified.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@ -404,7 +407,7 @@ Defaults to @samp{0}.
|
||||
@item
|
||||
List input devices:
|
||||
@example
|
||||
ffmpeg -f decklink -list_devices 1 -i dummy
|
||||
ffmpeg -sources decklink
|
||||
@end example
|
||||
|
||||
@item
|
||||
@ -422,7 +425,7 @@ ffmpeg -format_code Hi50 -f decklink -i 'Intensity Pro' -c:a copy -c:v copy outp
|
||||
@item
|
||||
Capture video clip at 1080i50 10 bit:
|
||||
@example
|
||||
ffmpeg -bm_v210 1 -format_code Hi50 -f decklink -i 'UltraStudio Mini Recorder' -c:a copy -c:v copy output.avi
|
||||
ffmpeg -raw_format yuv422p10 -format_code Hi50 -f decklink -i 'UltraStudio Mini Recorder' -c:a copy -c:v copy output.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
@ -1524,7 +1527,7 @@ ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_siz
|
||||
@end example
|
||||
|
||||
@item video_size
|
||||
Set the video frame size. Default value is @code{vga}.
|
||||
Set the video frame size. Default is the full desktop.
|
||||
|
||||
@item grab_x
|
||||
@item grab_y
|
||||
|
@ -51,16 +51,14 @@ the decode process starts. Call ff_thread_finish_setup() afterwards. If
|
||||
some code can't be moved, have update_thread_context() run it in the next
|
||||
thread.
|
||||
|
||||
If the codec allocates writable tables in its init(), add an init_thread_copy()
|
||||
which re-allocates them for other threads.
|
||||
|
||||
Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little
|
||||
speed gain at this point but it should work.
|
||||
|
||||
If there are inter-frame dependencies, so the codec calls
|
||||
ff_thread_report/await_progress(), set AVCodecInternal.allocate_progress. The
|
||||
ff_thread_report/await_progress(), set FF_CODEC_CAP_ALLOCATE_PROGRESS in
|
||||
AVCodec.caps_internal and use ff_thread_get_buffer() to allocate frames. The
|
||||
frames must then be freed with ff_thread_release_buffer().
|
||||
Otherwise leave it at zero and decode directly into the user-supplied frames.
|
||||
Otherwise decode directly into the user-supplied frames.
|
||||
|
||||
Call ff_thread_report_progress() after some part of the current picture has decoded.
|
||||
A good place to put this is where draw_horiz_band() is called - add this if it isn't
|
||||
|
247 doc/muxers.texi
@ -105,12 +105,14 @@ It takes a single signed native-endian 16-bit raw audio stream of at most 2 chan
|
||||
|
||||
@table @option
|
||||
@item silence_threshold
|
||||
Threshold for detecting silence, ranges from -1 to 32767. -1 disables silence detection and
|
||||
is required for use with the AcoustID service. Default is -1.
|
||||
Threshold for detecting silence. Range is from -1 to 32767, where -1 disables
|
||||
silence detection. Silence detection can only be used with version 3 of the
|
||||
algorithm.
|
||||
Silence detection must be disabled for use with the AcoustID service. Default is -1.
|
||||
|
||||
@item algorithm
|
||||
Version of algorithm to fingerprint with. Range is 0 to 4. Version 2 requires that silence
|
||||
detection be enabled. Default is 1.
|
||||
Version of algorithm to fingerprint with. Range is 0 to 4.
|
||||
Version 3 enables silence detection. Default is 1.
|
||||
|
||||
@item fp_format
|
||||
Format to output the fingerprint as. Accepts the following options:
|
||||
@ -234,8 +236,10 @@ This is a deprecated option to set the segment length in microseconds, use @var{
|
||||
@item seg_duration @var{duration}
|
||||
Set the segment length in seconds (fractional value can be set). The value is
|
||||
treated as average segment duration when @var{use_template} is enabled and
|
||||
@var{use_timeline} is disabled and as minimum segment duration for all the other
|
||||
use cases.
|
||||
@item frag_duration @var{duration}
|
||||
Set the length in seconds of fragments within segments (fractional value can be set).
|
||||
@item frag_type @var{type}
|
||||
Set the type of interval for fragmentation.
|
||||
@item window_size @var{size}
|
||||
Set the maximum number of segments kept in the manifest.
|
||||
@item extra_window_size @var{size}
|
||||
@ -275,6 +279,15 @@ of the adaptation sets and a,b,c,d and e are the indices of the mapped streams.
|
||||
To map all video (or audio) streams to an AdaptationSet, "v" (or "a") can be used as stream identifier instead of IDs.
|
||||
|
||||
When no assignment is defined, this defaults to an AdaptationSet for each stream.
|
||||
|
||||
Optional syntax is "id=x,seg_duration=x,frag_duration=x,frag_type=type,descriptor=descriptor_string,streams=a,b,c id=y,seg_duration=y,frag_type=type,streams=d,e" and so on,
|
||||
descriptor is useful for specifying the scheme defined by ISO/IEC 23009-1:2014/Amd.2:2015.
|
||||
For example, -adaptation_sets "id=0,descriptor=<SupplementalProperty schemeIdUri=\"urn:mpeg:dash:srd:2014\" value=\"0,0,0,1,1,2,2\"/>,streams=v".
|
||||
Please note that descriptor string should be a self-closing xml tag.
|
||||
seg_duration, frag_duration and frag_type override the global option values for each adaptation set.
|
||||
For example, -adaptation_sets "id=0,seg_duration=2,frag_duration=1,frag_type=duration,streams=v id=1,seg_duration=2,frag_type=none,streams=a"
|
||||
trick_id marks an adaptation set as containing streams meant to be used for Trick Mode for the referenced adaptation set.
|
||||
For example, -adaptation_sets "id=0,seg_duration=2,frag_type=none,streams=0 id=1,seg_duration=10,frag_type=none,trick_id=0,streams=1"
|
||||
@item timeout @var{timeout}
|
||||
Set timeout for socket I/O operations. Applicable only for HTTP output.
|
||||
@item index_correction @var{index_correction}
|
||||
@ -320,9 +333,37 @@ This option will also try to comply with the above open spec, till Apple's spec
|
||||
Applicable only when @var{streaming} and @var{hls_playlist} options are enabled.
|
||||
This is an experimental feature.
|
||||
|
||||
@item ldash @var{ldash}
|
||||
Enable Low-latency Dash by constraining the presence and values of some elements.
|
||||
|
||||
@item master_m3u8_publish_rate @var{master_m3u8_publish_rate}
|
||||
Publish the master playlist repeatedly after every specified number of segment intervals.
|
||||
|
||||
@item write_prft @var{write_prft}
|
||||
Write Producer Reference Time elements on supported streams. This also enables writing
|
||||
prft boxes in the underlying muxer. Applicable only when the @var{utc_url} option is enabled.
|
||||
It's set to auto by default, in which case the muxer will attempt to enable it only in modes
|
||||
that require it.
|
||||
|
||||
@item mpd_profile @var{mpd_profile}
|
||||
Set one or more manifest profiles.
|
||||
|
||||
@item http_opts @var{http_opts}
|
||||
A :-separated list of key=value options to pass to the underlying HTTP
|
||||
protocol. Applicable only for HTTP output.
|
||||
|
||||
@item target_latency @var{target_latency}
|
||||
Set an intended target latency in seconds (fractional value can be set) for serving. Applicable only when @var{streaming} and @var{write_prft} options are enabled.
|
||||
This is an informative field that clients can use to measure the latency of the service.
|
||||
|
||||
@item min_playback_rate @var{min_playback_rate}
|
||||
Set the minimum playback rate indicated as appropriate for the purposes of automatically
|
||||
adjusting playback latency and buffer occupancy during normal playback by clients.
|
||||
|
||||
@item max_playback_rate @var{max_playback_rate}
|
||||
Set the maximum playback rate indicated as appropriate for the purposes of automatically
|
||||
adjusting playback latency and buffer occupancy during normal playback by clients.
|
||||
|
||||
@end table
|
||||
|
||||
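The combination below is only an illustrative sketch (the input name and the
encoder choices are placeholders); it shows how the low-latency related options
above could fit together:
@example
ffmpeg -re -i in.mp4 -c:v libx264 -c:a aac \
  -f dash -streaming 1 -ldash 1 -use_template 1 -use_timeline 0 \
  -seg_duration 2 -frag_duration 0.5 -frag_type duration \
  -write_prft 1 -target_latency 3 out.mpd
@end example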
@anchor{framecrc}
|
||||
@ -607,6 +648,9 @@ Set the starting sequence numbers according to @var{start_number} option value.
|
||||
@item epoch
|
||||
The start number will be the seconds since epoch (1970-01-01 00:00:00)
|
||||
|
||||
@item epoch_us
|
||||
The start number will be the microseconds since epoch (1970-01-01 00:00:00)
|
||||
|
||||
@item datetime
|
||||
The start number will be based on the current date/time as YYYYmmddHHMMSS. e.g. 20161231235759.
|
||||
|
||||
@ -803,6 +847,9 @@ fmp4 files may be used in HLS version 7 and above.
|
||||
@item hls_fmp4_init_filename @var{filename}
|
||||
Set filename to the fragment files header file, default filename is @file{init.mp4}.
|
||||
|
||||
@item hls_fmp4_init_resend
|
||||
Resend the init file after every m3u8 file refresh. Default is @var{0}.
|
||||
|
||||
When @code{var_stream_map} is set with two or more variant streams, the
|
||||
@var{filename} pattern must contain the string "%v", this string specifies
|
||||
the position of variant stream index in the generated init file names.
|
||||
@ -898,8 +945,8 @@ serving up segments can be configured to reject requests to *.tmp to prevent acc
|
||||
before they have been added to the m3u8 playlist. This flag also affects how m3u8 playlist files are created.
|
||||
If this flag is set, all playlist files will be written into a temporary file and renamed after they are complete, similarly to how segments are handled.
|
||||
But playlists with @code{file} protocol and with type (@code{hls_playlist_type}) other than @code{vod}
|
||||
are always written into temporary file regardles of this flag. Master playlist files (@code{master_pl_name}), if any, with @code{file} protocol,
|
||||
are always written into temporary file regardles of this flag if @code{master_pl_publish_rate} value is other than zero.
|
||||
are always written into temporary file regardless of this flag. Master playlist files (@code{master_pl_name}), if any, with @code{file} protocol,
|
||||
are always written into temporary file regardless of this flag if @code{master_pl_publish_rate} value is other than zero.
|
||||
|
||||
@end table
|
||||
|
||||
@ -1029,6 +1076,21 @@ have and language is named ENG, the other audio language is named CHN.
|
||||
|
||||
By default, a single hls variant containing all the encoded streams is created.
|
||||
|
||||
@example
|
||||
ffmpeg -y -i input_with_subtitle.mkv \
|
||||
-b:v:0 5250k -c:v h264 -pix_fmt yuv420p -profile:v main -level 4.1 \
|
||||
-b:a:0 256k \
|
||||
-c:s webvtt -c:a mp2 -ar 48000 -ac 2 -map 0:v -map 0:a:0 -map 0:s:0 \
|
||||
-f hls -var_stream_map "v:0,a:0,s:0,sgroup:subtitle" \
|
||||
-master_pl_name master.m3u8 -t 300 -hls_time 10 -hls_init_time 4 -hls_list_size \
|
||||
10 -master_pl_publish_rate 10 -hls_flags \
|
||||
delete_segments+discont_start+split_by_time ./tmp/video.m3u8
|
||||
@end example
|
||||
|
||||
This example adds @code{#EXT-X-MEDIA} tag with @code{TYPE=SUBTITLES} in
|
||||
the master playlist with webvtt subtitle group name 'subtitle'. Please make sure
|
||||
the input file has one text subtitle stream at least.
|
||||
|
||||
@item cc_stream_map
|
||||
Map string which specifies different closed captions groups and their
|
||||
attributes. The closed captions stream groups are separated by space.
|
||||
@ -1163,6 +1225,37 @@ The pattern "img%%-%d.jpg" will specify a sequence of filenames of the
|
||||
form @file{img%-1.jpg}, @file{img%-2.jpg}, ..., @file{img%-10.jpg},
|
||||
etc.
|
||||
|
||||
The image muxer supports the .Y.U.V image file format. This format is
|
||||
special in that each image frame consists of three files, for
|
||||
each of the YUV420P components. To read or write this image file format,
|
||||
specify the name of the '.Y' file. The muxer will automatically open the
|
||||
'.U' and '.V' files as required.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item frame_pts
|
||||
If set to 1, expand the filename with pts from pkt->pts.
|
||||
Default value is 0.
|
||||
|
||||
@item start_number
|
||||
Start the sequence from the specified number. Default value is 1.
|
||||
|
||||
@item update
|
||||
If set to 1, the filename will always be interpreted as just a
|
||||
filename, not a pattern, and the corresponding file will be continuously
|
||||
overwritten with new images. Default value is 0.
|
||||
|
||||
@item strftime
|
||||
If set to 1, expand the filename with date and time information from
|
||||
@code{strftime()}. Default value is 0.
|
||||
|
||||
@item protocol_opts @var{options_list}
|
||||
Set protocol options as a :-separated list of key=value parameters. Values
|
||||
containing the @code{:} special character must be escaped.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
The following example shows how to use @command{ffmpeg} for creating a
|
||||
@ -1203,31 +1296,11 @@ You can set the file name with current frame's PTS:
|
||||
ffmpeg -f v4l2 -r 1 -i /dev/video0 -copyts -f image2 -frame_pts true %d.jpg"
|
||||
@end example
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item frame_pts
|
||||
If set to 1, expand the filename with pts from pkt->pts.
|
||||
Default value is 0.
|
||||
|
||||
@item start_number
|
||||
Start the sequence from the specified number. Default value is 1.
|
||||
|
||||
@item update
|
||||
If set to 1, the filename will always be interpreted as just a
|
||||
filename, not a pattern, and the corresponding file will be continuously
|
||||
overwritten with new images. Default value is 0.
|
||||
|
||||
@item strftime
|
||||
If set to 1, expand the filename with date and time information from
|
||||
@code{strftime()}. Default value is 0.
|
||||
@end table
|
||||
|
||||
The image muxer supports the .Y.U.V image file format. This format is
|
||||
special in that that each image frame consists of three files, for
|
||||
each of the YUV420P components. To read or write this image file format,
|
||||
specify the name of the '.Y' file. The muxer will automatically open the
|
||||
'.U' and '.V' files as required.
|
||||
A more complex example is to publish contents of your desktop directly to a
|
||||
WebDAV server every second:
|
||||
@example
|
||||
ffmpeg -f x11grab -framerate 1 -i :0.0 -q:v 6 -update 1 -protocol_opts method=PUT http://example.com/desktop.jpg
|
||||
@end example
|
||||
|
||||
@section matroska
|
||||
|
||||
@ -1241,7 +1314,8 @@ The recognized metadata settings in this muxer are:
|
||||
|
||||
@table @option
|
||||
@item title
|
||||
Set title name provided to a single track.
|
||||
Set title name provided to a single track. This gets mapped to
|
||||
the FileDescription element for a stream written as attachment.
|
||||
|
||||
@item language
|
||||
Specify the language of the track in the Matroska languages form.
|
||||
@ -1308,11 +1382,31 @@ index at the beginning of the file.
|
||||
|
||||
If this option is set to a non-zero value, the muxer will reserve a given amount
|
||||
of space in the file header and then try to write the cues there when the muxing
|
||||
finishes. If the available space does not suffice, muxing will fail. A safe size
|
||||
for most use cases should be about 50kB per hour of video.
|
||||
finishes. If the reserved space does not suffice, no Cues will be written, the
|
||||
file will be finalized and writing the trailer will return an error.
|
||||
A safe size for most use cases should be about 50kB per hour of video.
|
||||
|
||||
Note that cues are only written if the output is seekable and this option will
|
||||
have no effect if it is not.
|
||||
@item default_mode
|
||||
This option controls how the FlagDefault of the output tracks will be set.
|
||||
It influences which tracks players should play by default. The default mode
|
||||
is @samp{infer}.
|
||||
@table @samp
|
||||
@item infer
|
||||
In this mode, for each type of track (audio, video or subtitle), if there is
|
||||
a track with disposition default of this type, then the first such track
|
||||
(i.e. the one with the lowest index) will be marked as default; if no such
|
||||
track exists, the first track of this type will be marked as default instead
|
||||
(if existing). This ensures that the default flag is set in a sensible way even
|
||||
if the input originated from containers that lack the concept of default tracks.
|
||||
@item infer_no_subs
|
||||
This mode is the same as infer except that if no subtitle track with
|
||||
disposition default exists, no subtitle track will be marked as default.
|
||||
@item passthrough
|
||||
In this mode the FlagDefault is set if and only if the AV_DISPOSITION_DEFAULT
|
||||
flag is set in the disposition of the corresponding stream.
|
||||
@end table
|
||||
@end table
|
||||
|
||||
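As a hedged example (file names are placeholders; 50k merely follows the rough
50kB-per-hour guideline given above):
@example
# reserve room for the Cues up front and only keep default flags
# that were already set in the input
ffmpeg -i in.mp4 -c copy -reserve_index_space 50k -default_mode passthrough out.mkv
@end example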
@anchor{md5}
|
||||
@ -1462,13 +1556,6 @@ point on IIS with this muxer. Example:
|
||||
ffmpeg -re @var{<normal input/transcoding options>} -movflags isml+frag_keyframe -f ismv http://server/publishingpoint.isml/Streams(Encoder1)
|
||||
@end example
|
||||
|
||||
@subsection Audible AAX
|
||||
|
||||
Audible AAX files are encrypted M4B files, and they can be decrypted by specifying a 4 byte activation secret.
|
||||
@example
|
||||
ffmpeg -activation_bytes 1CEB00DA -i test.aax -vn -c:a copy output.mp4
|
||||
@end example
|
||||
|
||||
@section mp3
|
||||
|
||||
The MP3 muxer writes a raw MP3 stream with the following optional features:
|
||||
@ -1576,11 +1663,14 @@ Advanced Codec Digital HDTV service.
|
||||
@end table
|
||||
|
||||
@item mpegts_pmt_start_pid @var{integer}
|
||||
Set the first PID for PMT. Default is @code{0x1000}. Max is @code{0x1f00}.
|
||||
Set the first PID for PMTs. Default is @code{0x1000}, minimum is @code{0x0020},
|
||||
maximum is @code{0x1ffa}. This option has no effect in m2ts mode where the PMT
|
||||
PID is fixed to @code{0x0100}.
|
||||
|
||||
@item mpegts_start_pid @var{integer}
|
||||
Set the first PID for data packets. Default is @code{0x0100}. Max is
|
||||
@code{0x0f00}.
|
||||
Set the first PID for elementary streams. Default is @code{0x0100}, minimum is
|
||||
@code{0x0020}, maximum is @code{0x1ffa}. This option has no effect in m2ts mode
|
||||
where the elementary stream PIDs are fixed.
|
||||
|
||||
@item mpegts_m2ts_mode @var{boolean}
|
||||
Enable m2ts mode if set to @code{1}. Default value is @code{-1} which
|
||||
@ -1607,10 +1697,6 @@ Conform to System B (DVB) instead of System A (ATSC).
|
||||
Mark the initial packet of each stream as discontinuity.
|
||||
@end table
|
||||
|
||||
@item resend_headers @var{integer}
|
||||
Reemit PAT/PMT before writing the next packet. This option is deprecated:
|
||||
use @option{mpegts_flags} instead.
|
||||
|
||||
@item mpegts_copyts @var{boolean}
|
||||
Preserve original timestamps, if value is set to @code{1}. Default value
|
||||
is @code{-1}, which results in shifting timestamps so that they start from 0.
|
||||
@ -1619,14 +1705,16 @@ is @code{-1}, which results in shifting timestamps so that they start from 0.
|
||||
Omit the PES packet length for video packets. Default is @code{1} (true).
|
||||
|
||||
@item pcr_period @var{integer}
|
||||
Override the default PCR retransmission time in milliseconds. Ignored if
|
||||
variable muxrate is selected. Default is @code{20}.
|
||||
Override the default PCR retransmission time in milliseconds. Default is
|
||||
@code{-1} which means that the PCR interval will be determined automatically:
|
||||
20 ms is used for CBR streams, the highest multiple of the frame duration which
|
||||
is less than 100 ms is used for VBR streams.
|
||||
|
||||
@item pat_period @var{double}
|
||||
Maximum time in seconds between PAT/PMT tables.
|
||||
@item pat_period @var{duration}
|
||||
Maximum time in seconds between PAT/PMT tables. Default is @code{0.1}.
|
||||
|
||||
@item sdt_period @var{double}
|
||||
Maximum time in seconds between SDT tables.
|
||||
@item sdt_period @var{duration}
|
||||
Maximum time in seconds between SDT tables. Default is @code{0.5}.
|
||||
|
||||
@item tables_version @var{integer}
|
||||
Set PAT, PMT and SDT version (default @code{0}, valid values are from 0 to 31, inclusively).
|
||||
@ -2062,6 +2150,53 @@ Specify whether to remove all fragments when finished. Default 0 (do not remove)
|
||||
|
||||
@end table
|
||||
|
||||
@anchor{streamhash}
|
||||
@section streamhash
|
||||
|
||||
Per stream hash testing format.
|
||||
|
||||
This muxer computes and prints a cryptographic hash of all the input frames,
|
||||
on a per-stream basis. This can be used for equality checks without having
|
||||
to do a complete binary comparison.
|
||||
|
||||
By default audio frames are converted to signed 16-bit raw audio and
|
||||
video frames to raw video before computing the hash, but the output
|
||||
of explicit conversions to other codecs can also be used. Timestamps
|
||||
are ignored. It uses the SHA-256 cryptographic hash function by default,
|
||||
but supports several other algorithms.
|
||||
|
||||
The output of the muxer consists of one line per stream of the form:
|
||||
@var{streamindex},@var{streamtype},@var{algo}=@var{hash}, where
|
||||
@var{streamindex} is the index of the mapped stream, @var{streamtype} is a
|
||||
single character indicating the type of stream, @var{algo} is a short string
|
||||
representing the hash function used, and @var{hash} is a hexadecimal number
|
||||
representing the computed hash.
|
||||
|
||||
@table @option
|
||||
@item hash @var{algorithm}
|
||||
Use the cryptographic hash function specified by the string @var{algorithm}.
|
||||
Supported values include @code{MD5}, @code{murmur3}, @code{RIPEMD128},
|
||||
@code{RIPEMD160}, @code{RIPEMD256}, @code{RIPEMD320}, @code{SHA160},
|
||||
@code{SHA224}, @code{SHA256} (default), @code{SHA512/224}, @code{SHA512/256},
|
||||
@code{SHA384}, @code{SHA512}, @code{CRC32} and @code{adler32}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
To compute the SHA-256 hash of the input converted to raw audio and
|
||||
video, and store it in the file @file{out.sha256}:
|
||||
@example
|
||||
ffmpeg -i INPUT -f streamhash out.sha256
|
||||
@end example
|
||||
|
||||
To print an MD5 hash to stdout use the command:
|
||||
@example
|
||||
ffmpeg -i INPUT -f streamhash -hash md5 -
|
||||
@end example
|
||||
|
||||
See also the @ref{hash} and @ref{framehash} muxers.
|
||||
|
||||
@anchor{fifo}
|
||||
@section fifo
|
||||
|
||||
|
@ -140,8 +140,8 @@ device with @command{-list_formats 1}. Audio sample rate is always 48 kHz.
|
||||
|
||||
@item list_devices
|
||||
If set to @option{true}, print a list of devices and exit.
|
||||
Defaults to @option{false}. Alternatively you can use the @code{-sinks}
|
||||
option of ffmpeg to list the available output devices.
|
||||
Defaults to @option{false}. This option is deprecated, please use the
|
||||
@code{-sinks} option of ffmpeg to list the available output devices.
|
||||
|
||||
@item list_formats
|
||||
If set to @option{true}, print a list of supported formats and exit.
|
||||
@ -168,7 +168,7 @@ Defaults to @samp{unset}.
|
||||
@item
|
||||
List output devices:
|
||||
@example
|
||||
ffmpeg -i test.avi -f decklink -list_devices 1 dummy
|
||||
ffmpeg -sinks decklink
|
||||
@end example
|
||||
|
||||
@item
|
||||
@ -329,6 +329,8 @@ ffmpeg -i INPUT -f pulse "stream name"
|
||||
|
||||
SDL (Simple DirectMedia Layer) output device.
|
||||
|
||||
"sdl2" can be used as alias for "sdl".
|
||||
|
||||
This output device allows one to show a video stream in an SDL
|
||||
window. Only one SDL window is allowed per application, so you can
|
||||
have only one instance of this output device in an application.
|
||||
|
@ -51,6 +51,66 @@ in microseconds.
|
||||
|
||||
A description of the currently available protocols follows.
|
||||
|
||||
@section amqp
|
||||
|
||||
Advanced Message Queueing Protocol (AMQP) version 0-9-1 is a broker based
|
||||
publish-subscribe communication protocol.
|
||||
|
||||
FFmpeg must be compiled with --enable-librabbitmq to support AMQP. A separate
|
||||
AMQP broker must also be run. An example open-source AMQP broker is RabbitMQ.
|
||||
|
||||
After starting the broker, an FFmpeg client may stream data to the broker using
|
||||
the command:
|
||||
|
||||
@example
|
||||
ffmpeg -re -i input -f mpegts amqp://[[user]:[password]@@]hostname[:port]
|
||||
@end example
|
||||
|
||||
Where hostname and port (default is 5672) are the address of the broker. The
|
||||
client may also set a user/password for authentication. The default for both
|
||||
fields is "guest".
|
||||
|
||||
Multiple subscribers may stream from the broker using the command:
|
||||
@example
|
||||
ffplay amqp://[[user]:[password]@@]hostname[:port]
|
||||
@end example
|
||||
|
||||
In RabbitMQ all data published to the broker flows through a specific exchange,
|
||||
and each subscribing client has an assigned queue/buffer. When a packet arrives
|
||||
at an exchange, it may be copied to a client's queue depending on the exchange
|
||||
and routing_key fields.
|
||||
|
||||
The following options are supported:
|
||||
|
||||
@table @option
|
||||
|
||||
@item exchange
|
||||
Sets the exchange to use on the broker. RabbitMQ has several predefined
|
||||
exchanges: "amq.direct" is the default exchange, where the publisher and
|
||||
subscriber must have a matching routing_key; "amq.fanout" is the same as a
|
||||
broadcast operation (i.e. the data is forwarded to all queues on the fanout
|
||||
exchange independent of the routing_key); and "amq.topic" is similar to
|
||||
"amq.direct", but allows for more complex pattern matching (refer to the RabbitMQ
|
||||
documentation).
|
||||
|
||||
@item routing_key
|
||||
Sets the routing key. The default value is "amqp". The routing key is used on
|
||||
the "amq.direct" and "amq.topic" exchanges to decide whether packets are written
|
||||
to the queue of a subscriber.
|
||||
|
||||
@item pkt_size
|
||||
Maximum size of each packet sent/received to the broker. Default is 131072.
|
||||
Minimum is 4096 and max is any large value (representable by an int). When
|
||||
receiving packets, this sets an internal buffer size in FFmpeg. It should be
|
||||
equal to or greater than the size of the published packets to the broker. Otherwise
|
||||
the received message may be truncated causing decoding errors.
|
||||
|
||||
@item connection_timeout
|
||||
The timeout in seconds during the initial connection to the broker. The
|
||||
default value is rw_timeout, or 5 seconds if rw_timeout is not set.
|
||||
|
||||
@end table
|
||||
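As an untested sketch (the broker address is a placeholder, and passing the
protocol options on the command line as shown is an assumption), publishing on
the fanout exchange might look like:
@example
# every connected subscriber receives a copy of the stream
ffmpeg -re -i input.mp4 -f mpegts -exchange amq.fanout amqp://localhost:5672
@end example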
|
||||
@section async
|
||||
|
||||
Asynchronous data filling wrapper for input stream.
|
||||
@ -228,6 +288,14 @@ Set timeout in microseconds of socket I/O operations used by the underlying low
|
||||
operation. By default it is set to -1, which means that the timeout is
|
||||
not specified.
|
||||
|
||||
@item ftp-user
|
||||
Set a user to be used for authenticating to the FTP server. This is overridden by the
|
||||
user in the FTP URL.
|
||||
|
||||
@item ftp-password
|
||||
Set a password to be used for authenticating to the FTP server. This is overridden by
|
||||
the password in the FTP URL, or by @option{ftp-anonymous-password} if no user is set.
|
||||
|
||||
@item ftp-anonymous-password
|
||||
Password used when login as anonymous user. Typically an e-mail address
|
||||
should be used.
|
||||
@ -1187,7 +1255,7 @@ options.
|
||||
This protocol accepts the following options.
|
||||
|
||||
@table @option
|
||||
@item connect_timeout
|
||||
@item connect_timeout=@var{milliseconds}
|
||||
Connection timeout; SRT cannot connect for RTT > 1500 msec
|
||||
(2 handshake exchanges) with the default connect timeout of
|
||||
3 seconds. This option applies to the caller and rendezvous
|
||||
@ -1218,7 +1286,7 @@ IP Type of Service. Applies to sender only. Default value is 0xB8.
|
||||
@item ipttl=@var{ttl}
|
||||
IP Time To Live. Applies to sender only. Default value is 64.
|
||||
|
||||
@item latency
|
||||
@item latency=@var{microseconds}
|
||||
Timestamp-based Packet Delivery Delay.
|
||||
Used to absorb bursts of missed packet retransmissions.
|
||||
This flag sets both @option{rcvlatency} and @option{peerlatency}
|
||||
@ -1229,7 +1297,7 @@ when side is sender and @option{rcvlatency}
|
||||
when side is receiver, and the bidirectional stream
|
||||
sending is not supported.
|
||||
|
||||
@item listen_timeout
|
||||
@item listen_timeout=@var{microseconds}
|
||||
Set socket listen timeout.
|
||||
|
||||
@item maxbw=@var{bytes/seconds}
|
||||
@ -1274,6 +1342,26 @@ only if @option{pbkeylen} is non-zero. It is used on
|
||||
the receiver only if the received data is encrypted.
|
||||
The configured passphrase cannot be recovered (write-only).
|
||||
|
||||
@item enforced_encryption=@var{1|0}
|
||||
If true, both connection parties must have the same password
|
||||
set (including empty, that is, with no encryption). If the
|
||||
password doesn't match or only one side is unencrypted,
|
||||
the connection is rejected. Default is true.
|
||||
|
||||
@item kmrefreshrate=@var{packets}
|
||||
The number of packets to be transmitted after which the
|
||||
encryption key is switched to a new key. Default is -1.
|
||||
-1 means auto (0x1000000 in srt library). The range for
|
||||
this option is integers in the 0 - @code{INT_MAX}.
|
||||
|
||||
@item kmpreannounce=@var{packets}
|
||||
The interval between when a new encryption key is sent and
|
||||
when switchover occurs. This value also applies to the
|
||||
subsequent interval between when switchover occurs and
|
||||
when the old encryption key is decommissioned. Default is -1.
|
||||
-1 means auto (0x1000 in srt library). The range for
|
||||
this option is integers in the 0 - @code{INT_MAX}.
|
||||
|
||||
@item payload_size=@var{bytes}
|
||||
Sets the maximum declared size of a packet transferred
|
||||
during the single call to the sending function in Live
|
||||
@ -1289,7 +1377,7 @@ use a bigger maximum frame size, though not greater than
|
||||
@item pkt_size=@var{bytes}
|
||||
Alias for @samp{payload_size}.
|
||||
|
||||
@item peerlatency
|
||||
@item peerlatency=@var{microseconds}
|
||||
The latency value (as described in @option{rcvlatency}) that is
|
||||
set by the sender side as a minimum value for the receiver.
|
||||
|
||||
@ -1301,7 +1389,7 @@ Not required on receiver (set to 0),
|
||||
key size obtained from sender in HaiCrypt handshake.
|
||||
Default value is 0.
|
||||
|
||||
@item rcvlatency
|
||||
@item rcvlatency=@var{microseconds}
|
||||
The time that should elapse since the moment when the
|
||||
packet was sent and the moment when it's delivered to
|
||||
the receiver application in the receiving function.
|
||||
@ -1319,12 +1407,10 @@ Set UDP receive buffer size, expressed in bytes.
|
||||
@item send_buffer_size=@var{bytes}
|
||||
Set UDP send buffer size, expressed in bytes.
|
||||
|
||||
@item rw_timeout
|
||||
Set raise error timeout for read/write optations.
|
||||
|
||||
This option is only relevant in read mode:
|
||||
if no data arrived in more than this time
|
||||
interval, raise error.
|
||||
@item timeout=@var{microseconds}
|
||||
Set raise error timeouts for read, write and connect operations. Note that the
|
||||
SRT library has internal timeouts which can be controlled separately, the
|
||||
value set here is only a cap on those.
|
||||
|
||||
@item tlpktdrop=@var{1|0}
|
||||
Too-late Packet Drop. When enabled on receiver, it skips
|
||||
@ -1418,6 +1504,12 @@ the overhead transmission (retransmitted and control packets).
|
||||
file: Set options as for non-live transmission. See @option{messageapi}
|
||||
for further explanations
|
||||
|
||||
@item linger=@var{seconds}
|
||||
The number of seconds that the socket waits for unsent data when closing.
|
||||
Default is -1. -1 means auto (off with 0 seconds in live mode, on with 180
|
||||
seconds in file mode). The range for this option is integers in the
|
||||
0 - @code{INT_MAX}.
|
||||
|
||||
@end table
|
||||
|
||||
For more information see: @url{https://github.com/Haivision/srt}.
|
||||
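A rough, untested sketch (the address and passphrase are placeholders) of a
sender that sets a few of the options above through URL parameters:
@example
ffmpeg -re -i in.mp4 -c copy -f mpegts "srt://192.0.2.10:9000?latency=200000&passphrase=s3cretpass&pbkeylen=16"
@end example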
@ -1619,7 +1711,7 @@ The list of supported options follows.
|
||||
@item buffer_size=@var{size}
|
||||
Set the UDP maximum socket buffer size in bytes. This is used to set either
|
||||
the receive or send buffer size, depending on what the socket is used for.
|
||||
Default is 64KB. See also @var{fifo_size}.
|
||||
Default is 32 KB for output, 384 KB for input. See also @var{fifo_size}.
|
||||
|
||||
@item bitrate=@var{bitrate}
|
||||
If set to nonzero, the output will have the specified constant bitrate if the
|
||||
@ -1728,4 +1820,51 @@ Timeout in ms.
|
||||
Create the Unix socket in listening mode.
|
||||
@end table
|
||||
|
||||
@section zmq
|
||||
|
||||
ZeroMQ asynchronous messaging using the libzmq library.
|
||||
|
||||
This library supports unicast streaming to multiple clients without relying on
|
||||
an external server.
|
||||
|
||||
The required syntax for streaming or connecting to a stream is:
|
||||
@example
|
||||
zmq:tcp://ip-address:port
|
||||
@end example
|
||||
|
||||
Example:
|
||||
Create a localhost stream on port 5555:
|
||||
@example
|
||||
ffmpeg -re -i input -f mpegts zmq:tcp://127.0.0.1:5555
|
||||
@end example
|
||||
|
||||
Multiple clients may connect to the stream using:
|
||||
@example
|
||||
ffplay zmq:tcp://127.0.0.1:5555
|
||||
@end example
|
||||
|
||||
Streaming to multiple clients is implemented using a ZeroMQ Pub-Sub pattern.
|
||||
The server side binds to a port and publishes data. Clients connect to the
|
||||
server (via IP address/port) and subscribe to the stream. The order in which
|
||||
the server and client start generally does not matter.
|
||||
|
||||
ffmpeg must be compiled with the --enable-libzmq option to support
|
||||
this protocol.
|
||||
|
||||
Options can be set on the @command{ffmpeg}/@command{ffplay} command
|
||||
line. The following options are supported:
|
||||
|
||||
@table @option
|
||||
|
||||
@item pkt_size
|
||||
Forces the maximum packet size for sending/receiving data. The default value is
|
||||
131,072 bytes. On the server side, this sets the maximum size of sent packets
|
||||
via ZeroMQ. On the clients, it sets an internal buffer size for receiving
|
||||
packets. Note that pkt_size on the clients should be equal to or greater than
|
||||
pkt_size on the server. Otherwise the received message may be truncated causing
|
||||
decoding errors.
|
||||
|
||||
@end table
|
||||
|
||||
|
||||
@c man end PROTOCOLS
|
||||
|
@ -126,6 +126,15 @@ The following examples are all valid time duration:
|
||||
@item 55
|
||||
55 seconds
|
||||
|
||||
@item 0.2
|
||||
0.2 seconds
|
||||
|
||||
@item 200ms
|
||||
200 milliseconds, that's 0.2s
|
||||
|
||||
@item 200000us
|
||||
200000 microseconds, that's 0.2s
|
||||
|
||||
@item 12:03:45
|
||||
12 hours, 03 minutes and 45 seconds
|
||||
|
||||
@ -704,6 +713,8 @@ FL+FR+FC+LFE+BL+BR+FLC+FRC
|
||||
FL+FR+FC+LFE+FLC+FRC+SL+SR
|
||||
@item octagonal
|
||||
FL+FR+FC+BL+BR+BC+SL+SR
|
||||
@item hexadecagonal
|
||||
FL+FR+FC+BL+BR+BC+SL+SR+WL+WR+TBL+TBR+TBC+TFC+TFL+TFR
|
||||
@item downmix
|
||||
DL+DR
|
||||
@end table
|
||||
@ -920,6 +931,9 @@ corresponding input value will be returned.
|
||||
@item round(expr)
|
||||
Round the value of expression @var{expr} to the nearest integer. For example, "round(1.5)" is "2.0".
|
||||
|
||||
@item sgn(x)
|
||||
Compute sign of @var{x}.
|
||||
|
||||
@item sin(x)
|
||||
Compute sine of @var{x}.
|
||||
|
||||
|
5 ffbuild/.gitignore vendored Normal file
@ -0,0 +1,5 @@
|
||||
/.config
|
||||
/config.fate
|
||||
/config.log
|
||||
/config.mak
|
||||
/config.sh
|
@ -162,7 +162,7 @@ $(TOOLOBJS): | tools
|
||||
|
||||
OUTDIRS := $(OUTDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(TESTOBJS))
|
||||
|
||||
CLEANSUFFIXES = *.d *.gcda *.gcno *.h.c *.ho *.map *.o *.pc *.ptx *.ptx.c *.ver *.version *$(DEFAULT_X86ASMD).asm *~
|
||||
CLEANSUFFIXES = *.d *.gcda *.gcno *.h.c *.ho *.map *.o *.pc *.ptx *.ptx.c *.ver *.version *$(DEFAULT_X86ASMD).asm *~ *.ilk *.pdb
|
||||
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a
|
||||
|
||||
define RULES
|
||||
|
@ -10,7 +10,6 @@ ALLAVPROGS = $(AVBASENAMES:%=%$(PROGSSUF)$(EXESUF))
|
||||
ALLAVPROGS_G = $(AVBASENAMES:%=%$(PROGSSUF)_g$(EXESUF))
|
||||
|
||||
OBJS-ffmpeg += fftools/ffmpeg_opt.o fftools/ffmpeg_filter.o fftools/ffmpeg_hw.o
|
||||
OBJS-ffmpeg-$(CONFIG_CUVID) += fftools/ffmpeg_cuvid.o
|
||||
OBJS-ffmpeg-$(CONFIG_LIBMFX) += fftools/ffmpeg_qsv.o
|
||||
ifndef CONFIG_VIDEOTOOLBOX
|
||||
OBJS-ffmpeg-$(CONFIG_VDA) += fftools/ffmpeg_videotoolbox.o
|
||||
|
@ -55,9 +55,6 @@
|
||||
#include "libavutil/ffversion.h"
|
||||
#include "libavutil/version.h"
|
||||
#include "cmdutils.h"
|
||||
#if CONFIG_NETWORK
|
||||
#include "libavformat/network.h"
|
||||
#endif
|
||||
#if HAVE_SYS_RESOURCE_H
|
||||
#include <sys/time.h>
|
||||
#include <sys/resource.h>
|
||||
@ -119,7 +116,7 @@ static void log_callback_report(void *ptr, int level, const char *fmt, va_list v
|
||||
|
||||
void init_dynload(void)
|
||||
{
|
||||
#ifdef _WIN32
|
||||
#if HAVE_SETDLLDIRECTORY && defined(_WIN32)
|
||||
/* Calling SetDllDirectory with the empty string (but not NULL) removes the
|
||||
* current working directory from the DLL search path as a security pre-caution. */
|
||||
SetDllDirectory("");
|
||||
@ -182,7 +179,7 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
|
||||
|
||||
first = 1;
|
||||
for (po = options; po->name; po++) {
|
||||
char buf[64];
|
||||
char buf[128];
|
||||
|
||||
if (((po->flags & req_flags) != req_flags) ||
|
||||
(alt_flags && !(po->flags & alt_flags)) ||
|
||||
@ -848,8 +845,8 @@ do { \
|
||||
}
|
||||
|
||||
if (octx->cur_group.nb_opts || codec_opts || format_opts || resample_opts)
|
||||
av_log(NULL, AV_LOG_WARNING, "Trailing options were found on the "
|
||||
"commandline.\n");
|
||||
av_log(NULL, AV_LOG_WARNING, "Trailing option(s) found in the "
|
||||
"command: may be ignored.\n");
|
||||
|
||||
av_log(NULL, AV_LOG_DEBUG, "Finished splitting the commandline.\n");
|
||||
|
||||
@ -980,6 +977,7 @@ static int init_report(const char *env)
|
||||
char *filename_template = NULL;
|
||||
char *key, *val;
|
||||
int ret, count = 0;
|
||||
int prog_loglevel, envlevel = 0;
|
||||
time_t now;
|
||||
struct tm *tm;
|
||||
AVBPrint filename;
|
||||
@ -1011,6 +1009,7 @@ static int init_report(const char *env)
|
||||
av_log(NULL, AV_LOG_FATAL, "Invalid report file level\n");
|
||||
exit_program(1);
|
||||
}
|
||||
envlevel = 1;
|
||||
} else {
|
||||
av_log(NULL, AV_LOG_ERROR, "Unknown key '%s' in FFREPORT\n", key);
|
||||
}
|
||||
@ -1027,6 +1026,10 @@ static int init_report(const char *env)
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
prog_loglevel = av_log_get_level();
|
||||
if (!envlevel)
|
||||
report_file_level = FFMAX(report_file_level, prog_loglevel);
|
||||
|
||||
report_file = fopen(filename.str, "w");
|
||||
if (!report_file) {
|
||||
int ret = AVERROR(errno);
|
||||
@ -1037,16 +1040,17 @@ static int init_report(const char *env)
|
||||
av_log_set_callback(log_callback_report);
|
||||
av_log(NULL, AV_LOG_INFO,
|
||||
"%s started on %04d-%02d-%02d at %02d:%02d:%02d\n"
|
||||
"Report written to \"%s\"\n",
|
||||
"Report written to \"%s\"\n"
|
||||
"Log level: %d\n",
|
||||
program_name,
|
||||
tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
|
||||
tm->tm_hour, tm->tm_min, tm->tm_sec,
|
||||
filename.str);
|
||||
filename.str, report_file_level);
|
||||
av_bprint_finalize(&filename, NULL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int opt_report(const char *opt)
|
||||
int opt_report(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
return init_report(NULL);
|
||||
}
|
||||
@ -1416,10 +1420,6 @@ static void print_codec(const AVCodec *c)
|
||||
printf("threads ");
|
||||
if (c->capabilities & AV_CODEC_CAP_AVOID_PROBING)
|
||||
printf("avoidprobe ");
|
||||
if (c->capabilities & AV_CODEC_CAP_INTRA_ONLY)
|
||||
printf("intraonly ");
|
||||
if (c->capabilities & AV_CODEC_CAP_LOSSLESS)
|
||||
printf("lossless ");
|
||||
if (c->capabilities & AV_CODEC_CAP_HARDWARE)
|
||||
printf("hardware ");
|
||||
if (c->capabilities & AV_CODEC_CAP_HYBRID)
|
||||
@ -1493,13 +1493,14 @@ static char get_media_type_char(enum AVMediaType type)
|
||||
}
|
||||
}
|
||||
|
||||
static const AVCodec *next_codec_for_id(enum AVCodecID id, const AVCodec *prev,
|
||||
static const AVCodec *next_codec_for_id(enum AVCodecID id, void **iter,
|
||||
int encoder)
|
||||
{
|
||||
while ((prev = av_codec_next(prev))) {
|
||||
if (prev->id == id &&
|
||||
(encoder ? av_codec_is_encoder(prev) : av_codec_is_decoder(prev)))
|
||||
return prev;
|
||||
const AVCodec *c;
|
||||
while ((c = av_codec_iterate(iter))) {
|
||||
if (c->id == id &&
|
||||
(encoder ? av_codec_is_encoder(c) : av_codec_is_decoder(c)))
|
||||
return c;
|
||||
}
|
||||
return NULL;
|
||||
}
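The hunk above switches from the removed av_codec_next() walker to the opaque-iterator API; a minimal standalone sketch of the same pattern, assuming only the public libavcodec headers:

#include <stdio.h>
#include <libavcodec/avcodec.h>

/* List every registered encoder for a given codec id, mirroring the
 * av_codec_iterate() loop used by next_codec_for_id() above. */
static void list_encoders(enum AVCodecID id)
{
    void *iter = NULL;
    const AVCodec *c;

    while ((c = av_codec_iterate(&iter))) {
        if (c->id == id && av_codec_is_encoder(c))
            printf("%s\n", c->name);
    }
}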
|
||||
@ -1536,11 +1537,12 @@ static unsigned get_codecs_sorted(const AVCodecDescriptor ***rcodecs)
|
||||
|
||||
static void print_codecs_for_id(enum AVCodecID id, int encoder)
|
||||
{
|
||||
const AVCodec *codec = NULL;
|
||||
void *iter = NULL;
|
||||
const AVCodec *codec;
|
||||
|
||||
printf(" (%s: ", encoder ? "encoders" : "decoders");
|
||||
|
||||
while ((codec = next_codec_for_id(id, codec, encoder)))
|
||||
while ((codec = next_codec_for_id(id, &iter, encoder)))
|
||||
printf("%s ", codec->name);
|
||||
|
||||
printf(")");
|
||||
@ -1563,7 +1565,8 @@ int show_codecs(void *optctx, const char *opt, const char *arg)
|
||||
" -------\n");
|
||||
for (i = 0; i < nb_codecs; i++) {
|
||||
const AVCodecDescriptor *desc = codecs[i];
|
||||
const AVCodec *codec = NULL;
|
||||
const AVCodec *codec;
|
||||
void *iter = NULL;
|
||||
|
||||
if (strstr(desc->name, "_deprecated"))
|
||||
continue;
|
||||
@ -1581,14 +1584,14 @@ int show_codecs(void *optctx, const char *opt, const char *arg)
|
||||
|
||||
/* print decoders/encoders when there's more than one or their
|
||||
* names are different from codec name */
|
||||
while ((codec = next_codec_for_id(desc->id, codec, 0))) {
|
||||
while ((codec = next_codec_for_id(desc->id, &iter, 0))) {
|
||||
if (strcmp(codec->name, desc->name)) {
|
||||
print_codecs_for_id(desc->id, 0);
|
||||
break;
|
||||
}
|
||||
}
|
||||
codec = NULL;
|
||||
while ((codec = next_codec_for_id(desc->id, codec, 1))) {
|
||||
iter = NULL;
|
||||
while ((codec = next_codec_for_id(desc->id, &iter, 1))) {
|
||||
if (strcmp(codec->name, desc->name)) {
|
||||
print_codecs_for_id(desc->id, 1);
|
||||
break;
|
||||
@ -1619,9 +1622,10 @@ static void print_codecs(int encoder)
|
||||
encoder ? "Encoders" : "Decoders");
|
||||
for (i = 0; i < nb_codecs; i++) {
|
||||
const AVCodecDescriptor *desc = codecs[i];
|
||||
const AVCodec *codec = NULL;
|
||||
const AVCodec *codec;
|
||||
void *iter = NULL;
|
||||
|
||||
while ((codec = next_codec_for_id(desc->id, codec, encoder))) {
|
||||
while ((codec = next_codec_for_id(desc->id, &iter, encoder))) {
|
||||
printf(" %c", get_media_type_char(desc->type));
|
||||
printf((codec->capabilities & AV_CODEC_CAP_FRAME_THREADS) ? "F" : ".");
|
||||
printf((codec->capabilities & AV_CODEC_CAP_SLICE_THREADS) ? "S" : ".");
|
||||
@ -1826,9 +1830,10 @@ static void show_help_codec(const char *name, int encoder)
|
||||
if (codec)
|
||||
print_codec(codec);
|
||||
else if ((desc = avcodec_descriptor_get_by_name(name))) {
|
||||
void *iter = NULL;
|
||||
int printed = 0;
|
||||
|
||||
while ((codec = next_codec_for_id(desc->id, codec, encoder))) {
|
||||
while ((codec = next_codec_for_id(desc->id, &iter, encoder))) {
|
||||
printed = 1;
|
||||
print_codec(codec);
|
||||
}
|
||||
@ -1863,6 +1868,24 @@ static void show_help_demuxer(const char *name)
|
||||
show_help_children(fmt->priv_class, AV_OPT_FLAG_DECODING_PARAM);
|
||||
}
|
||||
|
||||
static void show_help_protocol(const char *name)
|
||||
{
|
||||
const AVClass *proto_class;
|
||||
|
||||
if (!name) {
|
||||
av_log(NULL, AV_LOG_ERROR, "No protocol name specified.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
proto_class = avio_protocol_get_class(name);
|
||||
if (!proto_class) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Unknown protocol '%s'.\n", name);
|
||||
return;
|
||||
}
|
||||
|
||||
show_help_children(proto_class, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_ENCODING_PARAM);
|
||||
}
|
||||
|
||||
static void show_help_muxer(const char *name)
|
||||
{
|
||||
const AVCodecDescriptor *desc;
|
||||
@ -1993,6 +2016,8 @@ int show_help(void *optctx, const char *opt, const char *arg)
|
||||
show_help_demuxer(par);
|
||||
} else if (!strcmp(topic, "muxer")) {
|
||||
show_help_muxer(par);
|
||||
} else if (!strcmp(topic, "protocol")) {
|
||||
show_help_protocol(par);
|
||||
#if CONFIG_AVFILTER
|
||||
} else if (!strcmp(topic, "filter")) {
|
||||
show_help_filter(par);
|
||||
@ -2032,7 +2057,7 @@ FILE *get_preset_file(char *filename, size_t filename_size,
|
||||
av_strlcpy(filename, preset_name, filename_size);
|
||||
f = fopen(filename, "r");
|
||||
} else {
|
||||
#ifdef _WIN32
|
||||
#if HAVE_GETMODULEHANDLE && defined(_WIN32)
|
||||
char datadir[MAX_PATH], *ls;
|
||||
base[2] = NULL;
|
||||
|
||||
@ -2185,7 +2210,7 @@ double get_rotation(AVStream *st)
|
||||
if (fabs(theta - 90*round(theta/90)) > 2)
|
||||
av_log(NULL, AV_LOG_WARNING, "Odd rotation angle.\n"
|
||||
"If you want to help, upload a sample "
|
||||
"of this file to ftp://upload.ffmpeg.org/incoming/ "
|
||||
"of this file to https://streams.videolan.org/upload/ "
|
||||
"and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)");
|
||||
|
||||
return theta;
|
||||
|
@ -99,7 +99,7 @@ int opt_default(void *optctx, const char *opt, const char *arg);
|
||||
*/
|
||||
int opt_loglevel(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
int opt_report(const char *opt);
|
||||
int opt_report(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
int opt_max_alloc(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
@ -236,7 +236,7 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
|
||||
{ "colors", OPT_EXIT, { .func_arg = show_colors }, "show available color names" }, \
|
||||
{ "loglevel", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" }, \
|
||||
{ "v", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" }, \
|
||||
{ "report", 0, { (void*)opt_report }, "generate a report" }, \
|
||||
{ "report", 0, { .func_arg = opt_report }, "generate a report" }, \
|
||||
{ "max_alloc", HAS_ARG, { .func_arg = opt_max_alloc }, "set maximum size of a single allocated block", "bytes" }, \
|
||||
{ "cpuflags", HAS_ARG | OPT_EXPERT, { .func_arg = opt_cpuflags }, "force specific cpu flags", "flags" }, \
|
||||
{ "hide_banner", OPT_BOOL | OPT_EXPERT, {&hide_banner}, "do not show program banner", "hide_banner" }, \
|
||||
|
262	fftools/ffmpeg.c
@ -182,7 +182,7 @@ static int sub2video_get_blank_frame(InputStream *ist)
|
||||
ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
|
||||
ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
|
||||
ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
|
||||
if ((ret = av_frame_get_buffer(frame, 32)) < 0)
|
||||
if ((ret = av_frame_get_buffer(frame, 0)) < 0)
|
||||
return ret;
|
||||
memset(frame->data[0], 0, frame->height * frame->linesize[0]);
|
||||
return 0;
|
||||
@ -237,7 +237,7 @@ static void sub2video_push_ref(InputStream *ist, int64_t pts)
|
||||
}
|
||||
}
|
||||
|
||||
void sub2video_update(InputStream *ist, AVSubtitle *sub)
|
||||
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
|
||||
{
|
||||
AVFrame *frame = ist->sub2video.frame;
|
||||
int8_t *dst;
|
||||
@ -254,7 +254,12 @@ void sub2video_update(InputStream *ist, AVSubtitle *sub)
|
||||
AV_TIME_BASE_Q, ist->st->time_base);
|
||||
num_rects = sub->num_rects;
|
||||
} else {
|
||||
pts = ist->sub2video.end_pts;
|
||||
/* If we are initializing the system, utilize current heartbeat
|
||||
PTS as the start time, and show until the following subpicture
|
||||
is received. Otherwise, utilize the previous subpicture's end time
|
||||
as the fall-back value. */
|
||||
pts = ist->sub2video.initialize ?
|
||||
heartbeat_pts : ist->sub2video.end_pts;
|
||||
end_pts = INT64_MAX;
|
||||
num_rects = 0;
|
||||
}
|
||||
@ -269,6 +274,7 @@ void sub2video_update(InputStream *ist, AVSubtitle *sub)
|
||||
sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
|
||||
sub2video_push_ref(ist, pts);
|
||||
ist->sub2video.end_pts = end_pts;
|
||||
ist->sub2video.initialize = 0;
|
||||
}
|
||||
|
||||
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
|
||||
@ -291,9 +297,11 @@ static void sub2video_heartbeat(InputStream *ist, int64_t pts)
|
||||
/* do not send the heartbeat frame if the subtitle is already ahead */
|
||||
if (pts2 <= ist2->sub2video.last_pts)
|
||||
continue;
|
||||
if (pts2 >= ist2->sub2video.end_pts ||
|
||||
(!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
|
||||
sub2video_update(ist2, NULL);
|
||||
if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
|
||||
/* if we have hit the end of the current displayed subpicture,
|
||||
or if we need to initialize the system, update the
|
||||
overlayed subpicture and its start/end times */
|
||||
sub2video_update(ist2, pts2 + 1, NULL);
|
||||
for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
|
||||
nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
|
||||
if (nb_reqs)
|
||||
@ -307,7 +315,7 @@ static void sub2video_flush(InputStream *ist)
|
||||
int ret;
|
||||
|
||||
if (ist->sub2video.end_pts < INT64_MAX)
|
||||
sub2video_update(ist, NULL);
|
||||
sub2video_update(ist, INT64_MAX, NULL);
|
||||
for (i = 0; i < ist->nb_filters; i++) {
|
||||
ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
|
||||
if (ret != AVERROR_EOF && ret < 0)
|
||||
@ -493,32 +501,37 @@ static void ffmpeg_cleanup(int ret)
|
||||
FilterGraph *fg = filtergraphs[i];
|
||||
avfilter_graph_free(&fg->graph);
|
||||
for (j = 0; j < fg->nb_inputs; j++) {
|
||||
while (av_fifo_size(fg->inputs[j]->frame_queue)) {
|
||||
InputFilter *ifilter = fg->inputs[j];
|
||||
struct InputStream *ist = ifilter->ist;
|
||||
|
||||
while (av_fifo_size(ifilter->frame_queue)) {
|
||||
AVFrame *frame;
|
||||
av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
|
||||
av_fifo_generic_read(ifilter->frame_queue, &frame,
|
||||
sizeof(frame), NULL);
|
||||
av_frame_free(&frame);
|
||||
}
|
||||
av_fifo_freep(&fg->inputs[j]->frame_queue);
|
||||
if (fg->inputs[j]->ist->sub2video.sub_queue) {
|
||||
while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
|
||||
av_fifo_freep(&ifilter->frame_queue);
|
||||
if (ist->sub2video.sub_queue) {
|
||||
while (av_fifo_size(ist->sub2video.sub_queue)) {
|
||||
AVSubtitle sub;
|
||||
av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
|
||||
av_fifo_generic_read(ist->sub2video.sub_queue,
|
||||
&sub, sizeof(sub), NULL);
|
||||
avsubtitle_free(&sub);
|
||||
}
|
||||
av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
|
||||
av_fifo_freep(&ist->sub2video.sub_queue);
|
||||
}
|
||||
av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
|
||||
av_freep(&fg->inputs[j]->name);
|
||||
av_buffer_unref(&ifilter->hw_frames_ctx);
|
||||
av_freep(&ifilter->name);
|
||||
av_freep(&fg->inputs[j]);
|
||||
}
|
||||
av_freep(&fg->inputs);
|
||||
for (j = 0; j < fg->nb_outputs; j++) {
|
||||
av_freep(&fg->outputs[j]->name);
|
||||
av_freep(&fg->outputs[j]->formats);
|
||||
av_freep(&fg->outputs[j]->channel_layouts);
|
||||
av_freep(&fg->outputs[j]->sample_rates);
|
||||
OutputFilter *ofilter = fg->outputs[j];
|
||||
|
||||
av_freep(&ofilter->name);
|
||||
av_freep(&ofilter->formats);
|
||||
av_freep(&ofilter->channel_layouts);
|
||||
av_freep(&ofilter->sample_rates);
|
||||
av_freep(&fg->outputs[j]);
|
||||
}
|
||||
av_freep(&fg->outputs);
|
||||
@ -550,9 +563,7 @@ static void ffmpeg_cleanup(int ret)
|
||||
if (!ost)
|
||||
continue;
|
||||
|
||||
for (j = 0; j < ost->nb_bitstream_filters; j++)
|
||||
av_bsf_free(&ost->bsf_ctx[j]);
|
||||
av_freep(&ost->bsf_ctx);
|
||||
av_bsf_free(&ost->bsf_ctx);
|
||||
|
||||
av_frame_free(&ost->filtered_frame);
|
||||
av_frame_free(&ost->last_frame);
|
||||
@ -567,6 +578,7 @@ static void ffmpeg_cleanup(int ret)
|
||||
ost->audio_channels_mapped = 0;
|
||||
|
||||
av_dict_free(&ost->sws_dict);
|
||||
av_dict_free(&ost->swr_opts);
|
||||
|
||||
avcodec_free_context(&ost->enc_ctx);
|
||||
avcodec_parameters_free(&ost->ref_par);
|
||||
@ -779,6 +791,8 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u
|
||||
int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
|
||||
if (pkt->dts < max) {
|
||||
int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
|
||||
if (exit_on_error)
|
||||
loglevel = AV_LOG_ERROR;
|
||||
av_log(s, loglevel, "Non-monotonous DTS in output stream "
|
||||
"%d:%d; previous: %"PRId64", current: %"PRId64"; ",
|
||||
ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
|
||||
@ -848,40 +862,15 @@ static void output_packet(OutputFile *of, AVPacket *pkt,
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
/* apply the output bitstream filters, if any */
|
||||
if (ost->nb_bitstream_filters) {
|
||||
int idx;
|
||||
|
||||
ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
|
||||
/* apply the output bitstream filters */
|
||||
if (ost->bsf_ctx) {
|
||||
ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
|
||||
if (ret < 0)
|
||||
goto finish;
|
||||
|
||||
eof = 0;
|
||||
idx = 1;
|
||||
while (idx) {
|
||||
/* get a packet from the previous filter up the chain */
|
||||
ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
|
||||
if (ret == AVERROR(EAGAIN)) {
|
||||
ret = 0;
|
||||
idx--;
|
||||
continue;
|
||||
} else if (ret == AVERROR_EOF) {
|
||||
eof = 1;
|
||||
} else if (ret < 0)
|
||||
goto finish;
|
||||
|
||||
/* send it to the next filter down the chain or to the muxer */
|
||||
if (idx < ost->nb_bitstream_filters) {
|
||||
ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
|
||||
if (ret < 0)
|
||||
goto finish;
|
||||
idx++;
|
||||
eof = 0;
|
||||
} else if (eof)
|
||||
goto finish;
|
||||
else
|
||||
write_packet(of, pkt, ost, 0);
|
||||
}
|
||||
while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
|
||||
write_packet(of, pkt, ost, 0);
|
||||
if (ret == AVERROR(EAGAIN))
|
||||
ret = 0;
|
||||
} else if (!eof)
|
||||
write_packet(of, pkt, ost, 0);
|
||||
|
||||
@ -1136,7 +1125,7 @@ static void do_video_out(OutputFile *of,
|
||||
av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
|
||||
delta = duration;
|
||||
delta0 = 0;
|
||||
ost->sync_opts = lrint(sync_ipts);
|
||||
ost->sync_opts = llrint(sync_ipts);
|
||||
}
|
||||
case VSYNC_CFR:
|
||||
// FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
|
||||
@ -1147,18 +1136,18 @@ static void do_video_out(OutputFile *of,
|
||||
else if (delta > 1.1) {
|
||||
nb_frames = lrintf(delta);
|
||||
if (delta0 > 1.1)
|
||||
nb0_frames = lrintf(delta0 - 0.6);
|
||||
nb0_frames = llrintf(delta0 - 0.6);
|
||||
}
|
||||
break;
|
||||
case VSYNC_VFR:
|
||||
if (delta <= -0.6)
|
||||
nb_frames = 0;
|
||||
else if (delta > 0.6)
|
||||
ost->sync_opts = lrint(sync_ipts);
|
||||
ost->sync_opts = llrint(sync_ipts);
|
||||
break;
|
||||
case VSYNC_DROP:
|
||||
case VSYNC_PASSTHROUGH:
|
||||
ost->sync_opts = lrint(sync_ipts);
|
||||
ost->sync_opts = llrint(sync_ipts);
|
||||
break;
|
||||
default:
|
||||
av_assert0(0);
|
||||
@ -1265,7 +1254,8 @@ static void do_video_out(OutputFile *of,
|
||||
ost->forced_keyframes_expr_const_values[FKF_N] += 1;
|
||||
} else if ( ost->forced_keyframes
|
||||
&& !strncmp(ost->forced_keyframes, "source", 6)
|
||||
&& in_picture->key_frame==1) {
|
||||
&& in_picture->key_frame==1
|
||||
&& !i) {
|
||||
forced_keyframe = 1;
|
||||
}
|
||||
|
||||
@ -1903,9 +1893,6 @@ static void flush_encoders(void)
|
||||
}
|
||||
}
|
||||
|
||||
if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
|
||||
continue;
|
||||
|
||||
if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
|
||||
continue;
|
||||
|
||||
@ -1995,12 +1982,13 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
|
||||
InputFile *f = input_files [ist->file_index];
|
||||
int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
|
||||
int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
|
||||
AVPacket opkt = { 0 };
|
||||
|
||||
av_init_packet(&opkt);
|
||||
AVPacket opkt;
|
||||
|
||||
// EOF: flush output bitstream filters.
|
||||
if (!pkt) {
|
||||
av_init_packet(&opkt);
|
||||
opkt.data = NULL;
|
||||
opkt.size = 0;
|
||||
output_packet(of, &opkt, ost, 1);
|
||||
return;
|
||||
}
|
||||
@ -2039,40 +2027,29 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
|
||||
if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
|
||||
ost->sync_opts++;
|
||||
|
||||
if (av_packet_ref(&opkt, pkt) < 0)
|
||||
exit_program(1);
|
||||
|
||||
if (pkt->pts != AV_NOPTS_VALUE)
|
||||
opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
|
||||
else
|
||||
opkt.pts = AV_NOPTS_VALUE;
|
||||
|
||||
if (pkt->dts == AV_NOPTS_VALUE)
|
||||
if (pkt->dts == AV_NOPTS_VALUE) {
|
||||
opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
|
||||
else
|
||||
opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
|
||||
opkt.dts -= ost_tb_start_time;
|
||||
|
||||
if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
|
||||
} else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
|
||||
if(!duration)
|
||||
duration = ist->dec_ctx->frame_size;
|
||||
opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
|
||||
(AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
|
||||
ost->mux_timebase) - ost_tb_start_time;
|
||||
}
|
||||
opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
|
||||
(AVRational){1, ist->dec_ctx->sample_rate}, duration,
|
||||
&ist->filter_in_rescale_delta_last, ost->mux_timebase);
|
||||
/* dts will be set immediately afterwards to what pts is now */
|
||||
opkt.pts = opkt.dts - ost_tb_start_time;
|
||||
} else
|
||||
opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
|
||||
opkt.dts -= ost_tb_start_time;
|
||||
|
||||
opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
|
||||
|
||||
opkt.flags = pkt->flags;
|
||||
|
||||
if (pkt->buf) {
|
||||
opkt.buf = av_buffer_ref(pkt->buf);
|
||||
if (!opkt.buf)
|
||||
exit_program(1);
|
||||
}
|
||||
opkt.data = pkt->data;
|
||||
opkt.size = pkt->size;
|
||||
|
||||
av_copy_packet_side_data(&opkt, pkt);
|
||||
|
||||
output_packet(of, &opkt, ost, 0);
|
||||
}
|
||||
|
||||
@ -2393,7 +2370,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_
|
||||
av_log(ist->dec_ctx, AV_LOG_WARNING,
|
||||
"video_delay is larger in decoder than demuxer %d > %d.\n"
|
||||
"If you want to help, upload a sample "
|
||||
"of this file to ftp://upload.ffmpeg.org/incoming/ "
|
||||
"of this file to https://streams.videolan.org/upload/ "
|
||||
"and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
|
||||
ist->dec_ctx->has_b_frames,
|
||||
ist->st->codecpar->video_delay);
|
||||
@ -2515,7 +2492,7 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
|
||||
return ret;
|
||||
|
||||
if (ist->sub2video.frame) {
|
||||
sub2video_update(ist, &subtitle);
|
||||
sub2video_update(ist, INT64_MIN, &subtitle);
|
||||
} else if (ist->nb_filters) {
|
||||
if (!ist->sub2video.sub_queue)
|
||||
ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
|
||||
@ -2784,7 +2761,7 @@ static void print_sdp(void)
|
||||
if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
|
||||
} else {
|
||||
avio_printf(sdp_pb, "SDP:\n%s", sdp);
|
||||
avio_print(sdp_pb, sdp);
|
||||
avio_closep(&sdp_pb);
|
||||
av_freep(&sdp_filename);
|
||||
}
|
||||
@ -3016,35 +2993,28 @@ static int check_init_output_file(OutputFile *of, int file_index)
|
||||
|
||||
static int init_output_bsfs(OutputStream *ost)
|
||||
{
|
||||
AVBSFContext *ctx;
|
||||
int i, ret;
|
||||
AVBSFContext *ctx = ost->bsf_ctx;
|
||||
int ret;
|
||||
|
||||
if (!ost->nb_bitstream_filters)
|
||||
if (!ctx)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < ost->nb_bitstream_filters; i++) {
|
||||
ctx = ost->bsf_ctx[i];
|
||||
|
||||
ret = avcodec_parameters_copy(ctx->par_in,
|
||||
i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
|
||||
|
||||
ret = av_bsf_init(ctx);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
|
||||
ost->bsf_ctx[i]->filter->name);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
|
||||
ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
|
||||
ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ctx->time_base_in = ost->st->time_base;
|
||||
|
||||
ret = av_bsf_init(ctx);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
|
||||
ctx->filter->name);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ost->st->time_base = ctx->time_base_out;
|
||||
|
||||
return 0;
|
||||
@ -3376,10 +3346,6 @@ static int init_output_stream_encode(OutputStream *ost)
|
||||
av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
|
||||
"Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
|
||||
}
|
||||
for (j = 0; j < ost->forced_kf_count; j++)
|
||||
ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
|
||||
AV_TIME_BASE_Q,
|
||||
enc_ctx->time_base);
|
||||
|
||||
enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
|
||||
enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
|
||||
@ -3481,21 +3447,14 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
|
||||
!av_dict_get(ost->encoder_opts, "ab", NULL, 0))
|
||||
av_dict_set(&ost->encoder_opts, "b", "128000", 0);
|
||||
|
||||
if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
|
||||
((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
|
||||
av_buffersink_get_format(ost->filter->filter)) {
|
||||
ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
|
||||
if (!ost->enc_ctx->hw_frames_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
} else {
|
||||
ret = hw_device_setup_for_encode(ost);
|
||||
if (ret < 0) {
|
||||
snprintf(error, error_len, "Device setup failed for "
|
||||
"encoder on output stream #%d:%d : %s",
|
||||
ret = hw_device_setup_for_encode(ost);
|
||||
if (ret < 0) {
|
||||
snprintf(error, error_len, "Device setup failed for "
|
||||
"encoder on output stream #%d:%d : %s",
|
||||
ost->file_index, ost->index, av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
|
||||
int input_props = 0, output_props = 0;
|
||||
AVCodecDescriptor const *input_descriptor =
|
||||
@ -3571,12 +3530,14 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
|
||||
int i;
|
||||
for (i = 0; i < ist->st->nb_side_data; i++) {
|
||||
AVPacketSideData *sd = &ist->st->side_data[i];
|
||||
uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
|
||||
if (!dst)
|
||||
return AVERROR(ENOMEM);
|
||||
memcpy(dst, sd->data, sd->size);
|
||||
if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
|
||||
av_display_rotation_set((uint32_t *)dst, 0);
|
||||
if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
|
||||
uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
|
||||
if (!dst)
|
||||
return AVERROR(ENOMEM);
|
||||
memcpy(dst, sd->data, sd->size);
|
||||
if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
|
||||
av_display_rotation_set((uint32_t *)dst, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -4195,7 +4156,7 @@ static int seek_to_start(InputFile *ifile, AVFormatContext *is)
|
||||
int i, ret, has_audio = 0;
|
||||
int64_t duration = 0;
|
||||
|
||||
ret = av_seek_frame(is, -1, is->start_time, 0);
|
||||
ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@ -4235,7 +4196,8 @@ static int seek_to_start(InputFile *ifile, AVFormatContext *is)
|
||||
ifile->time_base = ist->st->time_base;
|
||||
/* the total duration of the stream, max_pts - min_pts is
|
||||
* the duration of the stream without the last frame */
|
||||
duration += ist->max_pts - ist->min_pts;
|
||||
if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
|
||||
duration += ist->max_pts - ist->min_pts;
|
||||
ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
|
||||
ifile->time_base);
|
||||
}
|
||||
@ -4262,6 +4224,7 @@ static int process_input(int file_index)
|
||||
int ret, thread_ret, i, j;
|
||||
int64_t duration;
|
||||
int64_t pkt_dts;
|
||||
int disable_discontinuity_correction = copy_ts;
|
||||
|
||||
is = ifile->ctx;
|
||||
ret = get_input_packet(ifile, &pkt);
|
||||
@ -4463,10 +4426,20 @@ static int process_input(int file_index)
|
||||
pkt.dts += duration;
|
||||
|
||||
pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
|
||||
if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
|
||||
(is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
|
||||
int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
|
||||
ist->st->time_base, AV_TIME_BASE_Q,
|
||||
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
|
||||
disable_discontinuity_correction = 0;
|
||||
}
|
||||
|
||||
if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
|
||||
ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
|
||||
pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
|
||||
!copy_ts) {
|
||||
!disable_discontinuity_correction) {
|
||||
int64_t delta = pkt_dts - ist->next_dts;
|
||||
if (is->iformat->flags & AVFMT_TS_DISCONT) {
|
||||
if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
|
||||
@ -4740,6 +4713,10 @@ static int transcode(void)
|
||||
av_freep(&ost->enc_ctx->stats_in);
|
||||
}
|
||||
total_packets_written += ost->packets_written;
|
||||
if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
|
||||
exit_program(1);
|
||||
}
|
||||
}
|
||||
|
||||
if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
|
||||
@ -4757,7 +4734,6 @@ static int transcode(void)
|
||||
}
|
||||
}
|
||||
|
||||
av_buffer_unref(&hw_device_ctx);
|
||||
hw_device_free_all();
|
||||
|
||||
/* finished ! */
|
||||
|
@ -61,7 +61,6 @@ enum HWAccelID {
|
||||
HWACCEL_GENERIC,
|
||||
HWACCEL_VIDEOTOOLBOX,
|
||||
HWACCEL_QSV,
|
||||
HWACCEL_CUVID,
|
||||
};
|
||||
|
||||
typedef struct HWAccel {
|
||||
@ -349,6 +348,7 @@ typedef struct InputStream {
|
||||
AVFifoBuffer *sub_queue; ///< queue of AVSubtitle* before filter init
|
||||
AVFrame *frame;
|
||||
int w, h;
|
||||
unsigned int initialize; ///< marks if sub2video_update should force an initialization
|
||||
} sub2video;
|
||||
|
||||
int dr1;
|
||||
@ -430,7 +430,8 @@ enum forced_keyframes_const {
|
||||
FKF_NB
|
||||
};
|
||||
|
||||
#define ABORT_ON_FLAG_EMPTY_OUTPUT (1 << 0)
|
||||
#define ABORT_ON_FLAG_EMPTY_OUTPUT (1 << 0)
|
||||
#define ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM (1 << 1)
|
||||
|
||||
extern const char *const forced_keyframes_const_names[];
|
||||
|
||||
@ -459,8 +460,7 @@ typedef struct OutputStream {
|
||||
AVRational mux_timebase;
|
||||
AVRational enc_timebase;
|
||||
|
||||
int nb_bitstream_filters;
|
||||
AVBSFContext **bsf_ctx;
|
||||
AVBSFContext *bsf_ctx;
|
||||
|
||||
AVCodecContext *enc_ctx;
|
||||
AVCodecParameters *ref_par; /* associated input codec parameters with encoders options applied */
|
||||
@ -615,7 +615,6 @@ extern const AVIOInterruptCB int_cb;
|
||||
|
||||
extern const OptionDef options[];
|
||||
extern const HWAccel hwaccels[];
|
||||
extern AVBufferRef *hw_device_ctx;
|
||||
#if CONFIG_QSV
|
||||
extern char *qsv_device;
|
||||
#endif
|
||||
@ -646,7 +645,7 @@ int filtergraph_is_simple(FilterGraph *fg);
|
||||
int init_simple_filtergraph(InputStream *ist, OutputStream *ost);
|
||||
int init_complex_filtergraph(FilterGraph *fg);
|
||||
|
||||
void sub2video_update(InputStream *ist, AVSubtitle *sub);
|
||||
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub);
|
||||
|
||||
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame);
|
||||
|
||||
@ -654,7 +653,6 @@ int ffmpeg_parse_options(int argc, char **argv);
|
||||
|
||||
int videotoolbox_init(AVCodecContext *s);
|
||||
int qsv_init(AVCodecContext *s);
|
||||
int cuvid_init(AVCodecContext *s);
|
||||
|
||||
HWDevice *hw_device_get_by_name(const char *name);
|
||||
int hw_device_init_from_string(const char *arg, HWDevice **dev);
|
||||
@ -662,6 +660,7 @@ void hw_device_free_all(void);
|
||||
|
||||
int hw_device_setup_for_decode(InputStream *ist);
|
||||
int hw_device_setup_for_encode(OutputStream *ost);
|
||||
int hw_device_setup_for_filter(FilterGraph *fg);
|
||||
|
||||
int hwaccel_decode_init(AVCodecContext *avctx);
|
||||
|
||||
|
@ -1,73 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "libavutil/hwcontext.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
|
||||
#include "ffmpeg.h"
|
||||
|
||||
static void cuvid_uninit(AVCodecContext *avctx)
|
||||
{
|
||||
InputStream *ist = avctx->opaque;
|
||||
av_buffer_unref(&ist->hw_frames_ctx);
|
||||
}
|
||||
|
||||
int cuvid_init(AVCodecContext *avctx)
|
||||
{
|
||||
InputStream *ist = avctx->opaque;
|
||||
AVHWFramesContext *frames_ctx;
|
||||
int ret;
|
||||
|
||||
av_log(avctx, AV_LOG_VERBOSE, "Initializing cuvid hwaccel\n");
|
||||
|
||||
if (!hw_device_ctx) {
|
||||
ret = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_CUDA,
|
||||
ist->hwaccel_device, NULL, 0);
|
||||
if (ret < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Error creating a CUDA device\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
av_buffer_unref(&ist->hw_frames_ctx);
|
||||
ist->hw_frames_ctx = av_hwframe_ctx_alloc(hw_device_ctx);
|
||||
if (!ist->hw_frames_ctx) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Error creating a CUDA frames context\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
frames_ctx = (AVHWFramesContext*)ist->hw_frames_ctx->data;
|
||||
|
||||
frames_ctx->format = AV_PIX_FMT_CUDA;
|
||||
frames_ctx->sw_format = avctx->sw_pix_fmt;
|
||||
frames_ctx->width = avctx->width;
|
||||
frames_ctx->height = avctx->height;
|
||||
|
||||
av_log(avctx, AV_LOG_DEBUG, "Initializing CUDA frames context: sw_format = %s, width = %d, height = %d\n",
|
||||
av_get_pix_fmt_name(frames_ctx->sw_format), frames_ctx->width, frames_ctx->height);
|
||||
|
||||
ret = av_hwframe_ctx_init(ist->hw_frames_ctx);
|
||||
if (ret < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Error initializing a CUDA frame pool\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ist->hwaccel_uninit = cuvid_uninit;
|
||||
|
||||
return 0;
|
||||
}
|
@ -99,7 +99,8 @@ void choose_sample_fmt(AVStream *st, AVCodec *codec)
|
||||
break;
|
||||
}
|
||||
if (*p == -1) {
|
||||
if((codec->capabilities & AV_CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codecpar->format) > av_get_sample_fmt_name(codec->sample_fmts[0]))
|
||||
const AVCodecDescriptor *desc = avcodec_descriptor_get(codec->id);
|
||||
if(desc && (desc->props & AV_CODEC_PROP_LOSSLESS) && av_get_sample_fmt_name(st->codecpar->format) > av_get_sample_fmt_name(codec->sample_fmts[0]))
|
||||
av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n");
|
||||
if(av_get_sample_fmt_name(st->codecpar->format))
|
||||
av_log(NULL, AV_LOG_WARNING,
|
||||
@ -740,6 +741,12 @@ static int sub2video_prepare(InputStream *ist, InputFilter *ifilter)
|
||||
return AVERROR(ENOMEM);
|
||||
ist->sub2video.last_pts = INT64_MIN;
|
||||
ist->sub2video.end_pts = INT64_MIN;
|
||||
|
||||
/* sub2video structure has been (re-)initialized.
|
||||
Mark it as such so that the system will be
|
||||
initialized with the first received heartbeat. */
|
||||
ist->sub2video.initialize = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -786,10 +793,9 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
|
||||
av_bprintf(&args,
|
||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
|
||||
"pixel_aspect=%d/%d:sws_param=flags=%d",
|
||||
"pixel_aspect=%d/%d",
|
||||
ifilter->width, ifilter->height, ifilter->format,
|
||||
tb.num, tb.den, sar.num, sar.den,
|
||||
SWS_BILINEAR + ((ist->dec_ctx->flags&AV_CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
|
||||
tb.num, tb.den, sar.num, sar.den);
|
||||
if (fr.num && fr.den)
|
||||
av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
|
||||
snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
|
||||
@ -1056,17 +1062,9 @@ int configure_filtergraph(FilterGraph *fg)
|
||||
if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
|
||||
goto fail;
|
||||
|
||||
if (filter_hw_device || hw_device_ctx) {
|
||||
AVBufferRef *device = filter_hw_device ? filter_hw_device->device_ref
|
||||
: hw_device_ctx;
|
||||
for (i = 0; i < fg->graph->nb_filters; i++) {
|
||||
fg->graph->filters[i]->hw_device_ctx = av_buffer_ref(device);
|
||||
if (!fg->graph->filters[i]->hw_device_ctx) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
}
|
||||
ret = hw_device_setup_for_filter(fg);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
|
||||
if (simple && (!inputs || inputs->next || !outputs || outputs->next)) {
|
||||
const char *num_inputs;
|
||||
@ -1169,7 +1167,7 @@ int configure_filtergraph(FilterGraph *fg)
|
||||
while (av_fifo_size(ist->sub2video.sub_queue)) {
|
||||
AVSubtitle tmp;
|
||||
av_fifo_generic_read(ist->sub2video.sub_queue, &tmp, sizeof(tmp), NULL);
|
||||
sub2video_update(ist, &tmp);
|
||||
sub2video_update(ist, INT64_MIN, &tmp);
|
||||
avsubtitle_free(&tmp);
|
||||
}
|
||||
}
|
||||
|
@ -19,6 +19,8 @@
|
||||
#include <string.h>
|
||||
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
#include "libavfilter/buffersink.h"
|
||||
|
||||
#include "ffmpeg.h"
|
||||
|
||||
@ -416,18 +418,57 @@ int hw_device_setup_for_decode(InputStream *ist)
|
||||
|
||||
int hw_device_setup_for_encode(OutputStream *ost)
|
||||
{
|
||||
HWDevice *dev;
|
||||
const AVCodecHWConfig *config;
|
||||
HWDevice *dev = NULL;
|
||||
AVBufferRef *frames_ref = NULL;
|
||||
int i;
|
||||
|
||||
if (ost->filter) {
|
||||
frames_ref = av_buffersink_get_hw_frames_ctx(ost->filter->filter);
|
||||
if (frames_ref &&
|
||||
((AVHWFramesContext*)frames_ref->data)->format ==
|
||||
ost->enc_ctx->pix_fmt) {
|
||||
// Matching format, will try to use hw_frames_ctx.
|
||||
} else {
|
||||
frames_ref = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0;; i++) {
|
||||
config = avcodec_get_hw_config(ost->enc, i);
|
||||
if (!config)
|
||||
break;
|
||||
|
||||
if (frames_ref &&
|
||||
config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX &&
|
||||
(config->pix_fmt == AV_PIX_FMT_NONE ||
|
||||
config->pix_fmt == ost->enc_ctx->pix_fmt)) {
|
||||
av_log(ost->enc_ctx, AV_LOG_VERBOSE, "Using input "
|
||||
"frames context (format %s) with %s encoder.\n",
|
||||
av_get_pix_fmt_name(ost->enc_ctx->pix_fmt),
|
||||
ost->enc->name);
|
||||
ost->enc_ctx->hw_frames_ctx = av_buffer_ref(frames_ref);
|
||||
if (!ost->enc_ctx->hw_frames_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!dev &&
|
||||
config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX)
|
||||
dev = hw_device_get_by_type(config->device_type);
|
||||
}
|
||||
|
||||
dev = hw_device_match_by_codec(ost->enc);
|
||||
if (dev) {
|
||||
av_log(ost->enc_ctx, AV_LOG_VERBOSE, "Using device %s "
|
||||
"(type %s) with %s encoder.\n", dev->name,
|
||||
av_hwdevice_get_type_name(dev->type), ost->enc->name);
|
||||
ost->enc_ctx->hw_device_ctx = av_buffer_ref(dev->device_ref);
|
||||
if (!ost->enc_ctx->hw_device_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
return 0;
|
||||
} else {
|
||||
// No device required, or no device available.
|
||||
return 0;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hwaccel_retrieve_data(AVCodecContext *avctx, AVFrame *input)
|
||||
@ -480,3 +521,31 @@ int hwaccel_decode_init(AVCodecContext *avctx)
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int hw_device_setup_for_filter(FilterGraph *fg)
|
||||
{
|
||||
HWDevice *dev;
|
||||
int i;
|
||||
|
||||
// If the user has supplied exactly one hardware device then just
|
||||
// give it straight to every filter for convenience. If more than
|
||||
// one device is available then the user needs to pick one explcitly
|
||||
// with the filter_hw_device option.
|
||||
if (filter_hw_device)
|
||||
dev = filter_hw_device;
|
||||
else if (nb_hw_devices == 1)
|
||||
dev = hw_devices[0];
|
||||
else
|
||||
dev = NULL;
|
||||
|
||||
if (dev) {
|
||||
for (i = 0; i < fg->graph->nb_filters; i++) {
|
||||
fg->graph->filters[i]->hw_device_ctx =
|
||||
av_buffer_ref(dev->device_ref);
|
||||
if (!fg->graph->filters[i]->hw_device_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1,3 +1,4 @@
|
||||
|
||||
/*
|
||||
* ffmpeg option parsing
|
||||
*
|
||||
@ -43,16 +44,80 @@
|
||||
|
||||
#define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
|
||||
|
||||
#define SPECIFIER_OPT_FMT_str "%s"
|
||||
#define SPECIFIER_OPT_FMT_i "%i"
|
||||
#define SPECIFIER_OPT_FMT_i64 "%"PRId64
|
||||
#define SPECIFIER_OPT_FMT_ui64 "%"PRIu64
|
||||
#define SPECIFIER_OPT_FMT_f "%f"
|
||||
#define SPECIFIER_OPT_FMT_dbl "%lf"
|
||||
|
||||
static const char *opt_name_codec_names[] = {"c", "codec", "acodec", "vcodec", "scodec", "dcodec", NULL};
|
||||
static const char *opt_name_audio_channels[] = {"ac", NULL};
|
||||
static const char *opt_name_audio_sample_rate[] = {"ar", NULL};
|
||||
static const char *opt_name_frame_rates[] = {"r", NULL};
|
||||
static const char *opt_name_frame_sizes[] = {"s", NULL};
|
||||
static const char *opt_name_frame_pix_fmts[] = {"pix_fmt", NULL};
|
||||
static const char *opt_name_ts_scale[] = {"itsscale", NULL};
|
||||
static const char *opt_name_hwaccels[] = {"hwaccel", NULL};
|
||||
static const char *opt_name_hwaccel_devices[] = {"hwaccel_device", NULL};
|
||||
static const char *opt_name_hwaccel_output_formats[] = {"hwaccel_output_format", NULL};
|
||||
static const char *opt_name_autorotate[] = {"autorotate", NULL};
|
||||
static const char *opt_name_max_frames[] = {"frames", "aframes", "vframes", "dframes", NULL};
|
||||
static const char *opt_name_bitstream_filters[] = {"bsf", "absf", "vbsf", NULL};
|
||||
static const char *opt_name_codec_tags[] = {"tag", "atag", "vtag", "stag", NULL};
|
||||
static const char *opt_name_sample_fmts[] = {"sample_fmt", NULL};
|
||||
static const char *opt_name_qscale[] = {"q", "qscale", NULL};
|
||||
static const char *opt_name_forced_key_frames[] = {"forced_key_frames", NULL};
|
||||
static const char *opt_name_force_fps[] = {"force_fps", NULL};
|
||||
static const char *opt_name_frame_aspect_ratios[] = {"aspect", NULL};
|
||||
static const char *opt_name_rc_overrides[] = {"rc_override", NULL};
|
||||
static const char *opt_name_intra_matrices[] = {"intra_matrix", NULL};
|
||||
static const char *opt_name_inter_matrices[] = {"inter_matrix", NULL};
|
||||
static const char *opt_name_chroma_intra_matrices[] = {"chroma_intra_matrix", NULL};
|
||||
static const char *opt_name_top_field_first[] = {"top", NULL};
|
||||
static const char *opt_name_presets[] = {"pre", "apre", "vpre", "spre", NULL};
|
||||
static const char *opt_name_copy_initial_nonkeyframes[] = {"copyinkfr", NULL};
|
||||
static const char *opt_name_copy_prior_start[] = {"copypriorss", NULL};
|
||||
static const char *opt_name_filters[] = {"filter", "af", "vf", NULL};
|
||||
static const char *opt_name_filter_scripts[] = {"filter_script", NULL};
|
||||
static const char *opt_name_reinit_filters[] = {"reinit_filter", NULL};
|
||||
static const char *opt_name_fix_sub_duration[] = {"fix_sub_duration", NULL};
|
||||
static const char *opt_name_canvas_sizes[] = {"canvas_size", NULL};
|
||||
static const char *opt_name_pass[] = {"pass", NULL};
|
||||
static const char *opt_name_passlogfiles[] = {"passlogfile", NULL};
|
||||
static const char *opt_name_max_muxing_queue_size[] = {"max_muxing_queue_size", NULL};
|
||||
static const char *opt_name_guess_layout_max[] = {"guess_layout_max", NULL};
|
||||
static const char *opt_name_apad[] = {"apad", NULL};
|
||||
static const char *opt_name_discard[] = {"discard", NULL};
|
||||
static const char *opt_name_disposition[] = {"disposition", NULL};
|
||||
static const char *opt_name_time_bases[] = {"time_base", NULL};
|
||||
static const char *opt_name_enc_time_bases[] = {"enc_time_base", NULL};
|
||||
|
||||
#define WARN_MULTIPLE_OPT_USAGE(name, type, so, st)\
|
||||
{\
|
||||
char namestr[128] = "";\
|
||||
const char *spec = so->specifier && so->specifier[0] ? so->specifier : "";\
|
||||
for (i = 0; opt_name_##name[i]; i++)\
|
||||
av_strlcatf(namestr, sizeof(namestr), "-%s%s", opt_name_##name[i], opt_name_##name[i+1] ? (opt_name_##name[i+2] ? ", " : " or ") : "");\
|
||||
av_log(NULL, AV_LOG_WARNING, "Multiple %s options specified for stream %d, only the last option '-%s%s%s "SPECIFIER_OPT_FMT_##type"' will be used.\n",\
|
||||
namestr, st->index, opt_name_##name[0], spec[0] ? ":" : "", spec, so->u.type);\
|
||||
}
|
||||
|
||||
#define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\
|
||||
{\
|
||||
int i, ret;\
|
||||
int i, ret, matches = 0;\
|
||||
SpecifierOpt *so;\
|
||||
for (i = 0; i < o->nb_ ## name; i++) {\
|
||||
char *spec = o->name[i].specifier;\
|
||||
if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0)\
|
||||
if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0) {\
|
||||
outvar = o->name[i].u.type;\
|
||||
else if (ret < 0)\
|
||||
so = &o->name[i];\
|
||||
matches++;\
|
||||
} else if (ret < 0)\
|
||||
exit_program(1);\
|
||||
}\
|
||||
if (matches > 1)\
|
||||
WARN_MULTIPLE_OPT_USAGE(name, type, so, st);\
|
||||
}
|
||||
|
||||
#define MATCH_PER_TYPE_OPT(name, type, outvar, fmtctx, mediatype)\
|
||||
@ -71,13 +136,9 @@ const HWAccel hwaccels[] = {
|
||||
#endif
|
||||
#if CONFIG_LIBMFX
|
||||
{ "qsv", qsv_init, HWACCEL_QSV, AV_PIX_FMT_QSV },
|
||||
#endif
|
||||
#if CONFIG_CUVID
|
||||
{ "cuvid", cuvid_init, HWACCEL_CUVID, AV_PIX_FMT_CUDA },
|
||||
#endif
|
||||
{ 0 },
|
||||
};
|
||||
AVBufferRef *hw_device_ctx;
|
||||
HWDevice *filter_hw_device;
|
||||
|
||||
char *vstats_filename;
|
||||
@ -171,14 +232,11 @@ static void init_options(OptionsContext *o)
|
||||
static int show_hwaccels(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
enum AVHWDeviceType type = AV_HWDEVICE_TYPE_NONE;
|
||||
int i;
|
||||
|
||||
printf("Hardware acceleration methods:\n");
|
||||
while ((type = av_hwdevice_iterate_types(type)) !=
|
||||
AV_HWDEVICE_TYPE_NONE)
|
||||
printf("%s\n", av_hwdevice_get_type_name(type));
|
||||
for (i = 0; hwaccels[i].name; i++)
|
||||
printf("%s\n", hwaccels[i].name);
|
||||
printf("\n");
|
||||
return 0;
|
||||
}
|
||||
@ -204,8 +262,9 @@ static AVDictionary *strip_specifiers(AVDictionary *dict)
|
||||
static int opt_abort_on(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
static const AVOption opts[] = {
|
||||
{ "abort_on" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
|
||||
{ "empty_output" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = ABORT_ON_FLAG_EMPTY_OUTPUT }, .unit = "flags" },
|
||||
{ "abort_on" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
|
||||
{ "empty_output" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = ABORT_ON_FLAG_EMPTY_OUTPUT }, .unit = "flags" },
|
||||
{ "empty_output_stream", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM }, .unit = "flags" },
|
||||
{ NULL },
|
||||
};
|
||||
static const AVClass class = {
|
||||
@ -477,21 +536,15 @@ static int opt_sdp_file(void *optctx, const char *opt, const char *arg)
|
||||
#if CONFIG_VAAPI
|
||||
static int opt_vaapi_device(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
HWDevice *dev;
|
||||
const char *prefix = "vaapi:";
|
||||
char *tmp;
|
||||
int err;
|
||||
tmp = av_asprintf("%s%s", prefix, arg);
|
||||
if (!tmp)
|
||||
return AVERROR(ENOMEM);
|
||||
err = hw_device_init_from_string(tmp, &dev);
|
||||
err = hw_device_init_from_string(tmp, NULL);
|
||||
av_free(tmp);
|
||||
if (err < 0)
|
||||
return err;
|
||||
hw_device_ctx = av_buffer_ref(dev->device_ref);
|
||||
if (!hw_device_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
return 0;
|
||||
return err;
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -819,9 +872,28 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
|
||||
MATCH_PER_STREAM_OPT(top_field_first, i, ist->top_field_first, ic, st);
|
||||
|
||||
MATCH_PER_STREAM_OPT(hwaccels, str, hwaccel, ic, st);
|
||||
MATCH_PER_STREAM_OPT(hwaccel_output_formats, str,
|
||||
hwaccel_output_format, ic, st);
|
||||
|
||||
if (!hwaccel_output_format && hwaccel && !strcmp(hwaccel, "cuvid")) {
|
||||
av_log(NULL, AV_LOG_WARNING,
|
||||
"WARNING: defaulting hwaccel_output_format to cuda for compatibility "
|
||||
"with old commandlines. This behaviour is DEPRECATED and will be removed "
|
||||
"in the future. Please explicitly set \"-hwaccel_output_format cuda\".\n");
|
||||
ist->hwaccel_output_format = AV_PIX_FMT_CUDA;
|
||||
} else if (hwaccel_output_format) {
|
||||
ist->hwaccel_output_format = av_get_pix_fmt(hwaccel_output_format);
|
||||
if (ist->hwaccel_output_format == AV_PIX_FMT_NONE) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Unrecognised hwaccel output "
|
||||
"format: %s", hwaccel_output_format);
|
||||
}
|
||||
} else {
|
||||
ist->hwaccel_output_format = AV_PIX_FMT_NONE;
|
||||
}
|
||||
|
||||
if (hwaccel) {
|
||||
// The NVDEC hwaccels use a CUDA device, so remap the name here.
|
||||
if (!strcmp(hwaccel, "nvdec"))
|
||||
if (!strcmp(hwaccel, "nvdec") || !strcmp(hwaccel, "cuvid"))
|
||||
hwaccel = "cuda";
|
||||
|
||||
if (!strcmp(hwaccel, "none"))
|
||||
@ -855,8 +927,6 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
|
||||
AV_HWDEVICE_TYPE_NONE)
|
||||
av_log(NULL, AV_LOG_FATAL, "%s ",
|
||||
av_hwdevice_get_type_name(type));
|
||||
for (i = 0; hwaccels[i].name; i++)
|
||||
av_log(NULL, AV_LOG_FATAL, "%s ", hwaccels[i].name);
|
||||
av_log(NULL, AV_LOG_FATAL, "\n");
|
||||
exit_program(1);
|
||||
}
|
||||
@ -870,18 +940,6 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
MATCH_PER_STREAM_OPT(hwaccel_output_formats, str,
|
||||
hwaccel_output_format, ic, st);
|
||||
if (hwaccel_output_format) {
|
||||
ist->hwaccel_output_format = av_get_pix_fmt(hwaccel_output_format);
|
||||
if (ist->hwaccel_output_format == AV_PIX_FMT_NONE) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Unrecognised hwaccel output "
|
||||
"format: %s", hwaccel_output_format);
|
||||
}
|
||||
} else {
|
||||
ist->hwaccel_output_format = AV_PIX_FMT_NONE;
|
||||
}
|
||||
|
||||
ist->hwaccel_pix_fmt = AV_PIX_FMT_NONE;
|
||||
|
||||
break;
|
||||
@ -931,7 +989,7 @@ static void assert_file_overwrite(const char *filename)
|
||||
if (!file_overwrite) {
|
||||
if (proto_name && !strcmp(proto_name, "file") && avio_check(filename, 0) == 0) {
|
||||
if (stdin_interaction && !no_file_overwrite) {
|
||||
fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
|
||||
fprintf(stderr,"File '%s' already exists. Overwrite? [y/N] ", filename);
|
||||
fflush(stderr);
|
||||
term_exit();
|
||||
signal(SIGINT, SIG_DFL);
|
||||
@ -1471,54 +1529,12 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
|
||||
MATCH_PER_STREAM_OPT(copy_prior_start, i, ost->copy_prior_start, oc ,st);
|
||||
|
||||
MATCH_PER_STREAM_OPT(bitstream_filters, str, bsfs, oc, st);
|
||||
while (bsfs && *bsfs) {
|
||||
const AVBitStreamFilter *filter;
|
||||
char *bsf, *bsf_options_str, *bsf_name;
|
||||
|
||||
bsf = av_get_token(&bsfs, ",");
|
||||
if (!bsf)
|
||||
exit_program(1);
|
||||
bsf_name = av_strtok(bsf, "=", &bsf_options_str);
|
||||
if (!bsf_name)
|
||||
exit_program(1);
|
||||
|
||||
filter = av_bsf_get_by_name(bsf_name);
|
||||
if (!filter) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf_name);
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
ost->bsf_ctx = av_realloc_array(ost->bsf_ctx,
|
||||
ost->nb_bitstream_filters + 1,
|
||||
sizeof(*ost->bsf_ctx));
|
||||
if (!ost->bsf_ctx)
|
||||
exit_program(1);
|
||||
|
||||
ret = av_bsf_alloc(filter, &ost->bsf_ctx[ost->nb_bitstream_filters]);
|
||||
if (bsfs && *bsfs) {
|
||||
ret = av_bsf_list_parse_str(bsfs, &ost->bsf_ctx);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error allocating a bitstream filter context\n");
|
||||
av_log(NULL, AV_LOG_ERROR, "Error parsing bitstream filter sequence '%s': %s\n", bsfs, av_err2str(ret));
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
ost->nb_bitstream_filters++;
|
||||
|
||||
if (bsf_options_str && filter->priv_class) {
|
||||
const AVOption *opt = av_opt_next(ost->bsf_ctx[ost->nb_bitstream_filters-1]->priv_data, NULL);
|
||||
const char * shorthand[2] = {NULL};
|
||||
|
||||
if (opt)
|
||||
shorthand[0] = opt->name;
|
||||
|
||||
ret = av_opt_set_from_string(ost->bsf_ctx[ost->nb_bitstream_filters-1]->priv_data, bsf_options_str, shorthand, "=", ":");
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error parsing options for bitstream filter %s\n", bsf_name);
|
||||
exit_program(1);
|
||||
}
|
||||
}
|
||||
av_freep(&bsf);
|
||||
|
||||
if (*bsfs)
|
||||
bsfs++;
|
||||
}
|
||||
|
||||
MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
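The manual token-splitting loop is replaced here by a single av_bsf_list_parse_str() call; a rough standalone illustration of that helper (the filter names in the string are only examples):

#include <libavcodec/avcodec.h>

/* Build one bitstream-filter chain from a comma-separated description,
 * the same way the new -bsf handling above does. */
static int make_bsf_chain(AVBSFContext **bsf)
{
    int ret = av_bsf_list_parse_str("h264_mp4toannexb,dump_extra", bsf);
    if (ret < 0)
        return ret;
    /* The caller still has to fill (*bsf)->par_in with the stream's codec
     * parameters and call av_bsf_init() before sending packets. */
    return 0;
}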
|
||||
@ -1681,8 +1697,6 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
|
||||
|
||||
MATCH_PER_STREAM_OPT(filter_scripts, str, ost->filters_script, oc, st);
|
||||
MATCH_PER_STREAM_OPT(filters, str, ost->filters, oc, st);
|
||||
if (o->nb_filters > 1)
|
||||
av_log(NULL, AV_LOG_ERROR, "Only '-vf %s' read, ignoring remaining -vf options: Use ',' to separate filters\n", ost->filters);
|
||||
|
||||
if (!ost->stream_copy) {
|
||||
const char *p = NULL;
|
||||
@ -1864,8 +1878,6 @@ static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc, in
|
||||
|
||||
MATCH_PER_STREAM_OPT(filter_scripts, str, ost->filters_script, oc, st);
|
||||
MATCH_PER_STREAM_OPT(filters, str, ost->filters, oc, st);
|
||||
if (o->nb_filters > 1)
|
||||
av_log(NULL, AV_LOG_ERROR, "Only '-af %s' read, ignoring remaining -af options: Use ',' to separate filters\n", ost->filters);
|
||||
|
||||
if (!ost->stream_copy) {
|
||||
char *sample_fmt = NULL;
|
||||
@ -2372,12 +2384,14 @@ loop_end:
|
||||
o->attachments[i]);
|
||||
exit_program(1);
|
||||
}
|
||||
if (!(attachment = av_malloc(len))) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Attachment %s too large to fit into memory.\n",
|
||||
if (len > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE ||
|
||||
!(attachment = av_malloc(len + AV_INPUT_BUFFER_PADDING_SIZE))) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Attachment %s too large.\n",
|
||||
o->attachments[i]);
|
||||
exit_program(1);
|
||||
}
|
||||
avio_read(pb, attachment, len);
|
||||
memset(attachment + len, 0, AV_INPUT_BUFFER_PADDING_SIZE);
|
||||
|
||||
ost = new_attachment_stream(o, oc, -1);
|
||||
ost->stream_copy = 0;
|
||||
@@ -2769,13 +2783,14 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
} else {
/* Try to determine PAL/NTSC by peeking in the input files */
if (nb_input_files) {
int i, j, fr;
int i, j;
for (j = 0; j < nb_input_files; j++) {
for (i = 0; i < input_files[j]->nb_streams; i++) {
AVStream *st = input_files[j]->ctx->streams[i];
int64_t fr;
if (st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
continue;
fr = st->time_base.den * 1000 / st->time_base.num;
fr = st->time_base.den * 1000LL / st->time_base.num;
if (fr == 25000) {
norm = PAL;
break;
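
The change from 1000 to 1000LL above widens the multiplication to 64 bits before the division, because time_base.den * 1000 can overflow a 32-bit int for large denominators. The same idea in isolation (a sketch; the helper name is made up, and num is assumed positive as it is for valid time bases):

    #include <stdint.h>

    /* Time-base rate scaled by 1000, computed without 32-bit overflow. */
    static int64_t scaled_tb_rate(int num, int den)
    {
        return (int64_t)den * 1000 / num;
    }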
@@ -3005,8 +3020,11 @@ static int opt_preset(void *optctx, const char *opt, const char *arg)
static int opt_old2new(void *optctx, const char *opt, const char *arg)
{
OptionsContext *o = optctx;
int ret;
char *s = av_asprintf("%s:%c", opt + 1, *opt);
int ret = parse_option(o, s, arg, options);
if (!s)
return AVERROR(ENOMEM);
ret = parse_option(o, s, arg, options);
av_free(s);
return ret;
}
@@ -3037,6 +3055,8 @@ static int opt_qscale(void *optctx, const char *opt, const char *arg)
return parse_option(o, "q:v", arg, options);
}
s = av_asprintf("q%s", opt + 6);
if (!s)
return AVERROR(ENOMEM);
ret = parse_option(o, s, arg, options);
av_free(s);
return ret;
@@ -3081,8 +3101,11 @@ static int opt_vsync(void *optctx, const char *opt, const char *arg)
static int opt_timecode(void *optctx, const char *opt, const char *arg)
{
OptionsContext *o = optctx;
int ret;
char *tcr = av_asprintf("timecode=%s", arg);
int ret = parse_option(o, "metadata:g", tcr, options);
if (!tcr)
return AVERROR(ENOMEM);
ret = parse_option(o, "metadata:g", tcr, options);
if (ret >= 0)
ret = av_dict_set(&o->g->codec_opts, "gop_timecode", arg, 0);
av_free(tcr);
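
opt_old2new(), opt_qscale() and opt_timecode() above all get the same treatment: the string built by av_asprintf() is now tested for NULL before it is used, since av_asprintf() returns NULL on allocation failure. A standalone sketch of that shape (the helper name and the dictionary key are illustrative, not ffmpeg's):

    #include <errno.h>
    #include <libavutil/avstring.h>  /* av_asprintf */
    #include <libavutil/dict.h>      /* av_dict_set */
    #include <libavutil/error.h>     /* AVERROR */
    #include <libavutil/mem.h>       /* av_free */

    /* Build "timecode=<arg>" and store it, touching the string only after
     * the NULL check. */
    static int set_timecode_entry(AVDictionary **dict, const char *arg)
    {
        char *tcr = av_asprintf("timecode=%s", arg);
        int ret;

        if (!tcr)
            return AVERROR(ENOMEM);

        ret = av_dict_set(dict, "timecode", tcr, 0);
        av_free(tcr);
        return ret;
    }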
@@ -3184,7 +3207,7 @@ void show_help_default(const char *opt, const char *arg)
" -h -- print basic options\n"
" -h long -- print more options\n"
" -h full -- print all options (including all format and codec specific options, very long)\n"
" -h type=name -- print all options for the named decoder/encoder/demuxer/muxer/filter/bsf\n"
" -h type=name -- print all options for the named decoder/encoder/demuxer/muxer/filter/bsf/protocol\n"
" See man %s for detailed description of the options.\n"
"\n", program_name);

@@ -3192,7 +3215,7 @@ void show_help_default(const char *opt, const char *arg)
OPT_EXIT, 0, 0);

show_help_options(options, "Global options (affect whole program "
"instead of just one file:",
"instead of just one file):",
0, per_file | OPT_EXIT | OPT_EXPERT, 0);
if (show_advanced)
show_help_options(options, "Advanced global options:", OPT_EXPERT,
@@ -3268,6 +3291,7 @@ static int open_files(OptionGroupList *l, const char *inout,
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error parsing options for %s file "
"%s.\n", inout, g->arg);
uninit_options(&o);
return ret;
}

@@ -3441,7 +3465,7 @@ const OptionDef options[] = {
{ "stdin", OPT_BOOL | OPT_EXPERT, { &stdin_interaction },
"enable or disable interaction on standard input" },
{ "timelimit", HAS_ARG | OPT_EXPERT, { .func_arg = opt_timelimit },
"set max runtime in seconds", "limit" },
"set max runtime in seconds in CPU user time", "limit" },
{ "dump", OPT_BOOL | OPT_EXPERT, { &do_pkt_dump },
"dump each input packet" },
{ "hex", OPT_BOOL | OPT_EXPERT, { &do_hex_dump },
@@ -28,6 +28,7 @@

#include "ffmpeg.h"

static AVBufferRef *hw_device_ctx;
char *qsv_device = NULL;

static int qsv_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
@@ -51,10 +51,12 @@ static int videotoolbox_retrieve_data(AVCodecContext *s, AVFrame *frame)
case kCVPixelFormatType_422YpCbCr8: vt->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
case kCVPixelFormatType_32BGRA: vt->tmp_frame->format = AV_PIX_FMT_BGRA; break;
#ifdef kCFCoreFoundationVersionNumber10_7
case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange: vt->tmp_frame->format = AV_PIX_FMT_NV12; break;
case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange:
case kCVPixelFormatType_420YpCbCr8BiPlanarFullRange: vt->tmp_frame->format = AV_PIX_FMT_NV12; break;
#endif
#if HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
case kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange: vt->tmp_frame->format = AV_PIX_FMT_P010; break;
case kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange:
case kCVPixelFormatType_420YpCbCr10BiPlanarFullRange: vt->tmp_frame->format = AV_PIX_FMT_P010; break;
#endif
default:
av_log(NULL, AV_LOG_ERROR,
@@ -65,7 +67,7 @@ static int videotoolbox_retrieve_data(AVCodecContext *s, AVFrame *frame)

vt->tmp_frame->width = frame->width;
vt->tmp_frame->height = frame->height;
ret = av_frame_get_buffer(vt->tmp_frame, 32);
ret = av_frame_get_buffer(vt->tmp_frame, 0);
if (ret < 0)
return ret;

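
The videotoolbox hunk above passes an alignment of 0 to av_frame_get_buffer(), which asks libavutil to pick a suitable alignment for the current CPU instead of hard-coding 32. A minimal allocation sketch using the same call (the helper name is made up):

    #include <libavutil/frame.h>

    /* Allocate a writable NV12 frame, letting FFmpeg choose the buffer alignment. */
    static AVFrame *alloc_nv12_frame(int width, int height)
    {
        AVFrame *frame = av_frame_alloc();
        if (!frame)
            return NULL;

        frame->format = AV_PIX_FMT_NV12;
        frame->width  = width;
        frame->height = height;

        if (av_frame_get_buffer(frame, 0) < 0) {  /* 0 = pick alignment automatically */
            av_frame_free(&frame);
            return NULL;
        }
        return frame;
    }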
@@ -40,6 +40,7 @@
#include "libavutil/samplefmt.h"
#include "libavutil/avassert.h"
#include "libavutil/time.h"
#include "libavutil/bprint.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
@@ -326,7 +327,7 @@ static int display_disable;
static int borderless;
static int alwaysontop;
static int startup_volume = 100;
static int show_status = 1;
static int show_status = -1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
@@ -644,7 +645,10 @@ static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
return -1;
}
} while (d->queue->serial != d->pkt_serial);
if (d->queue->serial == d->pkt_serial)
break;
av_packet_unref(&pkt);
} while (1);

if (pkt.data == flush_pkt.data) {
avcodec_flush_buffers(d->avctx);
@@ -1689,6 +1693,7 @@ display:
}
is->force_refresh = 0;
if (show_status) {
AVBPrint buf;
static int64_t last_time;
int64_t cur_time;
int aqsize, vqsize, sqsize;
@@ -1712,18 +1717,28 @@ display:
av_diff = get_master_clock(is) - get_clock(&is->vidclk);
else if (is->audio_st)
av_diff = get_master_clock(is) - get_clock(&is->audclk);
av_log(NULL, AV_LOG_INFO,
"%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
get_master_clock(is),
(is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
av_diff,
is->frame_drops_early + is->frame_drops_late,
aqsize / 1024,
vqsize / 1024,
sqsize,
is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
fflush(stdout);

av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
av_bprintf(&buf,
"%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
get_master_clock(is),
(is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
av_diff,
is->frame_drops_early + is->frame_drops_late,
aqsize / 1024,
vqsize / 1024,
sqsize,
is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);

if (show_status == 1 && AV_LOG_INFO > av_log_get_level())
fprintf(stderr, "%s", buf.str);
else
av_log(NULL, AV_LOG_INFO, "%s", buf.str);

fflush(stderr);
av_bprint_finalize(&buf, NULL);

last_time = cur_time;
}
}
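
The ffplay change above builds the status line in an AVBPrint buffer instead of formatting it directly inside av_log(), so the same string can be sent to stderr or to the logger depending on the log level. A small sketch of the AVBPrint calls used there:

    #include <stdio.h>
    #include <libavutil/bprint.h>

    static void print_status_line(double master_clock, int frame_drops)
    {
        AVBPrint buf;

        av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
        av_bprintf(&buf, "%7.2f fd=%4d\r", master_clock, frame_drops);

        fprintf(stderr, "%s", buf.str);   /* buf.str is always NUL-terminated */
        av_bprint_finalize(&buf, NULL);   /* release any heap-allocated storage */
    }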
@ -2760,9 +2775,6 @@ static int read_thread(void *arg)
|
||||
}
|
||||
|
||||
memset(st_index, -1, sizeof(st_index));
|
||||
is->last_video_stream = is->video_stream = -1;
|
||||
is->last_audio_stream = is->audio_stream = -1;
|
||||
is->last_subtitle_stream = is->subtitle_stream = -1;
|
||||
is->eof = 0;
|
||||
|
||||
ic = avformat_alloc_context();
|
||||
@ -2974,7 +2986,7 @@ static int read_thread(void *arg)
|
||||
}
|
||||
if (is->queue_attachments_req) {
|
||||
if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
|
||||
AVPacket copy = { 0 };
|
||||
AVPacket copy;
|
||||
if ((ret = av_packet_ref(©, &is->video_st->attached_pic)) < 0)
|
||||
goto fail;
|
||||
packet_queue_put(&is->videoq, ©);
|
||||
@ -3068,6 +3080,9 @@ static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
|
||||
is = av_mallocz(sizeof(VideoState));
|
||||
if (!is)
|
||||
return NULL;
|
||||
is->last_video_stream = is->video_stream = -1;
|
||||
is->last_audio_stream = is->audio_stream = -1;
|
||||
is->last_subtitle_stream = is->subtitle_stream = -1;
|
||||
is->filename = av_strdup(filename);
|
||||
if (!is->filename)
|
||||
goto fail;
|
||||
|
@ -36,6 +36,7 @@
|
||||
#include "libavutil/display.h"
|
||||
#include "libavutil/hash.h"
|
||||
#include "libavutil/mastering_display_metadata.h"
|
||||
#include "libavutil/dovi_meta.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
#include "libavutil/spherical.h"
|
||||
@ -254,6 +255,7 @@ static const OptionDef *options;
|
||||
|
||||
/* FFprobe context */
|
||||
static const char *input_filename;
|
||||
static const char *print_input_filename;
|
||||
static AVInputFormat *iformat = NULL;
|
||||
|
||||
static struct AVHashContext *hash;
|
||||
@ -1083,12 +1085,12 @@ typedef struct CompactContext {
|
||||
#define OFFSET(x) offsetof(CompactContext, x)
|
||||
|
||||
static const AVOption compact_options[]= {
|
||||
{"item_sep", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str="|"}, CHAR_MIN, CHAR_MAX },
|
||||
{"s", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str="|"}, CHAR_MIN, CHAR_MAX },
|
||||
{"item_sep", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str="|"}, 0, 0 },
|
||||
{"s", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str="|"}, 0, 0 },
|
||||
{"nokey", "force no key printing", OFFSET(nokey), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1 },
|
||||
{"nk", "force no key printing", OFFSET(nokey), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1 },
|
||||
{"escape", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="c"}, CHAR_MIN, CHAR_MAX },
|
||||
{"e", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="c"}, CHAR_MIN, CHAR_MAX },
|
||||
{"escape", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="c"}, 0, 0 },
|
||||
{"e", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="c"}, 0, 0 },
|
||||
{"print_section", "print section name", OFFSET(print_section), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 },
|
||||
{"p", "print section name", OFFSET(print_section), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 },
|
||||
{NULL},
|
||||
@ -1199,12 +1201,12 @@ static const Writer compact_writer = {
|
||||
#define OFFSET(x) offsetof(CompactContext, x)
|
||||
|
||||
static const AVOption csv_options[] = {
|
||||
{"item_sep", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str=","}, CHAR_MIN, CHAR_MAX },
|
||||
{"s", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str=","}, CHAR_MIN, CHAR_MAX },
|
||||
{"item_sep", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str=","}, 0, 0 },
|
||||
{"s", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str=","}, 0, 0 },
|
||||
{"nokey", "force no key printing", OFFSET(nokey), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 },
|
||||
{"nk", "force no key printing", OFFSET(nokey), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 },
|
||||
{"escape", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="csv"}, CHAR_MIN, CHAR_MAX },
|
||||
{"e", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="csv"}, CHAR_MIN, CHAR_MAX },
|
||||
{"escape", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="csv"}, 0, 0 },
|
||||
{"e", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="csv"}, 0, 0 },
|
||||
{"print_section", "print section name", OFFSET(print_section), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 },
|
||||
{"p", "print section name", OFFSET(print_section), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 },
|
||||
{NULL},
|
||||
@ -1237,8 +1239,8 @@ typedef struct FlatContext {
|
||||
#define OFFSET(x) offsetof(FlatContext, x)
|
||||
|
||||
static const AVOption flat_options[]= {
|
||||
{"sep_char", "set separator", OFFSET(sep_str), AV_OPT_TYPE_STRING, {.str="."}, CHAR_MIN, CHAR_MAX },
|
||||
{"s", "set separator", OFFSET(sep_str), AV_OPT_TYPE_STRING, {.str="."}, CHAR_MIN, CHAR_MAX },
|
||||
{"sep_char", "set separator", OFFSET(sep_str), AV_OPT_TYPE_STRING, {.str="."}, 0, 0 },
|
||||
{"s", "set separator", OFFSET(sep_str), AV_OPT_TYPE_STRING, {.str="."}, 0, 0 },
|
||||
{"hierarchical", "specify if the section specification should be hierarchical", OFFSET(hierarchical), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 },
|
||||
{"h", "specify if the section specification should be hierarchical", OFFSET(hierarchical), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 },
|
||||
{NULL},
|
||||
@ -1535,7 +1537,7 @@ static void json_print_section_header(WriterContext *wctx)
|
||||
if (parent_section && parent_section->id == SECTION_ID_PACKETS_AND_FRAMES) {
|
||||
if (!json->compact)
|
||||
JSON_INDENT();
|
||||
printf("\"type\": \"%s\"%s", section->name, json->item_sep);
|
||||
printf("\"type\": \"%s\"", section->name);
|
||||
}
|
||||
}
|
||||
av_bprint_finalize(&buf, NULL);
|
||||
@ -1579,8 +1581,10 @@ static inline void json_print_item_str(WriterContext *wctx,
|
||||
static void json_print_str(WriterContext *wctx, const char *key, const char *value)
|
||||
{
|
||||
JSONContext *json = wctx->priv;
|
||||
const struct section *parent_section = wctx->level ?
|
||||
wctx->section[wctx->level-1] : NULL;
|
||||
|
||||
if (wctx->nb_item[wctx->level])
|
||||
if (wctx->nb_item[wctx->level] || (parent_section && parent_section->id == SECTION_ID_PACKETS_AND_FRAMES))
|
||||
printf("%s", json->item_sep);
|
||||
if (!json->compact)
|
||||
JSON_INDENT();
|
||||
@ -1590,9 +1594,11 @@ static void json_print_str(WriterContext *wctx, const char *key, const char *val
|
||||
static void json_print_int(WriterContext *wctx, const char *key, long long int value)
|
||||
{
|
||||
JSONContext *json = wctx->priv;
|
||||
const struct section *parent_section = wctx->level ?
|
||||
wctx->section[wctx->level-1] : NULL;
|
||||
AVBPrint buf;
|
||||
|
||||
if (wctx->nb_item[wctx->level])
|
||||
if (wctx->nb_item[wctx->level] || (parent_section && parent_section->id == SECTION_ID_PACKETS_AND_FRAMES))
|
||||
printf("%s", json->item_sep);
|
||||
if (!json->compact)
|
||||
JSON_INDENT();
|
||||
@ -1923,6 +1929,16 @@ static void print_pkt_side_data(WriterContext *w,
|
||||
AVContentLightMetadata *metadata = (AVContentLightMetadata *)sd->data;
|
||||
print_int("max_content", metadata->MaxCLL);
|
||||
print_int("max_average", metadata->MaxFALL);
|
||||
} else if (sd->type == AV_PKT_DATA_DOVI_CONF) {
|
||||
AVDOVIDecoderConfigurationRecord *dovi = (AVDOVIDecoderConfigurationRecord *)sd->data;
|
||||
print_int("dv_version_major", dovi->dv_version_major);
|
||||
print_int("dv_version_minor", dovi->dv_version_minor);
|
||||
print_int("dv_profile", dovi->dv_profile);
|
||||
print_int("dv_level", dovi->dv_level);
|
||||
print_int("rpu_present_flag", dovi->rpu_present_flag);
|
||||
print_int("el_present_flag", dovi->el_present_flag);
|
||||
print_int("bl_present_flag", dovi->bl_present_flag);
|
||||
print_int("dv_bl_signal_compatibility_id", dovi->dv_bl_signal_compatibility_id);
|
||||
}
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
@ -2531,6 +2547,7 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
|
||||
if (dec_ctx) {
|
||||
print_int("coded_width", dec_ctx->coded_width);
|
||||
print_int("coded_height", dec_ctx->coded_height);
|
||||
print_int("closed_captions", !!(dec_ctx->properties & FF_CODEC_PROPERTY_CLOSED_CAPTIONS));
|
||||
}
|
||||
#endif
|
||||
print_int("has_b_frames", par->video_delay);
|
||||
@ -2832,7 +2849,8 @@ static void show_error(WriterContext *w, int err)
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
|
||||
static int open_input_file(InputFile *ifile, const char *filename)
|
||||
static int open_input_file(InputFile *ifile, const char *filename,
|
||||
const char *print_filename)
|
||||
{
|
||||
int err, i;
|
||||
AVFormatContext *fmt_ctx = NULL;
|
||||
@ -2854,6 +2872,10 @@ static int open_input_file(InputFile *ifile, const char *filename)
|
||||
print_error(filename, err);
|
||||
return err;
|
||||
}
|
||||
if (print_filename) {
|
||||
av_freep(&fmt_ctx->url);
|
||||
fmt_ctx->url = av_strdup(print_filename);
|
||||
}
|
||||
ifile->fmt_ctx = fmt_ctx;
|
||||
if (scan_all_pmts_set)
|
||||
av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
|
||||
@ -2930,6 +2952,7 @@ static int open_input_file(InputFile *ifile, const char *filename)
|
||||
ist->dec_ctx->pkt_timebase = stream->time_base;
|
||||
ist->dec_ctx->framerate = stream->avg_frame_rate;
|
||||
#if FF_API_LAVF_AVCTX
|
||||
ist->dec_ctx->properties = stream->codec->properties;
|
||||
ist->dec_ctx->coded_width = stream->codec->coded_width;
|
||||
ist->dec_ctx->coded_height = stream->codec->coded_height;
|
||||
#endif
|
||||
@ -2967,7 +2990,8 @@ static void close_input_file(InputFile *ifile)
|
||||
avformat_close_input(&ifile->fmt_ctx);
|
||||
}
|
||||
|
||||
static int probe_file(WriterContext *wctx, const char *filename)
|
||||
static int probe_file(WriterContext *wctx, const char *filename,
|
||||
const char *print_filename)
|
||||
{
|
||||
InputFile ifile = { 0 };
|
||||
int ret, i;
|
||||
@ -2976,7 +3000,7 @@ static int probe_file(WriterContext *wctx, const char *filename)
|
||||
do_read_frames = do_show_frames || do_count_frames;
|
||||
do_read_packets = do_show_packets || do_count_packets;
|
||||
|
||||
ret = open_input_file(&ifile, filename);
|
||||
ret = open_input_file(&ifile, filename, print_filename);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
|
||||
@ -3282,6 +3306,12 @@ static int opt_input_file_i(void *optctx, const char *opt, const char *arg)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int opt_print_filename(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
print_input_filename = arg;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void show_help_default(const char *opt, const char *arg)
|
||||
{
|
||||
av_log_set_callback(log_callback_help);
|
||||
@ -3471,7 +3501,7 @@ static int opt_sections(void *optctx, const char *opt, const char *arg)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int opt_show_versions(const char *opt, const char *arg)
|
||||
static int opt_show_versions(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
mark_section_show_entries(SECTION_ID_PROGRAM_VERSION, 1, NULL);
|
||||
mark_section_show_entries(SECTION_ID_LIBRARY_VERSION, 1, NULL);
|
||||
@ -3479,7 +3509,7 @@ static int opt_show_versions(const char *opt, const char *arg)
|
||||
}
|
||||
|
||||
#define DEFINE_OPT_SHOW_SECTION(section, target_section_id) \
|
||||
static int opt_show_##section(const char *opt, const char *arg) \
|
||||
static int opt_show_##section(void *optctx, const char *opt, const char *arg) \
|
||||
{ \
|
||||
mark_section_show_entries(SECTION_ID_##target_section_id, 1, NULL); \
|
||||
return 0; \
|
||||
@ -3507,39 +3537,40 @@ static const OptionDef real_options[] = {
|
||||
"use sexagesimal format HOURS:MM:SS.MICROSECONDS for time units" },
|
||||
{ "pretty", 0, {.func_arg = opt_pretty},
|
||||
"prettify the format of displayed values, make it more human readable" },
|
||||
{ "print_format", OPT_STRING | HAS_ARG, {(void*)&print_format},
|
||||
{ "print_format", OPT_STRING | HAS_ARG, { &print_format },
|
||||
"set the output printing format (available formats are: default, compact, csv, flat, ini, json, xml)", "format" },
|
||||
{ "of", OPT_STRING | HAS_ARG, {(void*)&print_format}, "alias for -print_format", "format" },
|
||||
{ "select_streams", OPT_STRING | HAS_ARG, {(void*)&stream_specifier}, "select the specified streams", "stream_specifier" },
|
||||
{ "of", OPT_STRING | HAS_ARG, { &print_format }, "alias for -print_format", "format" },
|
||||
{ "select_streams", OPT_STRING | HAS_ARG, { &stream_specifier }, "select the specified streams", "stream_specifier" },
|
||||
{ "sections", OPT_EXIT, {.func_arg = opt_sections}, "print sections structure and section information, and exit" },
|
||||
{ "show_data", OPT_BOOL, {(void*)&do_show_data}, "show packets data" },
|
||||
{ "show_data_hash", OPT_STRING | HAS_ARG, {(void*)&show_data_hash}, "show packets data hash" },
|
||||
{ "show_error", 0, {(void*)&opt_show_error}, "show probing error" },
|
||||
{ "show_format", 0, {(void*)&opt_show_format}, "show format/container info" },
|
||||
{ "show_frames", 0, {(void*)&opt_show_frames}, "show frames info" },
|
||||
{ "show_data", OPT_BOOL, { &do_show_data }, "show packets data" },
|
||||
{ "show_data_hash", OPT_STRING | HAS_ARG, { &show_data_hash }, "show packets data hash" },
|
||||
{ "show_error", 0, { .func_arg = &opt_show_error }, "show probing error" },
|
||||
{ "show_format", 0, { .func_arg = &opt_show_format }, "show format/container info" },
|
||||
{ "show_frames", 0, { .func_arg = &opt_show_frames }, "show frames info" },
|
||||
{ "show_format_entry", HAS_ARG, {.func_arg = opt_show_format_entry},
|
||||
"show a particular entry from the format/container info", "entry" },
|
||||
{ "show_entries", HAS_ARG, {.func_arg = opt_show_entries},
|
||||
"show a set of specified entries", "entry_list" },
|
||||
#if HAVE_THREADS
|
||||
{ "show_log", OPT_INT|HAS_ARG, {(void*)&do_show_log}, "show log" },
|
||||
{ "show_log", OPT_INT|HAS_ARG, { &do_show_log }, "show log" },
|
||||
#endif
|
||||
{ "show_packets", 0, {(void*)&opt_show_packets}, "show packets info" },
|
||||
{ "show_programs", 0, {(void*)&opt_show_programs}, "show programs info" },
|
||||
{ "show_streams", 0, {(void*)&opt_show_streams}, "show streams info" },
|
||||
{ "show_chapters", 0, {(void*)&opt_show_chapters}, "show chapters info" },
|
||||
{ "count_frames", OPT_BOOL, {(void*)&do_count_frames}, "count the number of frames per stream" },
|
||||
{ "count_packets", OPT_BOOL, {(void*)&do_count_packets}, "count the number of packets per stream" },
|
||||
{ "show_program_version", 0, {(void*)&opt_show_program_version}, "show ffprobe version" },
|
||||
{ "show_library_versions", 0, {(void*)&opt_show_library_versions}, "show library versions" },
|
||||
{ "show_versions", 0, {(void*)&opt_show_versions}, "show program and library versions" },
|
||||
{ "show_pixel_formats", 0, {(void*)&opt_show_pixel_formats}, "show pixel format descriptions" },
|
||||
{ "show_private_data", OPT_BOOL, {(void*)&show_private_data}, "show private data" },
|
||||
{ "private", OPT_BOOL, {(void*)&show_private_data}, "same as show_private_data" },
|
||||
{ "show_packets", 0, { .func_arg = &opt_show_packets }, "show packets info" },
|
||||
{ "show_programs", 0, { .func_arg = &opt_show_programs }, "show programs info" },
|
||||
{ "show_streams", 0, { .func_arg = &opt_show_streams }, "show streams info" },
|
||||
{ "show_chapters", 0, { .func_arg = &opt_show_chapters }, "show chapters info" },
|
||||
{ "count_frames", OPT_BOOL, { &do_count_frames }, "count the number of frames per stream" },
|
||||
{ "count_packets", OPT_BOOL, { &do_count_packets }, "count the number of packets per stream" },
|
||||
{ "show_program_version", 0, { .func_arg = &opt_show_program_version }, "show ffprobe version" },
|
||||
{ "show_library_versions", 0, { .func_arg = &opt_show_library_versions }, "show library versions" },
|
||||
{ "show_versions", 0, { .func_arg = &opt_show_versions }, "show program and library versions" },
|
||||
{ "show_pixel_formats", 0, { .func_arg = &opt_show_pixel_formats }, "show pixel format descriptions" },
|
||||
{ "show_private_data", OPT_BOOL, { &show_private_data }, "show private data" },
|
||||
{ "private", OPT_BOOL, { &show_private_data }, "same as show_private_data" },
|
||||
{ "bitexact", OPT_BOOL, {&do_bitexact}, "force bitexact output" },
|
||||
{ "read_intervals", HAS_ARG, {.func_arg = opt_read_intervals}, "set read intervals", "read_intervals" },
|
||||
{ "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {.func_arg = opt_default}, "generic catch all option", "" },
|
||||
{ "i", HAS_ARG, {.func_arg = opt_input_file_i}, "read specified file", "input_file"},
|
||||
{ "print_filename", HAS_ARG, {.func_arg = opt_print_filename}, "override the printed input filename", "print_file"},
|
||||
{ "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
|
||||
"read and decode the streams to fill missing information with heuristics" },
|
||||
{ NULL, },
|
||||
@ -3688,7 +3719,7 @@ int main(int argc, char **argv)
|
||||
av_log(NULL, AV_LOG_ERROR, "Use -h to get full help or, even better, run 'man %s'.\n", program_name);
|
||||
ret = AVERROR(EINVAL);
|
||||
} else if (input_filename) {
|
||||
ret = probe_file(wctx, input_filename);
|
||||
ret = probe_file(wctx, input_filename, print_input_filename);
|
||||
if (ret < 0 && do_show_error)
|
||||
show_error(wctx, ret);
|
||||
}
|
||||
|
libavcodec/.gitignore (new file, 6 lines, vendored)
@@ -0,0 +1,6 @@
/*_tablegen
/*_tables.c
/*_tables.h
/bsf_list.c
/codec_list.c
/parser_list.c
@ -525,6 +525,10 @@ static int decode_i_block(FourXContext *f, int16_t *block)
|
||||
break;
|
||||
if (code == 0xf0) {
|
||||
i += 16;
|
||||
if (i >= 64) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "run %d overflow\n", i);
|
||||
return 0;
|
||||
}
|
||||
} else {
|
||||
if (code & 0xf) {
|
||||
level = get_xbits(&f->gb, code & 0xf);
|
||||
|
@ -164,8 +164,7 @@ static av_cold int eightsvx_decode_init(AVCodecContext *avctx)
|
||||
case AV_CODEC_ID_8SVX_FIB: esc->table = fibonacci; break;
|
||||
case AV_CODEC_ID_8SVX_EXP: esc->table = exponential; break;
|
||||
default:
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid codec id %d.\n", avctx->codec->id);
|
||||
return AVERROR_INVALIDDATA;
|
||||
av_assert1(0);
|
||||
}
|
||||
avctx->sample_fmt = AV_SAMPLE_FMT_U8P;
|
||||
|
||||
|
@ -6,12 +6,18 @@ HEADERS = ac3_parser.h \
|
||||
avcodec.h \
|
||||
avdct.h \
|
||||
avfft.h \
|
||||
bsf.h \
|
||||
codec.h \
|
||||
codec_desc.h \
|
||||
codec_id.h \
|
||||
codec_par.h \
|
||||
d3d11va.h \
|
||||
dirac.h \
|
||||
dv_profile.h \
|
||||
dxva2.h \
|
||||
jni.h \
|
||||
mediacodec.h \
|
||||
packet.h \
|
||||
qsv.h \
|
||||
vaapi.h \
|
||||
vdpau.h \
|
||||
@ -167,12 +173,15 @@ OBJS-$(CONFIG_AAC_ENCODER) += aacenc.o aaccoder.o aacenctab.o \
|
||||
aacenc_ltp.o \
|
||||
aacenc_pred.o \
|
||||
psymodel.o mpeg4audio.o kbdwin.o cbrt_data.o
|
||||
OBJS-$(CONFIG_AAC_MF_ENCODER) += mfenc.o mf_utils.o
|
||||
OBJS-$(CONFIG_AASC_DECODER) += aasc.o msrledec.o
|
||||
OBJS-$(CONFIG_AC3_DECODER) += ac3dec_float.o ac3dec_data.o ac3.o kbdwin.o ac3tab.o
|
||||
OBJS-$(CONFIG_AC3_FIXED_DECODER) += ac3dec_fixed.o ac3dec_data.o ac3.o kbdwin.o ac3tab.o
|
||||
OBJS-$(CONFIG_AC3_ENCODER) += ac3enc_float.o ac3enc.o ac3tab.o \
|
||||
ac3.o kbdwin.o
|
||||
OBJS-$(CONFIG_AC3_FIXED_ENCODER) += ac3enc_fixed.o ac3enc.o ac3tab.o ac3.o
|
||||
OBJS-$(CONFIG_AC3_MF_ENCODER) += mfenc.o mf_utils.o
|
||||
OBJS-$(CONFIG_ACELP_KELVIN_DECODER) += g729dec.o lsp.o celp_math.o celp_filters.o acelp_filters.o acelp_pitch_delay.o acelp_vectors.o g729postfilter.o
|
||||
OBJS-$(CONFIG_AGM_DECODER) += agm.o
|
||||
OBJS-$(CONFIG_AIC_DECODER) += aic.o
|
||||
OBJS-$(CONFIG_ALAC_DECODER) += alac.o alac_data.o alacdsp.o
|
||||
@ -193,10 +202,10 @@ OBJS-$(CONFIG_AMV_ENCODER) += mjpegenc.o mjpegenc_common.o \
|
||||
OBJS-$(CONFIG_ANM_DECODER) += anm.o
|
||||
OBJS-$(CONFIG_ANSI_DECODER) += ansi.o cga_data.o
|
||||
OBJS-$(CONFIG_APE_DECODER) += apedec.o
|
||||
OBJS-$(CONFIG_APTX_DECODER) += aptx.o
|
||||
OBJS-$(CONFIG_APTX_ENCODER) += aptx.o
|
||||
OBJS-$(CONFIG_APTX_HD_DECODER) += aptx.o
|
||||
OBJS-$(CONFIG_APTX_HD_ENCODER) += aptx.o
|
||||
OBJS-$(CONFIG_APTX_DECODER) += aptxdec.o aptx.o
|
||||
OBJS-$(CONFIG_APTX_ENCODER) += aptxenc.o aptx.o
|
||||
OBJS-$(CONFIG_APTX_HD_DECODER) += aptxdec.o aptx.o
|
||||
OBJS-$(CONFIG_APTX_HD_ENCODER) += aptxenc.o aptx.o
|
||||
OBJS-$(CONFIG_APNG_DECODER) += png.o pngdec.o pngdsp.o
|
||||
OBJS-$(CONFIG_APNG_ENCODER) += png.o pngenc.o
|
||||
OBJS-$(CONFIG_ARBC_DECODER) += arbc.o
|
||||
@ -241,8 +250,9 @@ OBJS-$(CONFIG_BRENDER_PIX_DECODER) += brenderpix.o
|
||||
OBJS-$(CONFIG_C93_DECODER) += c93.o
|
||||
OBJS-$(CONFIG_CAVS_DECODER) += cavs.o cavsdec.o cavsdsp.o \
|
||||
cavsdata.o
|
||||
OBJS-$(CONFIG_CCAPTION_DECODER) += ccaption_dec.o
|
||||
OBJS-$(CONFIG_CCAPTION_DECODER) += ccaption_dec.o ass.o
|
||||
OBJS-$(CONFIG_CDGRAPHICS_DECODER) += cdgraphics.o
|
||||
OBJS-$(CONFIG_CDTOONS_DECODER) += cdtoons.o
|
||||
OBJS-$(CONFIG_CDXL_DECODER) += cdxl.o
|
||||
OBJS-$(CONFIG_CFHD_DECODER) += cfhd.o cfhddata.o
|
||||
OBJS-$(CONFIG_CINEPAK_DECODER) += cinepak.o
|
||||
@ -263,6 +273,7 @@ OBJS-$(CONFIG_DCA_DECODER) += dcadec.o dca.o dcadata.o dcahuff.o \
|
||||
OBJS-$(CONFIG_DCA_ENCODER) += dcaenc.o dca.o dcadata.o dcahuff.o \
|
||||
dcaadpcm.o
|
||||
OBJS-$(CONFIG_DDS_DECODER) += dds.o
|
||||
OBJS-$(CONFIG_DERF_DPCM_DECODER) += dpcm.o
|
||||
OBJS-$(CONFIG_DIRAC_DECODER) += diracdec.o dirac.o diracdsp.o diractab.o \
|
||||
dirac_arith.o dirac_dwt.o dirac_vlc.o
|
||||
OBJS-$(CONFIG_DFA_DECODER) += dfa.o
|
||||
@ -281,8 +292,8 @@ OBJS-$(CONFIG_DSS_SP_DECODER) += dss_sp.o
|
||||
OBJS-$(CONFIG_DST_DECODER) += dstdec.o dsd.o
|
||||
OBJS-$(CONFIG_DVBSUB_DECODER) += dvbsubdec.o
|
||||
OBJS-$(CONFIG_DVBSUB_ENCODER) += dvbsub.o
|
||||
OBJS-$(CONFIG_DVDSUB_DECODER) += dvdsubdec.o
|
||||
OBJS-$(CONFIG_DVDSUB_ENCODER) += dvdsubenc.o
|
||||
OBJS-$(CONFIG_DVDSUB_DECODER) += dvdsubdec.o dvdsub.o
|
||||
OBJS-$(CONFIG_DVDSUB_ENCODER) += dvdsubenc.o dvdsub.o
|
||||
OBJS-$(CONFIG_DVAUDIO_DECODER) += dvaudiodec.o
|
||||
OBJS-$(CONFIG_DVVIDEO_DECODER) += dvdec.o dv.o dvdata.o
|
||||
OBJS-$(CONFIG_DVVIDEO_ENCODER) += dvenc.o dv.o dvdata.o
|
||||
@ -350,6 +361,7 @@ OBJS-$(CONFIG_H264_DECODER) += h264dec.o h264_cabac.o h264_cavlc.o \
|
||||
OBJS-$(CONFIG_H264_AMF_ENCODER) += amfenc_h264.o
|
||||
OBJS-$(CONFIG_H264_CUVID_DECODER) += cuviddec.o
|
||||
OBJS-$(CONFIG_H264_MEDIACODEC_DECODER) += mediacodecdec.o
|
||||
OBJS-$(CONFIG_H264_MF_ENCODER) += mfenc.o mf_utils.o
|
||||
OBJS-$(CONFIG_H264_MMAL_DECODER) += mmaldec.o
|
||||
OBJS-$(CONFIG_H264_NVENC_ENCODER) += nvenc_h264.o
|
||||
OBJS-$(CONFIG_NVENC_ENCODER) += nvenc_h264.o
|
||||
@ -364,6 +376,7 @@ OBJS-$(CONFIG_H264_V4L2M2M_DECODER) += v4l2_m2m_dec.o
|
||||
OBJS-$(CONFIG_H264_V4L2M2M_ENCODER) += v4l2_m2m_enc.o
|
||||
OBJS-$(CONFIG_HAP_DECODER) += hapdec.o hap.o
|
||||
OBJS-$(CONFIG_HAP_ENCODER) += hapenc.o hap.o
|
||||
OBJS-$(CONFIG_HCA_DECODER) += hcadec.o
|
||||
OBJS-$(CONFIG_HCOM_DECODER) += hcom.o
|
||||
OBJS-$(CONFIG_HEVC_DECODER) += hevcdec.o hevc_mvs.o \
|
||||
hevc_cabac.o hevc_refs.o hevcpred.o \
|
||||
@ -371,6 +384,7 @@ OBJS-$(CONFIG_HEVC_DECODER) += hevcdec.o hevc_mvs.o \
|
||||
OBJS-$(CONFIG_HEVC_AMF_ENCODER) += amfenc_hevc.o
|
||||
OBJS-$(CONFIG_HEVC_CUVID_DECODER) += cuviddec.o
|
||||
OBJS-$(CONFIG_HEVC_MEDIACODEC_DECODER) += mediacodecdec.o
|
||||
OBJS-$(CONFIG_HEVC_MF_ENCODER) += mfenc.o mf_utils.o
|
||||
OBJS-$(CONFIG_HEVC_NVENC_ENCODER) += nvenc_hevc.o
|
||||
OBJS-$(CONFIG_NVENC_HEVC_ENCODER) += nvenc_hevc.o
|
||||
OBJS-$(CONFIG_HEVC_QSV_DECODER) += qsvdec_h2645.o
|
||||
@ -393,6 +407,7 @@ OBJS-$(CONFIG_IFF_ILBM_DECODER) += iff.o
|
||||
OBJS-$(CONFIG_ILBC_DECODER) += ilbcdec.o
|
||||
OBJS-$(CONFIG_IMC_DECODER) += imc.o
|
||||
OBJS-$(CONFIG_IMM4_DECODER) += imm4.o
|
||||
OBJS-$(CONFIG_IMM5_DECODER) += imm5.o
|
||||
OBJS-$(CONFIG_INDEO2_DECODER) += indeo2.o
|
||||
OBJS-$(CONFIG_INDEO3_DECODER) += indeo3.o
|
||||
OBJS-$(CONFIG_INDEO4_DECODER) += indeo4.o ivi.o
|
||||
@ -425,6 +440,7 @@ OBJS-$(CONFIG_METASOUND_DECODER) += metasound.o metasound_data.o \
|
||||
OBJS-$(CONFIG_MICRODVD_DECODER) += microdvddec.o ass.o
|
||||
OBJS-$(CONFIG_MIMIC_DECODER) += mimic.o
|
||||
OBJS-$(CONFIG_MJPEG_DECODER) += mjpegdec.o
|
||||
OBJS-$(CONFIG_MJPEG_QSV_DECODER) += qsvdec_other.o
|
||||
OBJS-$(CONFIG_MJPEG_ENCODER) += mjpegenc.o mjpegenc_common.o \
|
||||
mjpegenc_huffman.o
|
||||
OBJS-$(CONFIG_MJPEGB_DECODER) += mjpegbdec.o
|
||||
@ -446,6 +462,7 @@ OBJS-$(CONFIG_MP2FIXED_ENCODER) += mpegaudioenc_fixed.o mpegaudio.o \
|
||||
mpegaudiodata.o mpegaudiodsp_data.o
|
||||
OBJS-$(CONFIG_MP2FLOAT_DECODER) += mpegaudiodec_float.o
|
||||
OBJS-$(CONFIG_MP3_DECODER) += mpegaudiodec_fixed.o
|
||||
OBJS-$(CONFIG_MP3_MF_ENCODER) += mfenc.o mf_utils.o
|
||||
OBJS-$(CONFIG_MP3ADU_DECODER) += mpegaudiodec_fixed.o
|
||||
OBJS-$(CONFIG_MP3ADUFLOAT_DECODER) += mpegaudiodec_float.o
|
||||
OBJS-$(CONFIG_MP3FLOAT_DECODER) += mpegaudiodec_float.o
|
||||
@ -488,19 +505,23 @@ OBJS-$(CONFIG_MSVIDEO1_DECODER) += msvideo1.o
|
||||
OBJS-$(CONFIG_MSVIDEO1_ENCODER) += msvideo1enc.o elbg.o
|
||||
OBJS-$(CONFIG_MSZH_DECODER) += lcldec.o
|
||||
OBJS-$(CONFIG_MTS2_DECODER) += mss4.o
|
||||
OBJS-$(CONFIG_MV30_DECODER) += mv30.o
|
||||
OBJS-$(CONFIG_MVC1_DECODER) += mvcdec.o
|
||||
OBJS-$(CONFIG_MVC2_DECODER) += mvcdec.o
|
||||
OBJS-$(CONFIG_MVDV_DECODER) += midivid.o
|
||||
OBJS-$(CONFIG_MVHA_DECODER) += mvha.o
|
||||
OBJS-$(CONFIG_MWSC_DECODER) += mwsc.o
|
||||
OBJS-$(CONFIG_MXPEG_DECODER) += mxpegdec.o
|
||||
OBJS-$(CONFIG_NELLYMOSER_DECODER) += nellymoserdec.o nellymoser.o
|
||||
OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o
|
||||
OBJS-$(CONFIG_NOTCHLC_DECODER) += notchlc.o
|
||||
OBJS-$(CONFIG_NUV_DECODER) += nuv.o rtjpeg.o
|
||||
OBJS-$(CONFIG_ON2AVC_DECODER) += on2avc.o on2avcdata.o
|
||||
OBJS-$(CONFIG_OPUS_DECODER) += opusdec.o opus.o opus_celt.o opus_rc.o \
|
||||
opus_pvq.o opus_silk.o opustab.o vorbis_data.o \
|
||||
opusdsp.o
|
||||
OBJS-$(CONFIG_OPUS_ENCODER) += opusenc.o opus.o opus_rc.o opustab.o opus_pvq.o \
|
||||
opusenc_psy.o
|
||||
opusenc_psy.o vorbis_data.o
|
||||
OBJS-$(CONFIG_PAF_AUDIO_DECODER) += pafaudio.o
|
||||
OBJS-$(CONFIG_PAF_VIDEO_DECODER) += pafvideo.o
|
||||
OBJS-$(CONFIG_PAM_DECODER) += pnmdec.o pnm.o
|
||||
@ -509,6 +530,7 @@ OBJS-$(CONFIG_PBM_DECODER) += pnmdec.o pnm.o
|
||||
OBJS-$(CONFIG_PBM_ENCODER) += pnmenc.o
|
||||
OBJS-$(CONFIG_PCX_DECODER) += pcx.o
|
||||
OBJS-$(CONFIG_PCX_ENCODER) += pcxenc.o
|
||||
OBJS-$(CONFIG_PFM_DECODER) += pnmdec.o pnm.o
|
||||
OBJS-$(CONFIG_PGM_DECODER) += pnmdec.o pnm.o
|
||||
OBJS-$(CONFIG_PGM_ENCODER) += pnmenc.o
|
||||
OBJS-$(CONFIG_PGMYUV_DECODER) += pnmdec.o pnm.o
|
||||
@ -578,13 +600,14 @@ OBJS-$(CONFIG_SIPR_DECODER) += sipr.o acelp_pitch_delay.o \
|
||||
celp_math.o acelp_vectors.o \
|
||||
acelp_filters.o celp_filters.o \
|
||||
sipr16k.o
|
||||
OBJS-$(CONFIG_SIREN_DECODER) += siren.o
|
||||
OBJS-$(CONFIG_SMACKAUD_DECODER) += smacker.o
|
||||
OBJS-$(CONFIG_SMACKER_DECODER) += smacker.o
|
||||
OBJS-$(CONFIG_SMC_DECODER) += smc.o
|
||||
OBJS-$(CONFIG_SMVJPEG_DECODER) += smvjpegdec.o
|
||||
OBJS-$(CONFIG_SNOW_DECODER) += snowdec.o snow.o snow_dwt.o
|
||||
OBJS-$(CONFIG_SNOW_ENCODER) += snowenc.o snow.o snow_dwt.o \
|
||||
h263.o ituh263enc.o
|
||||
h263.o h263data.o ituh263enc.o
|
||||
OBJS-$(CONFIG_SOL_DPCM_DECODER) += dpcm.o
|
||||
OBJS-$(CONFIG_SONIC_DECODER) += sonic.o
|
||||
OBJS-$(CONFIG_SONIC_ENCODER) += sonic.o
|
||||
@ -604,10 +627,10 @@ OBJS-$(CONFIG_SUNRAST_ENCODER) += sunrastenc.o
|
||||
OBJS-$(CONFIG_LIBRSVG_DECODER) += librsvgdec.o
|
||||
OBJS-$(CONFIG_SBC_DECODER) += sbcdec.o sbcdec_data.o sbc.o
|
||||
OBJS-$(CONFIG_SBC_ENCODER) += sbcenc.o sbc.o sbcdsp.o sbcdsp_data.o
|
||||
OBJS-$(CONFIG_SVQ1_DECODER) += svq1dec.o svq1.o svq13.o h263data.o
|
||||
OBJS-$(CONFIG_SVQ1_DECODER) += svq1dec.o svq1.o h263data.o
|
||||
OBJS-$(CONFIG_SVQ1_ENCODER) += svq1enc.o svq1.o h263data.o \
|
||||
h263.o ituh263enc.o
|
||||
OBJS-$(CONFIG_SVQ3_DECODER) += svq3.o svq13.o mpegutils.o h264data.o
|
||||
OBJS-$(CONFIG_SVQ3_DECODER) += svq3.o mpegutils.o h264data.o
|
||||
OBJS-$(CONFIG_TEXT_DECODER) += textdec.o ass.o
|
||||
OBJS-$(CONFIG_TEXT_ENCODER) += srtenc.o ass_split.o
|
||||
OBJS-$(CONFIG_TAK_DECODER) += takdec.o tak.o takdsp.o
|
||||
@ -616,7 +639,7 @@ OBJS-$(CONFIG_TARGA_ENCODER) += targaenc.o rle.o
|
||||
OBJS-$(CONFIG_TARGA_Y216_DECODER) += targa_y216dec.o
|
||||
OBJS-$(CONFIG_TDSC_DECODER) += tdsc.o
|
||||
OBJS-$(CONFIG_TIERTEXSEQVIDEO_DECODER) += tiertexseqv.o
|
||||
OBJS-$(CONFIG_TIFF_DECODER) += tiff.o lzw.o faxcompr.o tiff_data.o tiff_common.o
|
||||
OBJS-$(CONFIG_TIFF_DECODER) += tiff.o lzw.o faxcompr.o tiff_data.o tiff_common.o mjpegdec.o
|
||||
OBJS-$(CONFIG_TIFF_ENCODER) += tiffenc.o rle.o lzwenc.o tiff_data.o
|
||||
OBJS-$(CONFIG_TMV_DECODER) += tmv.o cga_data.o
|
||||
OBJS-$(CONFIG_TRUEHD_DECODER) += mlpdec.o mlpdsp.o
|
||||
@ -682,10 +705,11 @@ OBJS-$(CONFIG_VP9_CUVID_DECODER) += cuviddec.o
|
||||
OBJS-$(CONFIG_VP9_MEDIACODEC_DECODER) += mediacodecdec.o
|
||||
OBJS-$(CONFIG_VP9_RKMPP_DECODER) += rkmppdec.o
|
||||
OBJS-$(CONFIG_VP9_VAAPI_ENCODER) += vaapi_encode_vp9.o
|
||||
OBJS-$(CONFIG_VP9_QSV_ENCODER) += qsvenc_vp9.o
|
||||
OBJS-$(CONFIG_VPLAYER_DECODER) += textdec.o ass.o
|
||||
OBJS-$(CONFIG_VP9_V4L2M2M_DECODER) += v4l2_m2m_dec.o
|
||||
OBJS-$(CONFIG_VQA_DECODER) += vqavideo.o
|
||||
OBJS-$(CONFIG_WAVPACK_DECODER) += wavpack.o
|
||||
OBJS-$(CONFIG_WAVPACK_DECODER) += wavpack.o dsd.o
|
||||
OBJS-$(CONFIG_WAVPACK_ENCODER) += wavpackenc.o
|
||||
OBJS-$(CONFIG_WCMV_DECODER) += wcmv.o
|
||||
OBJS-$(CONFIG_WEBP_DECODER) += webp.o
|
||||
@ -701,7 +725,7 @@ OBJS-$(CONFIG_WMAVOICE_DECODER) += wmavoice.o \
|
||||
celp_filters.o \
|
||||
acelp_vectors.o acelp_filters.o
|
||||
OBJS-$(CONFIG_WMV1_DECODER) += msmpeg4dec.o msmpeg4.o msmpeg4data.o
|
||||
OBJS-$(CONFIG_WMV1_ENCODER) += msmpeg4enc.o
|
||||
OBJS-$(CONFIG_WMV1_ENCODER) += msmpeg4enc.o msmpeg4.o msmpeg4data.o
|
||||
OBJS-$(CONFIG_WMV2_DECODER) += wmv2dec.o wmv2.o wmv2data.o \
|
||||
msmpeg4dec.o msmpeg4.o msmpeg4data.o
|
||||
OBJS-$(CONFIG_WMV2_ENCODER) += wmv2enc.o wmv2.o wmv2data.o \
|
||||
@ -803,7 +827,6 @@ OBJS-$(CONFIG_PCM_U32LE_DECODER) += pcm.o
|
||||
OBJS-$(CONFIG_PCM_U32LE_ENCODER) += pcm.o
|
||||
OBJS-$(CONFIG_PCM_VIDC_DECODER) += pcm.o
|
||||
OBJS-$(CONFIG_PCM_VIDC_ENCODER) += pcm.o
|
||||
OBJS-$(CONFIG_PCM_ZORK_DECODER) += pcm.o
|
||||
|
||||
OBJS-$(CONFIG_ADPCM_4XM_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_ADX_DECODER) += adxdec.o adx.o
|
||||
@ -811,6 +834,7 @@ OBJS-$(CONFIG_ADPCM_ADX_ENCODER) += adxenc.o adx.o
|
||||
OBJS-$(CONFIG_ADPCM_AFC_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_AGM_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_AICA_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_ARGO_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_CT_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_DTK_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_EA_DECODER) += adpcm.o adpcm_data.o
|
||||
@ -826,17 +850,23 @@ OBJS-$(CONFIG_ADPCM_G726_ENCODER) += g726.o
|
||||
OBJS-$(CONFIG_ADPCM_G726LE_DECODER) += g726.o
|
||||
OBJS-$(CONFIG_ADPCM_G726LE_ENCODER) += g726.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_AMV_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_ALP_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_APC_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_APM_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_CUNNING_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_DAT4_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_DK3_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_DK4_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_EA_EACS_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_EA_SEAD_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_ISS_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_MTF_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_OKI_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_QT_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_QT_ENCODER) += adpcmenc.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_RAD_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_SSI_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_SSI_ENCODER) += adpcmenc.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_SMJPEG_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_WAV_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_WAV_ENCODER) += adpcmenc.o adpcm_data.o
|
||||
@ -856,6 +886,7 @@ OBJS-$(CONFIG_ADPCM_VIMA_DECODER) += vima.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_XA_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_YAMAHA_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_YAMAHA_ENCODER) += adpcmenc.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_ZORK_DECODER) += adpcm.o adpcm_data.o
|
||||
|
||||
# hardware accelerators
|
||||
OBJS-$(CONFIG_D3D11VA) += dxva2.o
|
||||
@ -878,7 +909,7 @@ OBJS-$(CONFIG_HEVC_D3D11VA_HWACCEL) += dxva2_hevc.o
|
||||
OBJS-$(CONFIG_HEVC_DXVA2_HWACCEL) += dxva2_hevc.o
|
||||
OBJS-$(CONFIG_HEVC_NVDEC_HWACCEL) += nvdec_hevc.o
|
||||
OBJS-$(CONFIG_HEVC_QSV_HWACCEL) += qsvdec_h2645.o
|
||||
OBJS-$(CONFIG_HEVC_VAAPI_HWACCEL) += vaapi_hevc.o
|
||||
OBJS-$(CONFIG_HEVC_VAAPI_HWACCEL) += vaapi_hevc.o h265_profile_level.o
|
||||
OBJS-$(CONFIG_HEVC_VDPAU_HWACCEL) += vdpau_hevc.o
|
||||
OBJS-$(CONFIG_MJPEG_NVDEC_HWACCEL) += nvdec_mjpeg.o
|
||||
OBJS-$(CONFIG_MJPEG_VAAPI_HWACCEL) += vaapi_mjpeg.o
|
||||
@ -910,6 +941,7 @@ OBJS-$(CONFIG_VP9_D3D11VA_HWACCEL) += dxva2_vp9.o
|
||||
OBJS-$(CONFIG_VP9_DXVA2_HWACCEL) += dxva2_vp9.o
|
||||
OBJS-$(CONFIG_VP9_NVDEC_HWACCEL) += nvdec_vp9.o
|
||||
OBJS-$(CONFIG_VP9_VAAPI_HWACCEL) += vaapi_vp9.o
|
||||
OBJS-$(CONFIG_VP9_VDPAU_HWACCEL) += vdpau_vp9.o
|
||||
OBJS-$(CONFIG_VP8_QSV_HWACCEL) += qsvdec_other.o
|
||||
|
||||
# libavformat dependencies
|
||||
@ -988,6 +1020,7 @@ OBJS-$(CONFIG_LIBOPUS_DECODER) += libopusdec.o libopus.o \
|
||||
vorbis_data.o
|
||||
OBJS-$(CONFIG_LIBOPUS_ENCODER) += libopusenc.o libopus.o \
|
||||
vorbis_data.o
|
||||
OBJS-$(CONFIG_LIBRAV1E_ENCODER) += librav1e.o
|
||||
OBJS-$(CONFIG_LIBSHINE_ENCODER) += libshine.o
|
||||
OBJS-$(CONFIG_LIBSPEEX_DECODER) += libspeexdec.o
|
||||
OBJS-$(CONFIG_LIBSPEEX_ENCODER) += libspeexenc.o
|
||||
@ -1041,12 +1074,12 @@ OBJS-$(CONFIG_H261_PARSER) += h261_parser.o
|
||||
OBJS-$(CONFIG_H263_PARSER) += h263_parser.o
|
||||
OBJS-$(CONFIG_H264_PARSER) += h264_parser.o h264_sei.o h264data.o
|
||||
OBJS-$(CONFIG_HEVC_PARSER) += hevc_parser.o hevc_data.o
|
||||
OBJS-$(CONFIG_JPEG2000_PARSER) += jpeg2000_parser.o
|
||||
OBJS-$(CONFIG_MJPEG_PARSER) += mjpeg_parser.o
|
||||
OBJS-$(CONFIG_MLP_PARSER) += mlp_parse.o mlp_parser.o mlp.o
|
||||
OBJS-$(CONFIG_MPEG4VIDEO_PARSER) += mpeg4video_parser.o h263.o \
|
||||
mpeg4videodec.o mpeg4video.o \
|
||||
ituh263dec.o h263dec.o h263data.o
|
||||
OBJS-$(CONFIG_PNG_PARSER) += png_parser.o
|
||||
OBJS-$(CONFIG_MPEGAUDIO_PARSER) += mpegaudio_parser.o
|
||||
OBJS-$(CONFIG_MPEGVIDEO_PARSER) += mpegvideo_parser.o \
|
||||
mpeg12.o mpeg12data.o
|
||||
@ -1064,11 +1097,13 @@ OBJS-$(CONFIG_VC1_PARSER) += vc1_parser.o vc1.o vc1data.o \
|
||||
OBJS-$(CONFIG_VP3_PARSER) += vp3_parser.o
|
||||
OBJS-$(CONFIG_VP8_PARSER) += vp8_parser.o
|
||||
OBJS-$(CONFIG_VP9_PARSER) += vp9_parser.o
|
||||
OBJS-$(CONFIG_WEBP_PARSER) += webp_parser.o
|
||||
OBJS-$(CONFIG_XMA_PARSER) += xma_parser.o
|
||||
|
||||
# bitstream filters
|
||||
OBJS-$(CONFIG_AAC_ADTSTOASC_BSF) += aac_adtstoasc_bsf.o mpeg4audio.o
|
||||
OBJS-$(CONFIG_AV1_METADATA_BSF) += av1_metadata_bsf.o
|
||||
OBJS-$(CONFIG_AV1_FRAME_MERGE_BSF) += av1_frame_merge_bsf.o
|
||||
OBJS-$(CONFIG_AV1_FRAME_SPLIT_BSF) += av1_frame_split_bsf.o
|
||||
OBJS-$(CONFIG_CHOMP_BSF) += chomp_bsf.o
|
||||
OBJS-$(CONFIG_DUMP_EXTRADATA_BSF) += dump_extradata_bsf.o
|
||||
@ -1093,6 +1128,8 @@ OBJS-$(CONFIG_MP3_HEADER_DECOMPRESS_BSF) += mp3_header_decompress_bsf.o \
|
||||
OBJS-$(CONFIG_MPEG2_METADATA_BSF) += mpeg2_metadata_bsf.o
|
||||
OBJS-$(CONFIG_NOISE_BSF) += noise_bsf.o
|
||||
OBJS-$(CONFIG_NULL_BSF) += null_bsf.o
|
||||
OBJS-$(CONFIG_OPUS_METADATA_BSF) += opus_metadata_bsf.o
|
||||
OBJS-$(CONFIG_PCM_RECHUNK_BSF) += pcm_rechunk_bsf.o
|
||||
OBJS-$(CONFIG_PRORES_METADATA_BSF) += prores_metadata_bsf.o
|
||||
OBJS-$(CONFIG_REMOVE_EXTRADATA_BSF) += remove_extradata_bsf.o
|
||||
OBJS-$(CONFIG_TEXT2MOVSUB_BSF) += movsub_bsf.o
|
||||
@ -1130,6 +1167,7 @@ SKIPHEADERS-$(CONFIG_JNI) += ffjni.h
|
||||
SKIPHEADERS-$(CONFIG_LIBVPX) += libvpx.h
|
||||
SKIPHEADERS-$(CONFIG_LIBWEBP_ENCODER) += libwebpenc_common.h
|
||||
SKIPHEADERS-$(CONFIG_MEDIACODEC) += mediacodecdec_common.h mediacodec_surface.h mediacodec_wrapper.h mediacodec_sw_buffer.h
|
||||
SKIPHEADERS-$(CONFIG_MEDIAFOUNDATION) += mf_utils.h
|
||||
SKIPHEADERS-$(CONFIG_NVDEC) += nvdec.h
|
||||
SKIPHEADERS-$(CONFIG_NVENC) += nvenc.h
|
||||
SKIPHEADERS-$(CONFIG_QSV) += qsv.h qsv_internal.h
|
||||
|
@ -356,7 +356,7 @@ struct AACContext {
|
||||
OutputConfiguration oc[2];
|
||||
int warned_num_aac_frames;
|
||||
int warned_960_sbr;
|
||||
|
||||
unsigned warned_71_wide;
|
||||
int warned_gain_control;
|
||||
|
||||
/* aacdec functions pointers */
|
||||
|
@ -21,8 +21,8 @@
|
||||
|
||||
#include "adts_header.h"
|
||||
#include "adts_parser.h"
|
||||
#include "avcodec.h"
|
||||
#include "bsf.h"
|
||||
#include "bsf_internal.h"
|
||||
#include "put_bits.h"
|
||||
#include "get_bits.h"
|
||||
#include "mpeg4audio.h"
|
||||
@ -134,8 +134,8 @@ static int aac_adtstoasc_init(AVBSFContext *ctx)
|
||||
/* Validate the extradata if the stream is already MPEG-4 AudioSpecificConfig */
|
||||
if (ctx->par_in->extradata) {
|
||||
MPEG4AudioConfig mp4ac;
|
||||
int ret = avpriv_mpeg4audio_get_config(&mp4ac, ctx->par_in->extradata,
|
||||
ctx->par_in->extradata_size * 8, 1);
|
||||
int ret = avpriv_mpeg4audio_get_config2(&mp4ac, ctx->par_in->extradata,
|
||||
ctx->par_in->extradata_size, 1, ctx);
|
||||
if (ret < 0) {
|
||||
av_log(ctx, AV_LOG_ERROR, "Error parsing AudioSpecificConfig extradata!\n");
|
||||
return ret;
|
||||
|
@ -409,6 +409,8 @@ static int read_stream_mux_config(struct LATMContext *latmctx,
|
||||
} else {
|
||||
int esc;
|
||||
do {
|
||||
if (get_bits_left(gb) < 9)
|
||||
return AVERROR_INVALIDDATA;
|
||||
esc = get_bits(gb, 1);
|
||||
skip_bits(gb, 8);
|
||||
} while (esc);
|
||||
|
@ -461,7 +461,7 @@ AVCodec ff_aac_fixed_decoder = {
|
||||
AV_SAMPLE_FMT_S32P, AV_SAMPLE_FMT_NONE
|
||||
},
|
||||
.capabilities = AV_CODEC_CAP_CHANNEL_CONF | AV_CODEC_CAP_DR1,
|
||||
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
|
||||
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
|
||||
.channel_layouts = aac_channel_layout,
|
||||
.profiles = NULL_IF_CONFIG_SMALL(ff_aac_profiles),
|
||||
.flush = flush,
|
||||
|
@ -520,7 +520,7 @@ static void flush(AVCodecContext *avctx)
|
||||
*
|
||||
* @return Returns error status. 0 - OK, !0 - error
|
||||
*/
|
||||
static int set_default_channel_config(AVCodecContext *avctx,
|
||||
static int set_default_channel_config(AACContext *ac, AVCodecContext *avctx,
|
||||
uint8_t (*layout_map)[3],
|
||||
int *tags,
|
||||
int channel_config)
|
||||
@ -547,7 +547,7 @@ static int set_default_channel_config(AVCodecContext *avctx,
|
||||
* As actual intended 7.1(wide) streams are very rare, default to assuming a
|
||||
* 7.1 layout was intended.
|
||||
*/
|
||||
if (channel_config == 7 && avctx->strict_std_compliance < FF_COMPLIANCE_STRICT) {
|
||||
if (channel_config == 7 && avctx->strict_std_compliance < FF_COMPLIANCE_STRICT && (!ac || !ac->warned_71_wide++)) {
|
||||
av_log(avctx, AV_LOG_INFO, "Assuming an incorrectly encoded 7.1 channel layout"
|
||||
" instead of a spec-compliant 7.1(wide) layout, use -strict %d to decode"
|
||||
" according to the specification instead.\n", FF_COMPLIANCE_STRICT);
|
||||
@ -573,7 +573,7 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
|
||||
|
||||
av_log(ac->avctx, AV_LOG_DEBUG, "mono with CPE\n");
|
||||
|
||||
if (set_default_channel_config(ac->avctx, layout_map,
|
||||
if (set_default_channel_config(ac, ac->avctx, layout_map,
|
||||
&layout_map_tags, 2) < 0)
|
||||
return NULL;
|
||||
if (output_configure(ac, layout_map, layout_map_tags,
|
||||
@ -592,7 +592,7 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
|
||||
|
||||
av_log(ac->avctx, AV_LOG_DEBUG, "stereo with SCE\n");
|
||||
|
||||
if (set_default_channel_config(ac->avctx, layout_map,
|
||||
if (set_default_channel_config(ac, ac->avctx, layout_map,
|
||||
&layout_map_tags, 1) < 0)
|
||||
return NULL;
|
||||
if (output_configure(ac, layout_map, layout_map_tags,
|
||||
@ -841,7 +841,7 @@ static int decode_ga_specific_config(AACContext *ac, AVCodecContext *avctx,
|
||||
if (tags < 0)
|
||||
return tags;
|
||||
} else {
|
||||
if ((ret = set_default_channel_config(avctx, layout_map,
|
||||
if ((ret = set_default_channel_config(ac, avctx, layout_map,
|
||||
&tags, channel_config)))
|
||||
return ret;
|
||||
}
|
||||
@ -937,7 +937,7 @@ static int decode_eld_specific_config(AACContext *ac, AVCodecContext *avctx,
|
||||
skip_bits_long(gb, 8 * len);
|
||||
}
|
||||
|
||||
if ((ret = set_default_channel_config(avctx, layout_map,
|
||||
if ((ret = set_default_channel_config(ac, avctx, layout_map,
|
||||
&tags, channel_config)))
|
||||
return ret;
|
||||
|
||||
@ -975,7 +975,7 @@ static int decode_audio_specific_config_gb(AACContext *ac,
|
||||
int i, ret;
|
||||
GetBitContext gbc = *gb;
|
||||
|
||||
if ((i = ff_mpeg4audio_get_config_gb(m4ac, &gbc, sync_extension)) < 0)
|
||||
if ((i = ff_mpeg4audio_get_config_gb(m4ac, &gbc, sync_extension, avctx)) < 0)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (m4ac->sampling_index > 12) {
|
||||
@ -1157,6 +1157,9 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
|
||||
AACContext *ac = avctx->priv_data;
|
||||
int ret;
|
||||
|
||||
if (avctx->sample_rate > 96000)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
ret = ff_thread_once(&aac_table_init, &aac_static_table_init);
|
||||
if (ret != 0)
|
||||
return AVERROR_UNKNOWN;
|
||||
@ -1197,7 +1200,7 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
|
||||
ac->oc[1].m4ac.chan_config = i;
|
||||
|
||||
if (ac->oc[1].m4ac.chan_config) {
|
||||
int ret = set_default_channel_config(avctx, layout_map,
|
||||
int ret = set_default_channel_config(ac, avctx, layout_map,
|
||||
&layout_map_tags, ac->oc[1].m4ac.chan_config);
|
||||
if (!ret)
|
||||
output_configure(ac, layout_map, layout_map_tags,
|
||||
@ -2999,7 +3002,7 @@ static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb)
|
||||
push_output_configuration(ac);
|
||||
if (hdr_info.chan_config) {
|
||||
ac->oc[1].m4ac.chan_config = hdr_info.chan_config;
|
||||
if ((ret = set_default_channel_config(ac->avctx,
|
||||
if ((ret = set_default_channel_config(ac, ac->avctx,
|
||||
layout_map,
|
||||
&layout_map_tags,
|
||||
hdr_info.chan_config)) < 0)
|
||||
@ -3246,9 +3249,15 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
|
||||
err = AVERROR_INVALIDDATA;
|
||||
goto fail;
|
||||
}
|
||||
while (elem_id > 0)
|
||||
elem_id -= decode_extension_payload(ac, gb, elem_id, che_prev, che_prev_type);
|
||||
err = 0; /* FIXME */
|
||||
err = 0;
|
||||
while (elem_id > 0) {
|
||||
int ret = decode_extension_payload(ac, gb, elem_id, che_prev, che_prev_type);
|
||||
if (ret < 0) {
|
||||
err = ret;
|
||||
break;
|
||||
}
|
||||
elem_id -= ret;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -39,6 +39,7 @@
|
||||
#include "mpeg4audio.h"
|
||||
#include "kbdwin.h"
|
||||
#include "sinewin.h"
|
||||
#include "profiles.h"
|
||||
|
||||
#include "aac.h"
|
||||
#include "aactab.h"
|
||||
@ -1131,6 +1132,7 @@ static const AVOption aacenc_options[] = {
|
||||
{"aac_ltp", "Long term prediction", offsetof(AACEncContext, options.ltp), AV_OPT_TYPE_BOOL, {.i64 = 0}, -1, 1, AACENC_FLAGS},
|
||||
{"aac_pred", "AAC-Main prediction", offsetof(AACEncContext, options.pred), AV_OPT_TYPE_BOOL, {.i64 = 0}, -1, 1, AACENC_FLAGS},
|
||||
{"aac_pce", "Forces the use of PCEs", offsetof(AACEncContext, options.pce), AV_OPT_TYPE_BOOL, {.i64 = 0}, -1, 1, AACENC_FLAGS},
|
||||
FF_AAC_PROFILE_OPTS
|
||||
{NULL}
|
||||
};
|
||||
|
||||
|
@ -414,33 +414,33 @@ static void hybrid_synthesis(PSDSPContext *dsp, INTFLOAT out[2][38][64],
|
||||
memset(out[0][n], 0, 5*sizeof(out[0][n][0]));
|
||||
memset(out[1][n], 0, 5*sizeof(out[1][n][0]));
|
||||
for (i = 0; i < 12; i++) {
|
||||
out[0][n][0] += in[ i][n][0];
|
||||
out[1][n][0] += in[ i][n][1];
|
||||
out[0][n][0] += (UINTFLOAT)in[ i][n][0];
|
||||
out[1][n][0] += (UINTFLOAT)in[ i][n][1];
|
||||
}
|
||||
for (i = 0; i < 8; i++) {
|
||||
out[0][n][1] += in[12+i][n][0];
|
||||
out[1][n][1] += in[12+i][n][1];
|
||||
out[0][n][1] += (UINTFLOAT)in[12+i][n][0];
|
||||
out[1][n][1] += (UINTFLOAT)in[12+i][n][1];
|
||||
}
|
||||
for (i = 0; i < 4; i++) {
|
||||
out[0][n][2] += in[20+i][n][0];
|
||||
out[1][n][2] += in[20+i][n][1];
|
||||
out[0][n][3] += in[24+i][n][0];
|
||||
out[1][n][3] += in[24+i][n][1];
|
||||
out[0][n][4] += in[28+i][n][0];
|
||||
out[1][n][4] += in[28+i][n][1];
|
||||
out[0][n][2] += (UINTFLOAT)in[20+i][n][0];
|
||||
out[1][n][2] += (UINTFLOAT)in[20+i][n][1];
|
||||
out[0][n][3] += (UINTFLOAT)in[24+i][n][0];
|
||||
out[1][n][3] += (UINTFLOAT)in[24+i][n][1];
|
||||
out[0][n][4] += (UINTFLOAT)in[28+i][n][0];
|
||||
out[1][n][4] += (UINTFLOAT)in[28+i][n][1];
|
||||
}
|
||||
}
|
||||
dsp->hybrid_synthesis_deint(out, in + 27, 5, len);
|
||||
} else {
|
||||
for (n = 0; n < len; n++) {
|
||||
out[0][n][0] = in[0][n][0] + in[1][n][0] + in[2][n][0] +
|
||||
in[3][n][0] + in[4][n][0] + in[5][n][0];
|
||||
out[1][n][0] = in[0][n][1] + in[1][n][1] + in[2][n][1] +
|
||||
in[3][n][1] + in[4][n][1] + in[5][n][1];
|
||||
out[0][n][1] = in[6][n][0] + in[7][n][0];
|
||||
out[1][n][1] = in[6][n][1] + in[7][n][1];
|
||||
out[0][n][2] = in[8][n][0] + in[9][n][0];
|
||||
out[1][n][2] = in[8][n][1] + in[9][n][1];
|
||||
out[0][n][0] = (UINTFLOAT)in[0][n][0] + in[1][n][0] + in[2][n][0] +
|
||||
(UINTFLOAT)in[3][n][0] + in[4][n][0] + in[5][n][0];
|
||||
out[1][n][0] = (UINTFLOAT)in[0][n][1] + in[1][n][1] + in[2][n][1] +
|
||||
(UINTFLOAT)in[3][n][1] + in[4][n][1] + in[5][n][1];
|
||||
out[0][n][1] = (UINTFLOAT)in[6][n][0] + in[7][n][0];
|
||||
out[1][n][1] = (UINTFLOAT)in[6][n][1] + in[7][n][1];
|
||||
out[0][n][2] = (UINTFLOAT)in[8][n][0] + in[9][n][0];
|
||||
out[1][n][2] = (UINTFLOAT)in[8][n][1] + in[9][n][1];
|
||||
}
|
||||
dsp->hybrid_synthesis_deint(out, in + 7, 3, len);
|
||||
}
|
||||
|
@ -6,8 +6,10 @@ OBJS-$(CONFIG_H264DSP) += aarch64/h264dsp_init_aarch64.o
|
||||
OBJS-$(CONFIG_H264PRED) += aarch64/h264pred_init.o
|
||||
OBJS-$(CONFIG_H264QPEL) += aarch64/h264qpel_init_aarch64.o
|
||||
OBJS-$(CONFIG_HPELDSP) += aarch64/hpeldsp_init_aarch64.o
|
||||
OBJS-$(CONFIG_IDCTDSP) += aarch64/idctdsp_init_aarch64.o
|
||||
OBJS-$(CONFIG_MPEGAUDIODSP) += aarch64/mpegaudiodsp_init.o
|
||||
OBJS-$(CONFIG_NEON_CLOBBER_TEST) += aarch64/neontest.o
|
||||
OBJS-$(CONFIG_PIXBLOCKDSP) += aarch64/pixblockdsp_init_aarch64.o
|
||||
OBJS-$(CONFIG_VIDEODSP) += aarch64/videodsp_init.o
|
||||
OBJS-$(CONFIG_VP8DSP) += aarch64/vp8dsp_init_aarch64.o
|
||||
|
||||
@ -21,6 +23,7 @@ OBJS-$(CONFIG_VC1DSP) += aarch64/vc1dsp_init_aarch64.o
|
||||
OBJS-$(CONFIG_VORBIS_DECODER) += aarch64/vorbisdsp_init.o
|
||||
OBJS-$(CONFIG_VP9_DECODER) += aarch64/vp9dsp_init_10bpp_aarch64.o \
|
||||
aarch64/vp9dsp_init_12bpp_aarch64.o \
|
||||
aarch64/vp9mc_aarch64.o \
|
||||
aarch64/vp9dsp_init_aarch64.o
|
||||
|
||||
# ARMv8 optimizations
|
||||
@ -41,10 +44,10 @@ NEON-OBJS-$(CONFIG_H264PRED) += aarch64/h264pred_neon.o
|
||||
NEON-OBJS-$(CONFIG_H264QPEL) += aarch64/h264qpel_neon.o \
|
||||
aarch64/hpeldsp_neon.o
|
||||
NEON-OBJS-$(CONFIG_HPELDSP) += aarch64/hpeldsp_neon.o
|
||||
NEON-OBJS-$(CONFIG_IDCTDSP) += aarch64/idctdsp_init_aarch64.o \
|
||||
aarch64/simple_idct_neon.o
|
||||
NEON-OBJS-$(CONFIG_IDCTDSP) += aarch64/simple_idct_neon.o
|
||||
NEON-OBJS-$(CONFIG_MDCT) += aarch64/mdct_neon.o
|
||||
NEON-OBJS-$(CONFIG_MPEGAUDIODSP) += aarch64/mpegaudiodsp_neon.o
|
||||
NEON-OBJS-$(CONFIG_PIXBLOCKDSP) += aarch64/pixblockdsp_neon.o
|
||||
NEON-OBJS-$(CONFIG_VP8DSP) += aarch64/vp8dsp_neon.o
|
||||
|
||||
# decoders/encoders
|
||||
|
@@ -21,6 +21,8 @@
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/arm/cpu.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/idctdsp.h"
#include "idct.h"
@@ -28,7 +30,9 @@
av_cold void ff_idctdsp_init_aarch64(IDCTDSPContext *c, AVCodecContext *avctx,
                                     unsigned high_bit_depth)
{
    if (!avctx->lowres && !high_bit_depth) {
        int cpu_flags = av_get_cpu_flags();

        if (have_neon(cpu_flags) && !avctx->lowres && !high_bit_depth) {
            if (avctx->idct_algo == FF_IDCT_AUTO ||
                avctx->idct_algo == FF_IDCT_SIMPLEAUTO ||
                avctx->idct_algo == FF_IDCT_SIMPLENEON) {
|
||||
|
@@ -95,16 +95,16 @@ function ff_opus_postfilter_neon, export=1
        fmla            v3.4s, v7.4s, v2.4s
        fadd            v6.4s, v6.4s, v4.4s

        ld1             {v8.4s}, [x0]
        fmla            v8.4s, v5.4s, v0.4s
        ld1             {v4.4s}, [x0]
        fmla            v4.4s, v5.4s, v0.4s

        fmul            v6.4s, v6.4s, v1.4s
        fadd            v6.4s, v6.4s, v3.4s

        fadd            v8.4s, v8.4s, v6.4s
        fadd            v4.4s, v4.4s, v6.4s
        fmul            v3.4s, v7.4s, v2.4s

        st1             {v8.4s}, [x0], #16
        st1             {v4.4s}, [x0], #16

        subs            w3, w3, #4
        b.gt            1b
|
||||
|
46 libavcodec/aarch64/pixblockdsp_init_aarch64.c Normal file
@@ -0,0 +1,46 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/aarch64/cpu.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/pixblockdsp.h"

void ff_get_pixels_neon(int16_t *block, const uint8_t *pixels,
                        ptrdiff_t stride);
void ff_diff_pixels_neon(int16_t *block, const uint8_t *s1,
                         const uint8_t *s2, ptrdiff_t stride);

av_cold void ff_pixblockdsp_init_aarch64(PixblockDSPContext *c,
                                         AVCodecContext *avctx,
                                         unsigned high_bit_depth)
{
    int cpu_flags = av_get_cpu_flags();

    if (have_neon(cpu_flags)) {
        if (!high_bit_depth) {
            c->get_pixels_unaligned =
            c->get_pixels = ff_get_pixels_neon;
        }
        c->diff_pixels_unaligned =
        c->diff_pixels = ff_diff_pixels_neon;
    }
}
|
51 libavcodec/aarch64/pixblockdsp_neon.S Normal file
@@ -0,0 +1,51 @@
/*
 * Copyright (c) 2020 Martin Storsjo
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/aarch64/asm.S"

function ff_get_pixels_neon, export=1
        mov             w3, #8
1:
        ld1             {v0.8b}, [x1], x2
        subs            w3, w3, #2
        ld1             {v1.8b}, [x1], x2
        uxtl            v0.8h, v0.8b
        uxtl            v1.8h, v1.8b
        st1             {v0.8h, v1.8h}, [x0], #32
        b.gt            1b

        ret
endfunc

function ff_diff_pixels_neon, export=1
        mov             w4, #8
1:
        ld1             {v0.8b}, [x1], x3
        ld1             {v1.8b}, [x2], x3
        subs            w4, w4, #2
        ld1             {v2.8b}, [x1], x3
        usubl           v0.8h, v0.8b, v1.8b
        ld1             {v3.8b}, [x2], x3
        usubl           v1.8h, v2.8b, v3.8b
        st1             {v0.8h, v1.8h}, [x0], #32
        b.gt            1b

        ret
endfunc
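
For readers not versed in AArch64 SIMD: ff_get_pixels_neon widens an 8x8 block of bytes into int16_t values (uxtl), and ff_diff_pixels_neon stores the widened difference of two such blocks (usubl). A minimal C sketch of the same behaviour, assuming the usual pixblockdsp contract of an 8x8 block with a per-row byte stride (the _c_ref helper names are illustrative, not FFmpeg API):

#include <stddef.h>
#include <stdint.h>

/* Sketch: what ff_get_pixels_neon computes, one row at a time. */
static void get_pixels_c_ref(int16_t *block, const uint8_t *pixels, ptrdiff_t stride)
{
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++)
            block[8 * i + j] = pixels[j];        /* zero-extend u8 -> s16 */
        pixels += stride;
    }
}

/* Sketch: what ff_diff_pixels_neon computes, one row at a time. */
static void diff_pixels_c_ref(int16_t *block, const uint8_t *s1,
                              const uint8_t *s2, ptrdiff_t stride)
{
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++)
            block[8 * i + j] = s1[j] - s2[j];    /* widening subtract */
        s1 += stride;
        s2 += stride;
    }
}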
|
@ -25,31 +25,6 @@
|
||||
// const uint8_t *ref, ptrdiff_t ref_stride,
|
||||
// int h, int mx, int my);
|
||||
|
||||
function ff_vp9_copy128_aarch64, export=1
|
||||
1:
|
||||
ldp x5, x6, [x2]
|
||||
ldp x7, x8, [x2, #16]
|
||||
stp x5, x6, [x0]
|
||||
ldp x9, x10, [x2, #32]
|
||||
stp x7, x8, [x0, #16]
|
||||
subs w4, w4, #1
|
||||
ldp x11, x12, [x2, #48]
|
||||
stp x9, x10, [x0, #32]
|
||||
stp x11, x12, [x0, #48]
|
||||
ldp x5, x6, [x2, #64]
|
||||
ldp x7, x8, [x2, #80]
|
||||
stp x5, x6, [x0, #64]
|
||||
ldp x9, x10, [x2, #96]
|
||||
stp x7, x8, [x0, #80]
|
||||
ldp x11, x12, [x2, #112]
|
||||
stp x9, x10, [x0, #96]
|
||||
stp x11, x12, [x0, #112]
|
||||
add x2, x2, x3
|
||||
add x0, x0, x1
|
||||
b.ne 1b
|
||||
ret
|
||||
endfunc
|
||||
|
||||
function ff_vp9_avg64_16_neon, export=1
|
||||
mov x5, x0
|
||||
sub x1, x1, #64
|
||||
|
81 libavcodec/aarch64/vp9mc_aarch64.S Normal file
@ -0,0 +1,81 @@
|
||||
/*
|
||||
* Copyright (c) 2016 Google Inc.
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "libavutil/aarch64/asm.S"
|
||||
|
||||
// All public functions in this file have the following signature:
|
||||
// typedef void (*vp9_mc_func)(uint8_t *dst, ptrdiff_t dst_stride,
|
||||
// const uint8_t *ref, ptrdiff_t ref_stride,
|
||||
// int h, int mx, int my);
|
||||
|
||||
function ff_vp9_copy128_aarch64, export=1
|
||||
1:
|
||||
ldp x5, x6, [x2]
|
||||
ldp x7, x8, [x2, #16]
|
||||
stp x5, x6, [x0]
|
||||
ldp x9, x10, [x2, #32]
|
||||
stp x7, x8, [x0, #16]
|
||||
subs w4, w4, #1
|
||||
ldp x11, x12, [x2, #48]
|
||||
stp x9, x10, [x0, #32]
|
||||
stp x11, x12, [x0, #48]
|
||||
ldp x5, x6, [x2, #64]
|
||||
ldp x7, x8, [x2, #80]
|
||||
stp x5, x6, [x0, #64]
|
||||
ldp x9, x10, [x2, #96]
|
||||
stp x7, x8, [x0, #80]
|
||||
ldp x11, x12, [x2, #112]
|
||||
stp x9, x10, [x0, #96]
|
||||
stp x11, x12, [x0, #112]
|
||||
add x2, x2, x3
|
||||
add x0, x0, x1
|
||||
b.ne 1b
|
||||
ret
|
||||
endfunc
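
The routines in this file all follow the vp9_mc_func signature quoted in the comment above; for a plain copy the subpel arguments mx and my are ignored and 128 bytes per row are copied for h rows. A hedged C-side sketch of how such a pointer is invoked (the copy_rows wrapper is illustrative, not FFmpeg API):

#include <stddef.h>
#include <stdint.h>

typedef void (*vp9_mc_func)(uint8_t *dst, ptrdiff_t dst_stride,
                            const uint8_t *ref, ptrdiff_t ref_stride,
                            int h, int mx, int my);

void ff_vp9_copy128_aarch64(uint8_t *dst, ptrdiff_t dst_stride,
                            const uint8_t *ref, ptrdiff_t ref_stride,
                            int h, int mx, int my);

static void copy_rows(uint8_t *dst, ptrdiff_t dst_stride,
                      const uint8_t *ref, ptrdiff_t ref_stride, int h)
{
    vp9_mc_func copy = ff_vp9_copy128_aarch64;
    copy(dst, dst_stride, ref, ref_stride, h, 0, 0); /* mx = my = 0: no subpel filtering */
}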
|
||||
|
||||
function ff_vp9_copy64_aarch64, export=1
|
||||
1:
|
||||
ldp x5, x6, [x2]
|
||||
ldp x7, x8, [x2, #16]
|
||||
stp x5, x6, [x0]
|
||||
ldp x9, x10, [x2, #32]
|
||||
stp x7, x8, [x0, #16]
|
||||
subs w4, w4, #1
|
||||
ldp x11, x12, [x2, #48]
|
||||
stp x9, x10, [x0, #32]
|
||||
stp x11, x12, [x0, #48]
|
||||
add x2, x2, x3
|
||||
add x0, x0, x1
|
||||
b.ne 1b
|
||||
ret
|
||||
endfunc
|
||||
|
||||
function ff_vp9_copy32_aarch64, export=1
|
||||
1:
|
||||
ldp x5, x6, [x2]
|
||||
ldp x7, x8, [x2, #16]
|
||||
stp x5, x6, [x0]
|
||||
subs w4, w4, #1
|
||||
stp x7, x8, [x0, #16]
|
||||
add x2, x2, x3
|
||||
add x0, x0, x1
|
||||
b.ne 1b
|
||||
ret
|
||||
endfunc
|
@ -25,23 +25,6 @@
|
||||
// const uint8_t *ref, ptrdiff_t ref_stride,
|
||||
// int h, int mx, int my);
|
||||
|
||||
function ff_vp9_copy64_aarch64, export=1
|
||||
1:
|
||||
ldp x5, x6, [x2]
|
||||
ldp x7, x8, [x2, #16]
|
||||
stp x5, x6, [x0]
|
||||
ldp x9, x10, [x2, #32]
|
||||
stp x7, x8, [x0, #16]
|
||||
subs w4, w4, #1
|
||||
ldp x11, x12, [x2, #48]
|
||||
stp x9, x10, [x0, #32]
|
||||
stp x11, x12, [x0, #48]
|
||||
add x2, x2, x3
|
||||
add x0, x0, x1
|
||||
b.ne 1b
|
||||
ret
|
||||
endfunc
|
||||
|
||||
function ff_vp9_avg64_neon, export=1
|
||||
mov x5, x0
|
||||
1:
|
||||
@ -64,19 +47,6 @@ function ff_vp9_avg64_neon, export=1
|
||||
ret
|
||||
endfunc
|
||||
|
||||
function ff_vp9_copy32_aarch64, export=1
|
||||
1:
|
||||
ldp x5, x6, [x2]
|
||||
ldp x7, x8, [x2, #16]
|
||||
stp x5, x6, [x0]
|
||||
subs w4, w4, #1
|
||||
stp x7, x8, [x0, #16]
|
||||
add x2, x2, x3
|
||||
add x0, x0, x1
|
||||
b.ne 1b
|
||||
ret
|
||||
endfunc
|
||||
|
||||
function ff_vp9_avg32_neon, export=1
|
||||
1:
|
||||
ld1 {v2.16b, v3.16b}, [x2], x3
|
||||
|
@@ -91,7 +91,7 @@ static int aasc_decode_frame(AVCodecContext *avctx,
        return AVERROR_INVALIDDATA;
    }

    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
    if ((ret = ff_reget_buffer(avctx, s->frame, 0)) < 0)
        return ret;

    compr = AV_RL32(buf);
|
||||
|
@ -201,6 +201,12 @@ static int ac3_sync(uint64_t state, AACAC3ParseContext *hdr_info,
|
||||
AC3HeaderInfo hdr;
|
||||
GetBitContext gbc;
|
||||
|
||||
if (tmp.u8[1] == 0x77 && tmp.u8[2] == 0x0b) {
|
||||
FFSWAP(uint8_t, tmp.u8[1], tmp.u8[2]);
|
||||
FFSWAP(uint8_t, tmp.u8[3], tmp.u8[4]);
|
||||
FFSWAP(uint8_t, tmp.u8[5], tmp.u8[6]);
|
||||
}
|
||||
|
||||
init_get_bits(&gbc, tmp.u8+8-AC3_HEADER_SIZE, 54);
|
||||
err = ff_ac3_parse_header(&gbc, &hdr);
|
||||
|
||||
|
@ -107,30 +107,17 @@ static void scale_coefs (
|
||||
}
|
||||
} else {
|
||||
shift = -shift;
|
||||
mul <<= shift;
|
||||
for (i=0; i<len; i+=8) {
|
||||
|
||||
temp = src[i] * mul;
|
||||
temp1 = src[i+1] * mul;
|
||||
temp2 = src[i+2] * mul;
|
||||
|
||||
dst[i] = temp << shift;
|
||||
temp3 = src[i+3] * mul;
|
||||
|
||||
dst[i+1] = temp1 << shift;
|
||||
temp4 = src[i + 4] * mul;
|
||||
dst[i+2] = temp2 << shift;
|
||||
|
||||
temp5 = src[i+5] * mul;
|
||||
dst[i+3] = temp3 << shift;
|
||||
temp6 = src[i+6] * mul;
|
||||
|
||||
dst[i+4] = temp4 << shift;
|
||||
temp7 = src[i+7] * mul;
|
||||
|
||||
dst[i+5] = temp5 << shift;
|
||||
dst[i+6] = temp6 << shift;
|
||||
dst[i+7] = temp7 << shift;
|
||||
|
||||
dst[i] = src[i ] * mul;
|
||||
dst[i+1] = src[i+1] * mul;
|
||||
dst[i+2] = src[i+2] * mul;
|
||||
dst[i+3] = src[i+3] * mul;
|
||||
dst[i+4] = src[i+4] * mul;
|
||||
dst[i+5] = src[i+5] * mul;
|
||||
dst[i+6] = src[i+6] * mul;
|
||||
dst[i+7] = src[i+7] * mul;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -63,7 +63,7 @@ static const float surmixlev_options[SURMIXLEV_NUM_OPTIONS] = {
|
||||
|
||||
#define EXTMIXLEV_NUM_OPTIONS 8
|
||||
static const float extmixlev_options[EXTMIXLEV_NUM_OPTIONS] = {
|
||||
LEVEL_PLUS_3DB, LEVEL_PLUS_1POINT5DB, LEVEL_ONE, LEVEL_MINUS_4POINT5DB,
|
||||
LEVEL_PLUS_3DB, LEVEL_PLUS_1POINT5DB, LEVEL_ONE, LEVEL_MINUS_1POINT5DB,
|
||||
LEVEL_MINUS_3DB, LEVEL_MINUS_4POINT5DB, LEVEL_MINUS_6DB, LEVEL_ZERO
|
||||
};
|
||||
|
||||
@ -1065,7 +1065,7 @@ static int bit_alloc(AC3EncodeContext *s, int snr_offset)
|
||||
{
|
||||
int blk, ch;
|
||||
|
||||
snr_offset = (snr_offset - 240) << 2;
|
||||
snr_offset = (snr_offset - 240) * 4;
|
||||
|
||||
reset_block_bap(s);
|
||||
for (blk = 0; blk < s->num_blocks; blk++) {
|
||||
@ -1993,12 +1993,11 @@ int ff_ac3_validate_metadata(AC3EncodeContext *s)
|
||||
/* set bitstream id for alternate bitstream syntax */
|
||||
if (!s->eac3 && (opt->extended_bsi_1 || opt->extended_bsi_2)) {
|
||||
if (s->bitstream_id > 8 && s->bitstream_id < 11) {
|
||||
static int warn_once = 1;
|
||||
if (warn_once) {
|
||||
if (!s->warned_alternate_bitstream) {
|
||||
av_log(avctx, AV_LOG_WARNING, "alternate bitstream syntax is "
|
||||
"not compatible with reduced samplerates. writing of "
|
||||
"extended bitstream information will be disabled.\n");
|
||||
warn_once = 0;
|
||||
s->warned_alternate_bitstream = 1;
|
||||
}
|
||||
} else {
|
||||
s->bitstream_id = 6;
|
||||
@ -2051,7 +2050,8 @@ av_cold int ff_ac3_encode_close(AVCodecContext *avctx)
|
||||
av_freep(&block->cpl_coord_mant);
|
||||
}
|
||||
|
||||
s->mdct_end(s);
|
||||
if (s->mdct_end)
|
||||
s->mdct_end(s);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -2433,7 +2433,7 @@ av_cold int ff_ac3_encode_init(AVCodecContext *avctx)
|
||||
|
||||
ret = validate_options(s);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto init_fail;
|
||||
|
||||
avctx->frame_size = AC3_BLOCK_SIZE * s->num_blocks;
|
||||
avctx->initial_padding = AC3_BLOCK_SIZE;
|
||||
|
@ -255,6 +255,8 @@ typedef struct AC3EncodeContext {
|
||||
uint8_t *ref_bap [AC3_MAX_CHANNELS][AC3_MAX_BLOCKS]; ///< bit allocation pointers (bap)
|
||||
int ref_bap_set; ///< indicates if ref_bap pointers have been set
|
||||
|
||||
int warned_alternate_bitstream;
|
||||
|
||||
/* fixed vs. float function pointers */
|
||||
void (*mdct_end)(struct AC3EncodeContext *s);
|
||||
int (*mdct_init)(struct AC3EncodeContext *s);
|
||||
|
@ -155,6 +155,7 @@ AVCodec ff_ac3_fixed_encoder = {
|
||||
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16P,
|
||||
AV_SAMPLE_FMT_NONE },
|
||||
.priv_class = &ac3enc_class,
|
||||
.supported_samplerates = ff_ac3_sample_rate_tab,
|
||||
.channel_layouts = ff_ac3_channel_layouts,
|
||||
.defaults = ac3_defaults,
|
||||
};
|
||||
|
@ -150,6 +150,7 @@ AVCodec ff_ac3_encoder = {
|
||||
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLTP,
|
||||
AV_SAMPLE_FMT_NONE },
|
||||
.priv_class = &ac3enc_class,
|
||||
.supported_samplerates = ff_ac3_sample_rate_tab,
|
||||
.channel_layouts = ff_ac3_channel_layouts,
|
||||
.defaults = ac3_defaults,
|
||||
};
|
||||
|
@ -126,7 +126,7 @@ const uint8_t ff_ac3_dec_channel_map[8][2][6] = {
|
||||
};
|
||||
|
||||
/* possible frequencies */
|
||||
const uint16_t ff_ac3_sample_rate_tab[3] = { 48000, 44100, 32000 };
|
||||
const int ff_ac3_sample_rate_tab[] = { 48000, 44100, 32000, 0 };
|
||||
|
||||
/* possible bitrates */
|
||||
const uint16_t ff_ac3_bitrate_tab[19] = {
|
||||
|
@ -33,7 +33,7 @@ extern const uint8_t ff_ac3_channels_tab[8];
|
||||
extern av_export_avcodec const uint16_t avpriv_ac3_channel_layout_tab[8];
|
||||
extern const uint8_t ff_ac3_enc_channel_map[8][2][6];
|
||||
extern const uint8_t ff_ac3_dec_channel_map[8][2][6];
|
||||
extern const uint16_t ff_ac3_sample_rate_tab[3];
|
||||
extern const int ff_ac3_sample_rate_tab[];
|
||||
extern const uint16_t ff_ac3_bitrate_tab[19];
|
||||
extern const uint8_t ff_ac3_rematrix_band_tab[5];
|
||||
extern const uint8_t ff_eac3_default_cpl_band_struct[18];
|
||||
|
@ -12,6 +12,11 @@
|
||||
* EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
|
||||
* MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
|
||||
* THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
|
||||
* Argonaut Games ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
|
||||
* Simon & Schuster Interactive ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
|
||||
* Ubisoft ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
|
||||
* High Voltage Software ALP decoder by Zane van Iperen (zane@zanevaniperen.com)
|
||||
* Cunning Developments decoder by Zane van Iperen (zane@zanevaniperen.com)
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
@ -81,6 +86,15 @@ static const int8_t swf_index_tables[4][16] = {
|
||||
/*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
|
||||
};
|
||||
|
||||
static const int8_t zork_index_table[8] = {
|
||||
-1, -1, -1, 1, 4, 7, 10, 12,
|
||||
};
|
||||
|
||||
static const int8_t mtf_index_table[16] = {
|
||||
8, 6, 4, 2, -1, -1, -1, -1,
|
||||
-1, -1, -1, -1, 2, 4, 6, 8,
|
||||
};
|
||||
|
||||
/* end of tables */
|
||||
|
||||
typedef struct ADPCMDecodeContext {
|
||||
@ -96,6 +110,9 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx)
|
||||
unsigned int max_channels = 2;
|
||||
|
||||
switch(avctx->codec->id) {
|
||||
case AV_CODEC_ID_ADPCM_IMA_CUNNING:
|
||||
max_channels = 1;
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_DTK:
|
||||
case AV_CODEC_ID_ADPCM_EA:
|
||||
min_channels = 2;
|
||||
@ -105,11 +122,16 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx)
|
||||
case AV_CODEC_ID_ADPCM_EA_R2:
|
||||
case AV_CODEC_ID_ADPCM_EA_R3:
|
||||
case AV_CODEC_ID_ADPCM_EA_XAS:
|
||||
case AV_CODEC_ID_ADPCM_MS:
|
||||
max_channels = 6;
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_MTAF:
|
||||
min_channels = 2;
|
||||
max_channels = 8;
|
||||
if (avctx->channels & 1) {
|
||||
avpriv_request_sample(avctx, "channel count %d\n", avctx->channels);
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_PSX:
|
||||
max_channels = 8;
|
||||
@ -135,43 +157,64 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx)
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_IMA_APC:
|
||||
if (avctx->extradata && avctx->extradata_size >= 8) {
|
||||
c->status[0].predictor = AV_RL32(avctx->extradata);
|
||||
c->status[1].predictor = AV_RL32(avctx->extradata + 4);
|
||||
c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
|
||||
c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
|
||||
}
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_IMA_APM:
|
||||
if (avctx->extradata && avctx->extradata_size >= 16) {
|
||||
c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 0), 18);
|
||||
c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 4), 0, 88);
|
||||
c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 8), 18);
|
||||
c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 12), 0, 88);
|
||||
}
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_IMA_WS:
|
||||
if (avctx->extradata && avctx->extradata_size >= 2)
|
||||
c->vqa_version = AV_RL16(avctx->extradata);
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_ARGO:
|
||||
if (avctx->bits_per_coded_sample != 4)
|
||||
return AVERROR_INVALIDDATA;
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_ZORK:
|
||||
if (avctx->bits_per_coded_sample != 8)
|
||||
return AVERROR_INVALIDDATA;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
switch(avctx->codec->id) {
|
||||
case AV_CODEC_ID_ADPCM_AICA:
|
||||
case AV_CODEC_ID_ADPCM_IMA_DAT4:
|
||||
case AV_CODEC_ID_ADPCM_IMA_QT:
|
||||
case AV_CODEC_ID_ADPCM_IMA_WAV:
|
||||
case AV_CODEC_ID_ADPCM_4XM:
|
||||
case AV_CODEC_ID_ADPCM_XA:
|
||||
case AV_CODEC_ID_ADPCM_EA_R1:
|
||||
case AV_CODEC_ID_ADPCM_EA_R2:
|
||||
case AV_CODEC_ID_ADPCM_EA_R3:
|
||||
case AV_CODEC_ID_ADPCM_EA_XAS:
|
||||
case AV_CODEC_ID_ADPCM_THP:
|
||||
case AV_CODEC_ID_ADPCM_THP_LE:
|
||||
case AV_CODEC_ID_ADPCM_AFC:
|
||||
case AV_CODEC_ID_ADPCM_DTK:
|
||||
case AV_CODEC_ID_ADPCM_PSX:
|
||||
case AV_CODEC_ID_ADPCM_MTAF:
|
||||
avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_IMA_WS:
|
||||
avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
|
||||
AV_SAMPLE_FMT_S16;
|
||||
break;
|
||||
default:
|
||||
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
switch (avctx->codec->id) {
|
||||
case AV_CODEC_ID_ADPCM_AICA:
|
||||
case AV_CODEC_ID_ADPCM_IMA_DAT4:
|
||||
case AV_CODEC_ID_ADPCM_IMA_QT:
|
||||
case AV_CODEC_ID_ADPCM_IMA_WAV:
|
||||
case AV_CODEC_ID_ADPCM_4XM:
|
||||
case AV_CODEC_ID_ADPCM_XA:
|
||||
case AV_CODEC_ID_ADPCM_EA_R1:
|
||||
case AV_CODEC_ID_ADPCM_EA_R2:
|
||||
case AV_CODEC_ID_ADPCM_EA_R3:
|
||||
case AV_CODEC_ID_ADPCM_EA_XAS:
|
||||
case AV_CODEC_ID_ADPCM_THP:
|
||||
case AV_CODEC_ID_ADPCM_THP_LE:
|
||||
case AV_CODEC_ID_ADPCM_AFC:
|
||||
case AV_CODEC_ID_ADPCM_DTK:
|
||||
case AV_CODEC_ID_ADPCM_PSX:
|
||||
case AV_CODEC_ID_ADPCM_MTAF:
|
||||
case AV_CODEC_ID_ADPCM_ARGO:
|
||||
avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_IMA_WS:
|
||||
avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
|
||||
AV_SAMPLE_FMT_S16;
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_MS:
|
||||
avctx->sample_fmt = avctx->channels > 2 ? AV_SAMPLE_FMT_S16P :
|
||||
AV_SAMPLE_FMT_S16;
|
||||
break;
|
||||
default:
|
||||
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -247,6 +290,65 @@ static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibb
|
||||
return (int16_t)c->predictor;
|
||||
}
|
||||
|
||||
static inline int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
|
||||
{
|
||||
int step_index;
|
||||
int predictor;
|
||||
int sign, delta, diff, step;
|
||||
|
||||
step = ff_adpcm_step_table[c->step_index];
|
||||
step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
|
||||
step_index = av_clip(step_index, 0, 88);
|
||||
|
||||
sign = nibble & 8;
|
||||
delta = nibble & 7;
|
||||
diff = (delta * step) >> shift;
|
||||
predictor = c->predictor;
|
||||
if (sign) predictor -= diff;
|
||||
else predictor += diff;
|
||||
|
||||
c->predictor = av_clip_int16(predictor);
|
||||
c->step_index = step_index;
|
||||
|
||||
return (int16_t)c->predictor;
|
||||
}
|
||||
|
||||
static inline int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
|
||||
{
|
||||
int step_index, step, delta, predictor;
|
||||
|
||||
step = ff_adpcm_step_table[c->step_index];
|
||||
|
||||
delta = step * (2 * nibble - 15);
|
||||
predictor = c->predictor + delta;
|
||||
|
||||
step_index = c->step_index + mtf_index_table[(unsigned)nibble];
|
||||
c->predictor = av_clip_int16(predictor >> 4);
|
||||
c->step_index = av_clip(step_index, 0, 88);
|
||||
|
||||
return (int16_t)c->predictor;
|
||||
}
|
||||
|
||||
static inline int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
|
||||
{
|
||||
int step_index;
|
||||
int predictor;
|
||||
int step;
|
||||
|
||||
nibble = sign_extend(nibble & 0xF, 4);
|
||||
|
||||
step = ff_adpcm_ima_cunning_step_table[c->step_index];
|
||||
step_index = c->step_index + ff_adpcm_ima_cunning_index_table[abs(nibble)];
|
||||
step_index = av_clip(step_index, 0, 60);
|
||||
|
||||
predictor = c->predictor + step * nibble;
|
||||
|
||||
c->predictor = av_clip_int16(predictor);
|
||||
c->step_index = step_index;
|
||||
|
||||
return c->predictor;
|
||||
}
|
||||
|
||||
static inline int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
|
||||
{
|
||||
int nibble, step_index, predictor, sign, delta, diff, step, shift;
|
||||
@ -270,7 +372,7 @@ static inline int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitC
|
||||
return (int16_t)c->predictor;
|
||||
}
|
||||
|
||||
static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble, int shift)
|
||||
static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
|
||||
{
|
||||
int step_index;
|
||||
int predictor;
|
||||
@ -333,7 +435,7 @@ static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nib
|
||||
c->predictor = av_clip_intp2(predictor, 11);
|
||||
c->step_index = step_index;
|
||||
|
||||
return c->predictor << 4;
|
||||
return c->predictor * 16;
|
||||
}
|
||||
|
||||
static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
|
||||
@ -400,6 +502,41 @@ static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t ni
|
||||
return c->predictor;
|
||||
}
|
||||
|
||||
static inline int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
{
    int16_t index = c->step_index;
    uint32_t lookup_sample = ff_adpcm_step_table[index];
    int32_t sample = 0;

    if (nibble & 0x40)
        sample += lookup_sample;
    if (nibble & 0x20)
        sample += lookup_sample >> 1;
    if (nibble & 0x10)
        sample += lookup_sample >> 2;
    if (nibble & 0x08)
        sample += lookup_sample >> 3;
    if (nibble & 0x04)
        sample += lookup_sample >> 4;
    if (nibble & 0x02)
        sample += lookup_sample >> 5;
    if (nibble & 0x01)
        sample += lookup_sample >> 6;
    if (nibble & 0x80)
        sample = -sample;

    sample += c->predictor;
    sample = av_clip_int16(sample);

    index += zork_index_table[(nibble >> 4) & 7];
    index = av_clip(index, 0, 88);

    c->predictor = sample;
    c->step_index = index;

    return sample;
}
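
The chain of bit tests in adpcm_zork_expand_nibble builds a weighted sum of the table step: bit 6 adds the full step, each lower bit adds half of the previous contribution, and bit 7 negates the total. An equivalent loop formulation, shown only as a sketch (the decoder keeps the unrolled form above):

/* Sketch: same weighted sum as the unrolled bit tests above. */
static int32_t zork_weighted_delta(uint8_t nibble, uint32_t lookup_sample)
{
    int32_t sample = 0;
    for (int k = 0; k < 7; k++)            /* bits 6..0 */
        if (nibble & (0x40 >> k))
            sample += lookup_sample >> k;  /* step, step/2, ..., step/64 */
    if (nibble & 0x80)                     /* bit 7 selects the sign */
        sample = -sample;
    return sample;
}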
|
||||
|
||||
static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
|
||||
const uint8_t *in, ADPCMChannelStatus *left,
|
||||
ADPCMChannelStatus *right, int channels, int sample_offset)
|
||||
@ -422,6 +559,10 @@ static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
|
||||
avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
|
||||
filter=0;
|
||||
}
|
||||
if (shift < 0) {
|
||||
avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
|
||||
shift = 0;
|
||||
}
|
||||
f0 = xa_adpcm_table[filter][0];
|
||||
f1 = xa_adpcm_table[filter][1];
|
||||
|
||||
@ -432,7 +573,7 @@ static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
|
||||
d = in[16+i+j*4];
|
||||
|
||||
t = sign_extend(d, 4);
|
||||
s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
|
||||
s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
|
||||
s_2 = s_1;
|
||||
s_1 = av_clip_int16(s);
|
||||
out0[j] = s_1;
|
||||
@ -447,10 +588,14 @@ static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
|
||||
|
||||
shift = 12 - (in[5+i*2] & 15);
|
||||
filter = in[5+i*2] >> 4;
|
||||
if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
|
||||
if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
|
||||
avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
|
||||
filter=0;
|
||||
}
|
||||
if (shift < 0) {
|
||||
avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
|
||||
shift = 0;
|
||||
}
|
||||
|
||||
f0 = xa_adpcm_table[filter][0];
|
||||
f1 = xa_adpcm_table[filter][1];
|
||||
@ -459,7 +604,7 @@ static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
|
||||
d = in[16+i+j*4];
|
||||
|
||||
t = sign_extend(d >> 4, 4);
|
||||
s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
|
||||
s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
|
||||
s_2 = s_1;
|
||||
s_1 = av_clip_int16(s);
|
||||
out1[j] = s_1;
|
||||
@ -537,8 +682,25 @@ static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_
|
||||
}
|
||||
}
|
||||
|
||||
static inline int16_t adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int control, int shift)
|
||||
{
|
||||
int sample = nibble * (1 << shift);
|
||||
|
||||
if (control & 0x04)
|
||||
sample += (8 * cs->sample1) - (4 * cs->sample2);
|
||||
else
|
||||
sample += 4 * cs->sample1;
|
||||
|
||||
sample = av_clip_int16(sample >> 2);
|
||||
|
||||
cs->sample2 = cs->sample1;
|
||||
cs->sample1 = sample;
|
||||
|
||||
return sample;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the number of samples that will be decoded from the packet.
|
||||
* Get the number of samples (per channel) that will be decoded from the packet.
|
||||
* In one case, this is actually the maximum number of samples possible to
|
||||
* decode with the given buf_size.
|
||||
*
|
||||
@ -575,14 +737,24 @@ static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
|
||||
return 0;
|
||||
nb_samples = 64;
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_ARGO:
|
||||
if (buf_size < 17 * ch)
|
||||
return 0;
|
||||
nb_samples = 32;
|
||||
break;
|
||||
/* simple 4-bit adpcm */
|
||||
case AV_CODEC_ID_ADPCM_CT:
|
||||
case AV_CODEC_ID_ADPCM_IMA_APC:
|
||||
case AV_CODEC_ID_ADPCM_IMA_CUNNING:
|
||||
case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
|
||||
case AV_CODEC_ID_ADPCM_IMA_OKI:
|
||||
case AV_CODEC_ID_ADPCM_IMA_WS:
|
||||
case AV_CODEC_ID_ADPCM_YAMAHA:
|
||||
case AV_CODEC_ID_ADPCM_AICA:
|
||||
case AV_CODEC_ID_ADPCM_IMA_SSI:
|
||||
case AV_CODEC_ID_ADPCM_IMA_APM:
|
||||
case AV_CODEC_ID_ADPCM_IMA_ALP:
|
||||
case AV_CODEC_ID_ADPCM_IMA_MTF:
|
||||
nb_samples = buf_size * 2 / ch;
|
||||
break;
|
||||
}
|
||||
@ -741,6 +913,9 @@ static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
|
||||
case AV_CODEC_ID_ADPCM_PSX:
|
||||
nb_samples = buf_size / (16 * ch) * 28;
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_ZORK:
|
||||
nb_samples = buf_size / ch;
|
||||
break;
|
||||
}
|
||||
|
||||
/* validate coded sample count */
|
||||
@ -827,8 +1002,8 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
|
||||
for (m = 0; m < 64; m += 2) {
|
||||
int byte = bytestream2_get_byteu(&gb);
|
||||
samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F, 3);
|
||||
samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 , 3);
|
||||
samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F);
|
||||
samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 );
|
||||
}
|
||||
}
|
||||
break;
|
||||
@ -924,42 +1099,66 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
{
|
||||
int block_predictor;
|
||||
|
||||
block_predictor = bytestream2_get_byteu(&gb);
|
||||
if (block_predictor > 6) {
|
||||
av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
|
||||
block_predictor);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
|
||||
c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
|
||||
if (st) {
|
||||
if (avctx->channels > 2) {
|
||||
for (channel = 0; channel < avctx->channels; channel++) {
|
||||
samples = samples_p[channel];
|
||||
block_predictor = bytestream2_get_byteu(&gb);
|
||||
if (block_predictor > 6) {
|
||||
av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[%d] = %d\n",
|
||||
channel, block_predictor);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
c->status[channel].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
|
||||
c->status[channel].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
|
||||
c->status[channel].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
|
||||
c->status[channel].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
|
||||
c->status[channel].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
|
||||
*samples++ = c->status[channel].sample2;
|
||||
*samples++ = c->status[channel].sample1;
|
||||
for(n = (nb_samples - 2) >> 1; n > 0; n--) {
|
||||
int byte = bytestream2_get_byteu(&gb);
|
||||
*samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte >> 4 );
|
||||
*samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte & 0x0F);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
block_predictor = bytestream2_get_byteu(&gb);
|
||||
if (block_predictor > 6) {
|
||||
av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
|
||||
av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
|
||||
block_predictor);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
|
||||
c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
|
||||
}
|
||||
c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
|
||||
if (st){
|
||||
c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
|
||||
}
|
||||
c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
|
||||
c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
|
||||
if (st) {
|
||||
block_predictor = bytestream2_get_byteu(&gb);
|
||||
if (block_predictor > 6) {
|
||||
av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
|
||||
block_predictor);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
|
||||
c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
|
||||
}
|
||||
c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
|
||||
if (st){
|
||||
c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
|
||||
}
|
||||
|
||||
c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
|
||||
if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
|
||||
c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
|
||||
if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
|
||||
c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
|
||||
if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
|
||||
c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
|
||||
if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
|
||||
|
||||
*samples++ = c->status[0].sample2;
|
||||
if (st) *samples++ = c->status[1].sample2;
|
||||
*samples++ = c->status[0].sample1;
|
||||
if (st) *samples++ = c->status[1].sample1;
|
||||
for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
|
||||
int byte = bytestream2_get_byteu(&gb);
|
||||
*samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
|
||||
*samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
|
||||
*samples++ = c->status[0].sample2;
|
||||
if (st) *samples++ = c->status[1].sample2;
|
||||
*samples++ = c->status[0].sample1;
|
||||
if (st) *samples++ = c->status[1].sample1;
|
||||
for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
|
||||
int byte = bytestream2_get_byteu(&gb);
|
||||
*samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
|
||||
*samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
@ -1105,14 +1304,48 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
}
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_IMA_APC:
|
||||
while (bytestream2_get_bytes_left(&gb) > 0) {
|
||||
for (n = nb_samples >> (1 - st); n > 0; n--) {
|
||||
int v = bytestream2_get_byteu(&gb);
|
||||
*samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
|
||||
*samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
|
||||
}
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_IMA_SSI:
|
||||
for (n = nb_samples >> (1 - st); n > 0; n--) {
|
||||
int v = bytestream2_get_byteu(&gb);
|
||||
*samples++ = adpcm_ima_qt_expand_nibble(&c->status[0], v >> 4 );
|
||||
*samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0x0F);
|
||||
}
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_IMA_APM:
|
||||
for (n = nb_samples / 2; n > 0; n--) {
|
||||
for (channel = 0; channel < avctx->channels; channel++) {
|
||||
int v = bytestream2_get_byteu(&gb);
|
||||
*samples++ = adpcm_ima_qt_expand_nibble(&c->status[channel], v >> 4 );
|
||||
samples[st] = adpcm_ima_qt_expand_nibble(&c->status[channel], v & 0x0F);
|
||||
}
|
||||
samples += avctx->channels;
|
||||
}
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_IMA_ALP:
|
||||
for (n = nb_samples / 2; n > 0; n--) {
|
||||
for (channel = 0; channel < avctx->channels; channel++) {
|
||||
int v = bytestream2_get_byteu(&gb);
|
||||
*samples++ = adpcm_ima_alp_expand_nibble(&c->status[channel], v >> 4 , 2);
|
||||
samples[st] = adpcm_ima_alp_expand_nibble(&c->status[channel], v & 0x0F, 2);
|
||||
}
|
||||
samples += avctx->channels;
|
||||
}
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_IMA_CUNNING:
|
||||
for (n = 0; n < nb_samples / 2; n++) {
|
||||
int v = bytestream2_get_byteu(&gb);
|
||||
*samples++ = adpcm_ima_cunning_expand_nibble(&c->status[0], v & 0x0F);
|
||||
*samples++ = adpcm_ima_cunning_expand_nibble(&c->status[0], v >> 4);
|
||||
}
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_IMA_OKI:
|
||||
while (bytestream2_get_bytes_left(&gb) > 0) {
|
||||
for (n = nb_samples >> (1 - st); n > 0; n--) {
|
||||
int v = bytestream2_get_byteu(&gb);
|
||||
*samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
|
||||
*samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
|
||||
@ -1198,8 +1431,11 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
for (i=0; i<=st; i++)
|
||||
for (i=0; i<=st; i++) {
|
||||
c->status[i].predictor = bytestream2_get_le32u(&gb);
|
||||
if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
for (n = nb_samples >> (1 - st); n > 0; n--) {
|
||||
int byte = bytestream2_get_byteu(&gb);
|
||||
@ -1246,8 +1482,8 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
|
||||
for (count2 = 0; count2 < 28; count2++) {
|
||||
byte = bytestream2_get_byteu(&gb);
|
||||
next_left_sample = sign_extend(byte >> 4, 4) << shift_left;
|
||||
next_right_sample = sign_extend(byte, 4) << shift_right;
|
||||
next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
|
||||
next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
|
||||
|
||||
next_left_sample = (next_left_sample +
|
||||
(current_left_sample * coeff1l) +
|
||||
@ -1286,7 +1522,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
if (st) byte[1] = bytestream2_get_byteu(&gb);
|
||||
for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
|
||||
for(channel = 0; channel < avctx->channels; channel++) {
|
||||
int sample = sign_extend(byte[channel] >> i, 4) << shift[channel];
|
||||
int sample = sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
|
||||
sample = (sample +
|
||||
c->status[channel].sample1 * coeff[channel][0] +
|
||||
c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
|
||||
@ -1347,10 +1583,10 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
|
||||
for (count2=0; count2<28; count2++) {
|
||||
if (count2 & 1)
|
||||
next_sample = sign_extend(byte, 4) << shift;
|
||||
next_sample = (unsigned)sign_extend(byte, 4) << shift;
|
||||
else {
|
||||
byte = bytestream2_get_byte(&gb);
|
||||
next_sample = sign_extend(byte >> 4, 4) << shift;
|
||||
next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
|
||||
}
|
||||
|
||||
next_sample += (current_sample * coeff1) +
|
||||
@ -1401,11 +1637,11 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
int level, pred;
|
||||
int byte = bytestream2_get_byteu(&gb);
|
||||
|
||||
level = sign_extend(byte >> 4, 4) << shift[n];
|
||||
level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
|
||||
pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
|
||||
s[0] = av_clip_int16((level + pred + 0x80) >> 8);
|
||||
|
||||
level = sign_extend(byte, 4) << shift[n];
|
||||
level = sign_extend(byte, 4) * (1 << shift[n]);
|
||||
pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
|
||||
s[1] = av_clip_int16((level + pred + 0x80) >> 8);
|
||||
}
|
||||
@ -1444,8 +1680,8 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
for (n = nb_samples >> (1 - st); n > 0; n--) {
|
||||
int v = bytestream2_get_byteu(&gb);
|
||||
|
||||
*samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4, 3);
|
||||
*samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf, 3);
|
||||
*samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4 );
|
||||
*samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf);
|
||||
}
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_CT:
|
||||
@ -1562,8 +1798,8 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
sampledat = sign_extend(byte >> 4, 4);
|
||||
}
|
||||
|
||||
sampledat = ((prev1 * factor1 + prev2 * factor2) +
|
||||
((sampledat * scale) << 11)) >> 11;
|
||||
sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
|
||||
sampledat * scale;
|
||||
*samples = av_clip_int16(sampledat);
|
||||
prev2 = prev1;
|
||||
prev1 = *samples++;
|
||||
@ -1625,8 +1861,8 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
int byte = bytestream2_get_byteu(&gb);
|
||||
int index = (byte >> 4) & 7;
|
||||
unsigned int exp = byte & 0x0F;
|
||||
int factor1 = table[ch][index * 2];
|
||||
int factor2 = table[ch][index * 2 + 1];
|
||||
int64_t factor1 = table[ch][index * 2];
|
||||
int64_t factor2 = table[ch][index * 2 + 1];
|
||||
|
||||
/* Decode 14 samples. */
|
||||
for (n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
|
||||
@ -1640,7 +1876,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
}
|
||||
|
||||
sampledat = ((c->status[ch].sample1 * factor1
|
||||
+ c->status[ch].sample2 * factor2) >> 11) + (sampledat << exp);
|
||||
+ c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
|
||||
*samples = av_clip_int16(sampledat);
|
||||
c->status[ch].sample2 = c->status[ch].sample1;
|
||||
c->status[ch].sample1 = *samples++;
|
||||
@ -1687,7 +1923,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
else
|
||||
sampledat = sign_extend(byte >> 4, 4);
|
||||
|
||||
sampledat = (((sampledat << 12) >> (header & 0xf)) << 6) + prev;
|
||||
sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
|
||||
*samples++ = av_clip_int16(sampledat >> 6);
|
||||
c->status[channel].sample2 = c->status[channel].sample1;
|
||||
c->status[channel].sample1 = sampledat;
|
||||
@ -1724,7 +1960,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
scale = sign_extend(byte, 4);
|
||||
}
|
||||
|
||||
scale = scale << 12;
|
||||
scale = scale * (1 << 12);
|
||||
sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
|
||||
}
|
||||
*samples++ = av_clip_int16(sample);
|
||||
@ -1734,7 +1970,64 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
}
|
||||
}
|
||||
break;
|
||||
    case AV_CODEC_ID_ADPCM_ARGO:
        /*
         * The format of each block:
         *   uint8_t left_control;
         *   uint4_t left_samples[nb_samples];
         * ---- and if stereo ----
         *   uint8_t right_control;
         *   uint4_t right_samples[nb_samples];
         *
         * Format of the control byte:
         * MSB [SSSSDRRR] LSB
         *   S = (Shift Amount - 2)
         *   D = Decoder flag.
         *   R = Reserved
         *
         * Each block relies on the previous two samples of each channel.
         * They should be 0 initially.
         */
        for (channel = 0; channel < avctx->channels; channel++) {
            int control, shift;

            samples = samples_p[channel];
            cs = c->status + channel;

            /* Get the control byte and decode the samples, 2 at a time. */
            control = bytestream2_get_byteu(&gb);
            shift = (control >> 4) + 2;

            for (n = 0; n < nb_samples / 2; n++) {
                int sample = bytestream2_get_byteu(&gb);
                *samples++ = adpcm_argo_expand_nibble(cs, sign_extend(sample >> 4, 4), control, shift);
                *samples++ = adpcm_argo_expand_nibble(cs, sign_extend(sample >> 0, 4), control, shift);
            }
        }
        break;
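
Putting the control-byte description together with adpcm_argo_expand_nibble: the top four control bits give the shift (plus 2), and control bit 0x04 switches between first- and second-order prediction. A hedged stand-alone sketch of decoding one data byte (the helper names and the local clip are illustrative, not part of the patch):

#include <stdint.h>

static int clip16(int v) { return v < -32768 ? -32768 : v > 32767 ? 32767 : v; }

/* Decode the two 4-bit samples packed in one Argonaut data byte. */
static void argo_decode_byte(uint8_t data, uint8_t control,
                             int *sample1, int *sample2, int16_t out[2])
{
    int shift = (control >> 4) + 2;
    int nib[2] = { ((data >> 4) ^ 8) - 8,       /* high nibble, sign-extended */
                   ((data & 15) ^ 8) - 8 };     /* low nibble, sign-extended  */

    for (int i = 0; i < 2; i++) {
        int sample = nib[i] * (1 << shift);
        if (control & 0x04)
            sample += 8 * *sample1 - 4 * *sample2;  /* second-order predictor */
        else
            sample += 4 * *sample1;                 /* first-order predictor */
        sample   = clip16(sample >> 2);
        *sample2 = *sample1;
        *sample1 = sample;
        out[i]   = (int16_t)sample;
    }
}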
|
||||
case AV_CODEC_ID_ADPCM_ZORK:
|
||||
if (!c->has_status) {
|
||||
for (channel = 0; channel < avctx->channels; channel++) {
|
||||
c->status[channel].predictor = 0;
|
||||
c->status[channel].step_index = 0;
|
||||
}
|
||||
c->has_status = 1;
|
||||
}
|
||||
for (n = 0; n < nb_samples * avctx->channels; n++) {
|
||||
int v = bytestream2_get_byteu(&gb);
|
||||
*samples++ = adpcm_zork_expand_nibble(&c->status[n % avctx->channels], v);
|
||||
}
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_IMA_MTF:
|
||||
for (n = nb_samples / 2; n > 0; n--) {
|
||||
for (channel = 0; channel < avctx->channels; channel++) {
|
||||
int v = bytestream2_get_byteu(&gb);
|
||||
*samples++ = adpcm_ima_mtf_expand_nibble(&c->status[channel], v >> 4);
|
||||
samples[st] = adpcm_ima_mtf_expand_nibble(&c->status[channel], v & 0x0F);
|
||||
}
|
||||
samples += avctx->channels;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
av_assert0(0); // unsupported codec_id should not happen
|
||||
}
|
||||
@ -1788,6 +2081,7 @@ ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM, sample_fmts_s16p, adpcm_4xm,
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AGM, sample_fmts_s16, adpcm_agm, "ADPCM AmuseGraphics Movie");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AICA, sample_fmts_s16p, adpcm_aica, "ADPCM Yamaha AICA");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_ARGO, sample_fmts_s16p, adpcm_argo, "ADPCM Argonaut Games");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_DTK, sample_fmts_s16p, adpcm_dtk, "ADPCM Nintendo Gamecube DTK");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts");
|
||||
@ -1798,19 +2092,24 @@ ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3,
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APM, sample_fmts_s16, adpcm_ima_apm, "ADPCM IMA Ubisoft APM");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_CUNNING, sample_fmts_s16, adpcm_ima_cunning, "ADPCM IMA Cunning Developments");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DAT4, sample_fmts_s16, adpcm_ima_dat4, "ADPCM IMA Eurocom DAT4");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_MTF, sample_fmts_s16, adpcm_ima_mtf, "ADPCM IMA Capcom's MT Framework");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_RAD, sample_fmts_s16, adpcm_ima_rad, "ADPCM IMA Radical");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SSI, sample_fmts_s16, adpcm_ima_ssi, "ADPCM IMA Simon & Schuster Interactive");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ALP, sample_fmts_s16, adpcm_ima_alp, "ADPCM IMA High Voltage Software ALP");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WAV, sample_fmts_s16p, adpcm_ima_wav, "ADPCM IMA WAV");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS, sample_fmts_s16, adpcm_ms, "ADPCM Microsoft");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS, sample_fmts_both, adpcm_ms, "ADPCM Microsoft");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_MTAF, sample_fmts_s16p, adpcm_mtaf, "ADPCM MTAF");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_PSX, sample_fmts_s16p, adpcm_psx, "ADPCM Playstation");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
|
||||
@ -1821,3 +2120,4 @@ ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP_LE, sample_fmts_s16p, adpcm_thp_le,
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo THP");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_XA, sample_fmts_s16p, adpcm_xa, "ADPCM CDROM XA");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha");
|
||||
ADPCM_DECODER(AV_CODEC_ID_ADPCM_ZORK, sample_fmts_s16, adpcm_zork, "ADPCM Zork");