mirror of https://github.com/jellyfin/jellyfin-ffmpeg.git synced 2025-04-18 20:24:05 +03:00

New upstream version 3.2

This commit is contained in:
Andreas Cadhalpun 2016-10-29 18:39:40 +02:00
parent a5afad7384
commit a518647af4
925 changed files with 54825 additions and 16991 deletions

CONTRIBUTING.md Normal file

@ -0,0 +1,4 @@
# Note to Github users
Patches should be submitted to the [ffmpeg-devel mailing list](https://ffmpeg.org/mailman/listinfo/ffmpeg-devel) using `git format-patch` or `git send-email`. Github pull requests should be avoided because they are not part of our review process and **will be ignored**.
See [https://ffmpeg.org/developer.html#Contributing](https://ffmpeg.org/developer.html#Contributing) for more information.

Changelog

@ -1,118 +1,43 @@
Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.
version 3.1.5:
- avformat/mxfdec: Check size to avoid integer overflow in mxf_read_utf16_string()
- avcodec/mpegvideo_enc: Clear mmx state in ff_mpv_reallocate_putbitbuffer()
- avcodec/utils: Clear MMX state before returning from avcodec_default_execute*()
- doc/examples/demuxing_decoding: Drop AVFrame->pts use
- libopenjpegenc: fix out-of-bounds reads when filling the edges
- libopenjpegenc: stop reusing image data buffer for openjpeg 2
- configure: fix detection of libopenjpeg
- doc: fix various typos and grammar errors
- avformat/utils: Update codec_id before using it in the parser init
- cmdutils: fix typos
- lavfi: fix typos
- lavc: fix typos
- tools: fix grammar error
- ffmpeg: remove unused and erroneous AVFrame timestamp check
- Support for MIPS cpu P6600
- avutil/mips/generic_macros_msa: rename macro variable which causes segfault for mips r6
version 3.1.4:
- avformat/avidec: Check nb_streams in read_gab2_sub()
- avformat/avidec: Remove ancient assert
- avfilter/vf_colorspace: fix range for output colorspace option
- lavc/mediacodecdec_h264: fix SODB escaping
- avcodec/nvenc: fix const options for hevc gpu setting
- avformat/avidec: Fix memleak with dv in avi
- lavc/movtextdec.c: Avoid infinite loop on invalid data.
- avcodec/ansi: Check dimensions
- avcodec/cavsdsp: use av_clip_uint8() for idct
- avformat/movenc: Check packet in mov_write_single_packet() too
- avformat/movenc: Factor check_pkt() out
- avformat/utils: fix timebase error in avformat_seek_file()
- avcodec/g726: Add missing ADDB output mask
- avcodec/avpacket: clear side_data_elems
- avformat/movenc: Check first DTS similar to dts difference
- avcodec/ccaption_dec: Use simple array instead of AVBuffer
- avcodec/svq3: Reintroduce slice_type
- avformat/mov: Fix potential integer overflow in mov_read_keys
- swscale/swscale_unscaled: Try to fix Rgb16ToPlanarRgb16Wrapper() with slices
- swscale/swscale_unscaled: Fix packed_16bpc_bswap() with slices
- avformat/avidec: Fix infinite loop in avi_read_nikon()
- lavf/utils: Avoid an overflow for huge negative durations.
- avformat/hls: Fix handling of EXT-X-BYTERANGE streams over 2GB
- lavc/avpacket: Fix undefined behaviour, do not pass a null pointer to memcpy().
- lavc/mjpegdec: Do not skip reading quantization tables.
- cmdutils: fix implicit declaration of SetDllDirectory function
version 3.1.3:
- examples/demuxing_decoding: convert to codecpar
- avcodec/exr: Check tile positions
- avcodec/aacenc: Tighter input checks
- avformat/wtvdec: Check pointer before use
- libavcodec/wmalosslessdec: Check the remaining bits
- avcodec/adpcm: Fix adpcm_ima_wav padding
- avcodec/svq3: fix slice size check
- avcodec/diracdec: Check numx/y
- avcodec/h2645_parse: fix nal size
- avcodec/h2645_parse: Use get_nalsize() in ff_h2645_packet_split()
- h2645_parse: only read avc length code at the correct position
- h2645_parse: don't overread AnnexB NALs within an avc stream
- avcodec/h264_parser: Factor get_avc_nalsize() out
- avcodec/cfhd: Increase minimum band dimension to 3
- avcodec/indeo2: check ctab
- avformat/swfdec: Fix inflate() error code check
- avcodec/rawdec: Fix bits_per_coded_sample checks
- vcodec/h2645_parse: Clear buffer padding
- avcodec/h2645: Fix NAL unit padding
- avfilter/drawutils: Fix single plane with alpha
- cmdutils: check for SetDllDirectory() availability
version 3.1.2:
- cmdutils: remove the current working directory from the DLL search path on win32
- avcodec/rawdec: Fix palette handling with changing palettes
- avcodec/raw: Fix decoding of ilacetest.mov
- avformat/mov: Enable mp3 parsing if a packet needs it
- avformat/hls: Use an array instead of stream offset for stream mapping
- avformat/hls: Sync starting segment across variants on live streams
- avformat/hls: Fix regression with ranged media segments
- avcodec/ffv1enc: Fix assertion failure with non zero bits per sample
- avfilter/af_hdcd: small fix in af_hdcd.c where gain was not being adjusted for "attenuate slowly"
- avformat/oggdec: Fix integer overflow with invalid pts
- ffplay: Fix invalid array index
- avcodec/alacenc: allocate bigger packets (cherry picked from commit 82b84c71b009884c8d041361027718b19922c76d)
- libavcodec/dnxhd: Enable 12-bit DNxHR support.
- lavc/vaapi_encode_h26x: Fix a crash if "." is not the decimal separator.
- jni: Return ENOSYS on unsupported platforms
- lavu/hwcontext_vaapi: Fix compilation if VA_FOURCC_ABGR is not defined.
- avcodec/vp9_parser: Check the input frame sizes for being consistent
- avformat/flvdec: parse keyframe before a/v stream was created add_keyframes_index() when stream created or keyframe parsed
- avformat/flvdec: splitting add_keyframes_index() out from parse_keyframes_index()
- libavformat/rtpdec_asf: zero initialize the AVIOContext struct
- libavutil/opt: Small bugfix in example.
- libx264: Increase x264 opts character limit to 4096
- avcodec/h264_parser: Set sps/pps_ref
- librtmp: Avoid an infiniloop setting connection arguments
- avformat/oggparsevp8: fix pts calculation on pages ending with an invisible frame
- lavc/Makefile: Fix standalone compilation of the svq3 decoder.
- lavf/vplayerdec: Improve auto-detection.
- lavc/mediacodecdec_h264: properly convert extradata to annex-b
- Revert "configure: Enable GCC vectorization on ≥4.9 on x86"
version 3.1.1:
- doc/APIchanges: document the lavu/lavf field moves
- avformat/avformat: Move new field to the end of AVStream
- avformat/utils: update deprecated AVStream->codec when the context is updated
- avutil/frame: Move new field to the end of AVFrame
- libavcodec/exr : fix decoding piz float file.
- avformat/mov: Check sample size
- lavfi: Move new field to the end of AVFilterContext
- lavfi: Move new field to the end of AVFilterLink
- ffplay: Fix usage of private lavfi API
- lavc/mediacodecdec_h264: add missing NAL headers to SPS/PPS buffers
- lavc/pnm_parser: disable parsing for text based PNMs
version 3.2:
- libopenmpt demuxer
- tee protocol
- Changed metadata print option to accept general urls
- Alias muxer for Ogg Video (.ogv)
- VP8 in Ogg muxing
- curves filter doesn't automatically insert points at x=0 and x=1 anymore
- 16-bit support in curves filter and selectivecolor filter
- OpenH264 decoder wrapper
- MediaCodec H.264/HEVC/MPEG-4/VP8/VP9 hwaccel
- True Audio (TTA) muxer
- crystalizer audio filter
- acrusher audio filter
- bitplanenoise video filter
- floating point support in als decoder
- fifo muxer
- maskedclamp filter
- hysteresis filter
- lut2 filter
- yuvtestsrc filter
- CUDA CUVID H.263/VP8/VP9/10 bit HEVC (Dithered) Decoding
- vaguedenoiser filter
- added threads option per filter instance
- weave filter
- gblur filter
- avgblur filter
- sobel and prewitt filter
- MediaCodec HEVC/MPEG-4/VP8/VP9 decoding
- Meridian Lossless Packing (MLP) / TrueHD encoder
- Non-Local Means (nlmeans) denoising filter
- sdl2 output device and ffplay support
- sdl1 output device and sdl1 support removed
- extended mov edit list support
- libfaac encoder removed
- Matroska muxer now writes CRC32 elements by default in all Level 1 elements
- sidedata video and asidedata audio filter
- Changed mapping of rtp MIME type G726 to codec g726le.
version 3.1:

LICENSE.md

@ -115,8 +115,6 @@ The Fraunhofer FDK AAC and OpenSSL libraries are under licenses which are
incompatible with the GPLv2 and v3. To the best of our knowledge, they are
compatible with the LGPL.
The FAAC library is incompatible with all versions of GPL and LGPL.
The NVENC library, while its header file is licensed under the compatible MIT
license, requires a proprietary binary blob at run time, and is deemed to be
incompatible with the GPL. We are not certain if it is compatible with the

MAINTAINERS

@ -43,7 +43,7 @@ Miscellaneous Areas
===================
documentation Stefano Sabatini, Mike Melanson, Timothy Gu, Lou Logan
project server Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser
project server Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov
presets Robert Swain
metadata subsystem Aurelien Jacobs
release management Michael Niedermayer
@ -78,6 +78,7 @@ Other:
eval.c, eval.h Michael Niedermayer
float_dsp Loren Merritt
hash Reimar Doeffinger
hwcontext_cuda* Timo Rothenpieler
intfloat* Michael Niedermayer
integer.c, integer.h Michael Niedermayer
lzo Reimar Doeffinger
@ -136,10 +137,11 @@ Codecs:
8svx.c Jaikrishnan Menon
aacenc*, aaccoder.c Rostislav Pehlivanov
alacenc.c Jaikrishnan Menon
alsdec.c Thilo Borgmann
alsdec.c Thilo Borgmann, Umair Khan
ass* Aurelien Jacobs
asv* Michael Niedermayer
atrac3plus* Maxim Poliakovski
audiotoolbox* Rodger Combs
bgmc.c, bgmc.h Thilo Borgmann
binkaudio.c Peter Ross
cavs* Stefan Gehrer
@ -147,7 +149,7 @@ Codecs:
celp_filters.* Vitor Sessak
cinepak.c Roberto Togni
cinepakenc.c Rl / Aetey G.T. AB
ccaption_dec.c Anshul Maheshwari
ccaption_dec.c Anshul Maheshwari, Aman Gupta
cljr Alex Beregszaszi
cpia.c Stephan Hilb
crystalhd.c Philip Langdale
@ -164,6 +166,7 @@ Codecs:
exif.c, exif.h Thilo Borgmann
ffv1* Michael Niedermayer
ffwavesynth.c Nicolas George
fifo.c Jan Sebechlebsky
flicvideo.c Mike Melanson
g722.c Martin Storsjo
g726.c Roman Shaposhnik
@ -195,7 +198,7 @@ Codecs:
mdec.c Michael Niedermayer
mimic.c Ramiro Polla
mjpeg*.c Michael Niedermayer
mlp* Ramiro Polla
mlp* Ramiro Polla, Jai Luthra
mmvideo.c Peter Ross
mpeg12.c, mpeg12data.h Michael Niedermayer
mpegvideo.c, mpegvideo.h Michael Niedermayer
@ -284,6 +287,7 @@ libavdevice
pulse_audio_enc.c Lukasz Marek
qtkit.m Thilo Borgmann
sdl Stefano Sabatini
sdl2.c Josh de Kock
v4l2.c Giorgio Vazzana
vfwcap.c Ramiro Polla
xv.c Lukasz Marek
@ -294,6 +298,8 @@ libavfilter
Generic parts:
graphdump.c Nicolas George
motion_estimation.c Davinder Singh
Filters:
f_drawgraph.c Paul B Mahol
af_adelay.c Paul B Mahol
@ -308,6 +314,7 @@ Filters:
af_chorus.c Paul B Mahol
af_compand.c Paul B Mahol
af_firequalizer.c Muhammad Faiz
af_hdcd.c Burt P.
af_ladspa.c Paul B Mahol
af_loudnorm.c Kyle Swanson
af_pan.c Nicolas George
@ -334,6 +341,8 @@ Filters:
vf_il.c Paul B Mahol
vf_lenscorrection.c Daniel Oberhoff
vf_mergeplanes.c Paul B Mahol
vf_mestimate.c Davinder Singh
vf_minterpolate.c Davinder Singh
vf_neighbor.c Paul B Mahol
vf_psnr.c Paul B Mahol
vf_random.c Paul B Mahol
@ -372,7 +381,7 @@ Muxers/Demuxers:
astdec.c Paul B Mahol
astenc.c James Almer
avi* Michael Niedermayer
avisynth.c AvxSynth Team (avxsynth.testing at gmail dot com)
avisynth.c Stephen Hutchinson
avr.c Paul B Mahol
bink.c Peter Ross
brstm.c Paul B Mahol
@ -391,7 +400,7 @@ Muxers/Demuxers:
gxf.c Reimar Doeffinger
gxfenc.c Baptiste Coudurier
hls.c Anssi Hannula
hls encryption (hlsenc.c) Christian Suloway
hls encryption (hlsenc.c) Christian Suloway, Steven Liu
idcin.c Mike Melanson
idroqdec.c Mike Melanson
iff.c Jaikrishnan Menon
@ -402,6 +411,7 @@ Muxers/Demuxers:
jvdec.c Peter Ross
libmodplug.c Clément Bœsch
libnut.c Oded Shimon
libopenmpt.c Josh de Kock
lmlm4.c Ivo van Poorten
lvfdec.c Paul B Mahol
lxfdec.c Tomas Härdin
@ -520,7 +530,6 @@ Releases
2.7 Michael Niedermayer
2.6 Michael Niedermayer
2.5 Michael Niedermayer
2.4 Michael Niedermayer
If you want to maintain an older release, please contact us
@ -546,6 +555,7 @@ Loren Merritt ABD9 08F4 C920 3F65 D8BE 35D7 1540 DAA7 060F 56DE
Lou Logan 7D68 DC73 CBEF EABB 671A B6CF 621C 2E28 82F8 DC3A
Michael Niedermayer 9FF2 128B 147E F673 0BAD F133 611E C787 040B 0FAB
Nicolas George 24CE 01CE 9ACC 5CEB 74D8 8D9D B063 D997 36E5 4C93
Nikolay Aleksandrov 8978 1D8C FB71 588E 4B27 EAA8 C4F0 B5FC E011 13B1
Panagiotis Issaris 6571 13A3 33D9 3726 F728 AA98 F643 B12E ECF3 E029
Peter Ross A907 E02F A6E5 0CD2 34CD 20D2 6760 79C5 AC40 DD6B
Philip Langdale 5DC5 8D66 5FBA 3A43 18EC 045E F8D6 B194 6A75 682E

README.md

@ -45,5 +45,4 @@ GPL. Please refer to the LICENSE file for detailed information.
Patches should be submitted to the ffmpeg-devel mailing list using
`git format-patch` or `git send-email`. Github pull requests should be
avoided because they are not part of our review process. Few developers
follow pull requests so they will likely be ignored.
avoided because they are not part of our review process and will be ignored.

RELEASE

@ -1 +1 @@
3.1.5
3.2

RELEASE_NOTES

@ -1,10 +1,10 @@
┌────────────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 3.1 "Laplace" │
│ RELEASE NOTES for FFmpeg 3.2 "Hypatia" │
└────────────────────────────────────────┘
The FFmpeg Project proudly presents FFmpeg 3.1 "Laplace", about 4
months after the release of FFmpeg 3.0.
The FFmpeg Project proudly presents FFmpeg 3.2 "Hypatia", about 4
months after the release of FFmpeg 3.1.
A complete Changelog is available at the root of the project, and the
complete Git history on http://source.ffmpeg.org.

VERSION

@ -1 +1 @@
3.1.5
3.2

cmdutils.c

@ -61,7 +61,7 @@
#include <sys/time.h>
#include <sys/resource.h>
#endif
#if HAVE_SETDLLDIRECTORY
#ifdef _WIN32
#include <windows.h>
#endif
@ -112,7 +112,7 @@ static void log_callback_report(void *ptr, int level, const char *fmt, va_list v
void init_dynload(void)
{
#if HAVE_SETDLLDIRECTORY
#ifdef _WIN32
/* Calling SetDllDirectory with the empty string (but not NULL) removes the
* current working directory from the DLL search path as a security pre-caution. */
SetDllDirectory("");
@ -1579,10 +1579,11 @@ int show_encoders(void *optctx, const char *opt, const char *arg)
int show_bsfs(void *optctx, const char *opt, const char *arg)
{
AVBitStreamFilter *bsf = NULL;
const AVBitStreamFilter *bsf = NULL;
void *opaque = NULL;
printf("Bitstream filters:\n");
while ((bsf = av_bitstream_filter_next(bsf)))
while ((bsf = av_bsf_next(&opaque)))
printf("%s\n", bsf->name);
printf("\n");
return 0;
@ -1993,7 +1994,7 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
codec = s->oformat ? avcodec_find_encoder(codec_id)
: avcodec_find_decoder(codec_id);
switch (st->codec->codec_type) {
switch (st->codecpar->codec_type) {
case AVMEDIA_TYPE_VIDEO:
prefix = 'v';
flags |= AV_OPT_FLAG_VIDEO_PARAM;
@ -2051,7 +2052,7 @@ AVDictionary **setup_find_stream_info_opts(AVFormatContext *s,
return NULL;
}
for (i = 0; i < s->nb_streams; i++)
opts[i] = filter_codec_opts(codec_opts, s->streams[i]->codec->codec_id,
opts[i] = filter_codec_opts(codec_opts, s->streams[i]->codecpar->codec_id,
s, s->streams[i], NULL);
return opts;
}
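Both cmdutils.c hunks above track the lavf migration from the deprecated AVStream->codec context to AVStream->codecpar. A minimal sketch of the new access pattern (illustrative only, not part of this commit; the function name is hypothetical):

#include <stdio.h>
#include <libavformat/avformat.h>

static void list_video_streams(const AVFormatContext *fmt)
{
    for (unsigned i = 0; i < fmt->nb_streams; i++) {
        /* codecpar replaces the deprecated st->codec for stream parameters */
        const AVCodecParameters *par = fmt->streams[i]->codecpar;
        if (par->codec_type == AVMEDIA_TYPE_VIDEO)
            printf("stream %u: video %dx%d\n", i, par->width, par->height);
    }
}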

common.mak

@ -83,12 +83,7 @@ COMPILE_HOSTC = $(call COMPILE,HOSTCC)
%.h.c:
$(Q)echo '#include "$*.h"' >$@
%.ver: %.v
$(Q)sed 's/$$MAJOR/$($(basename $(@F))_VERSION_MAJOR)/' $^ | sed -e 's/:/:\
/' -e 's/; /;\
/g' > $@
%.c %.h: TAG = GEN
%.c %.h %.ver: TAG = GEN
# Dummy rule to stop make trying to rebuild removed or renamed headers
%.h:
@ -152,7 +147,7 @@ $(TOOLOBJS): | tools
OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(TESTOBJS))
CLEANSUFFIXES = *.d *.o *~ *.h.c *.map *.ver *.ver-sol2 *.ho *.gcno *.gcda *$(DEFAULT_YASMD).asm
CLEANSUFFIXES = *.d *.o *~ *.h.c *.gcda *.gcno *.map *.ver *.ho *$(DEFAULT_YASMD).asm
DISTCLEANSUFFIXES = *.pc
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a

compat/avisynth/avisynth_c.h

@ -75,54 +75,149 @@ enum {AVS_PLANAR_Y=1<<0,
AVS_PLANAR_B_ALIGNED=AVS_PLANAR_B|AVS_PLANAR_ALIGNED};
// Colorspace properties.
enum {AVS_CS_BGR = 1<<28,
AVS_CS_YUV = 1<<29,
AVS_CS_INTERLEAVED = 1<<30,
AVS_CS_PLANAR = 1<<31,
enum {
AVS_CS_YUVA = 1 << 27,
AVS_CS_BGR = 1 << 28,
AVS_CS_YUV = 1 << 29,
AVS_CS_INTERLEAVED = 1 << 30,
AVS_CS_PLANAR = 1 << 31,
AVS_CS_SHIFT_SUB_WIDTH = 0,
AVS_CS_SHIFT_SUB_HEIGHT = 8,
AVS_CS_SHIFT_SAMPLE_BITS = 16,
AVS_CS_SHIFT_SUB_WIDTH = 0,
AVS_CS_SHIFT_SUB_HEIGHT = 8,
AVS_CS_SHIFT_SAMPLE_BITS = 16,
AVS_CS_SUB_WIDTH_MASK = 7 << AVS_CS_SHIFT_SUB_WIDTH,
AVS_CS_SUB_WIDTH_1 = 3 << AVS_CS_SHIFT_SUB_WIDTH, // YV24
AVS_CS_SUB_WIDTH_2 = 0 << AVS_CS_SHIFT_SUB_WIDTH, // YV12, I420, YV16
AVS_CS_SUB_WIDTH_4 = 1 << AVS_CS_SHIFT_SUB_WIDTH, // YUV9, YV411
AVS_CS_SUB_WIDTH_MASK = 7 << AVS_CS_SHIFT_SUB_WIDTH,
AVS_CS_SUB_WIDTH_1 = 3 << AVS_CS_SHIFT_SUB_WIDTH, // YV24
AVS_CS_SUB_WIDTH_2 = 0 << AVS_CS_SHIFT_SUB_WIDTH, // YV12, I420, YV16
AVS_CS_SUB_WIDTH_4 = 1 << AVS_CS_SHIFT_SUB_WIDTH, // YUV9, YV411
AVS_CS_VPLANEFIRST = 1 << 3, // YV12, YV16, YV24, YV411, YUV9
AVS_CS_UPLANEFIRST = 1 << 4, // I420
AVS_CS_VPLANEFIRST = 1 << 3, // YV12, YV16, YV24, YV411, YUV9
AVS_CS_UPLANEFIRST = 1 << 4, // I420
AVS_CS_SUB_HEIGHT_MASK = 7 << AVS_CS_SHIFT_SUB_HEIGHT,
AVS_CS_SUB_HEIGHT_1 = 3 << AVS_CS_SHIFT_SUB_HEIGHT, // YV16, YV24, YV411
AVS_CS_SUB_HEIGHT_2 = 0 << AVS_CS_SHIFT_SUB_HEIGHT, // YV12, I420
AVS_CS_SUB_HEIGHT_4 = 1 << AVS_CS_SHIFT_SUB_HEIGHT, // YUV9
AVS_CS_SUB_HEIGHT_MASK = 7 << AVS_CS_SHIFT_SUB_HEIGHT,
AVS_CS_SUB_HEIGHT_1 = 3 << AVS_CS_SHIFT_SUB_HEIGHT, // YV16, YV24, YV411
AVS_CS_SUB_HEIGHT_2 = 0 << AVS_CS_SHIFT_SUB_HEIGHT, // YV12, I420
AVS_CS_SUB_HEIGHT_4 = 1 << AVS_CS_SHIFT_SUB_HEIGHT, // YUV9
AVS_CS_SAMPLE_BITS_MASK = 7 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_SAMPLE_BITS_8 = 0 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_SAMPLE_BITS_16 = 1 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_SAMPLE_BITS_32 = 2 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_SAMPLE_BITS_MASK = 7 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_SAMPLE_BITS_8 = 0 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_SAMPLE_BITS_10 = 5 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_SAMPLE_BITS_12 = 6 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_SAMPLE_BITS_14 = 7 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_SAMPLE_BITS_16 = 1 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_SAMPLE_BITS_32 = 2 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_PLANAR_MASK = AVS_CS_PLANAR | AVS_CS_INTERLEAVED | AVS_CS_YUV | AVS_CS_BGR | AVS_CS_YUVA | AVS_CS_SAMPLE_BITS_MASK | AVS_CS_SUB_HEIGHT_MASK | AVS_CS_SUB_WIDTH_MASK,
AVS_CS_PLANAR_FILTER = ~(AVS_CS_VPLANEFIRST | AVS_CS_UPLANEFIRST),
AVS_CS_RGB_TYPE = 1 << 0,
AVS_CS_RGBA_TYPE = 1 << 1,
AVS_CS_GENERIC_YUV420 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_2 | AVS_CS_SUB_WIDTH_2, // 4:2:0 planar
AVS_CS_GENERIC_YUV422 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_2, // 4:2:2 planar
AVS_CS_GENERIC_YUV444 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_1, // 4:4:4 planar
AVS_CS_GENERIC_Y = AVS_CS_PLANAR | AVS_CS_INTERLEAVED | AVS_CS_YUV, // Y only (4:0:0)
AVS_CS_GENERIC_RGBP = AVS_CS_PLANAR | AVS_CS_BGR | AVS_CS_RGB_TYPE, // planar RGB
AVS_CS_GENERIC_RGBAP = AVS_CS_PLANAR | AVS_CS_BGR | AVS_CS_RGBA_TYPE, // planar RGBA
AVS_CS_GENERIC_YUVA420 = AVS_CS_PLANAR | AVS_CS_YUVA | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_2 | AVS_CS_SUB_WIDTH_2, // 4:2:0:A planar
AVS_CS_GENERIC_YUVA422 = AVS_CS_PLANAR | AVS_CS_YUVA | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_2, // 4:2:2:A planar
AVS_CS_GENERIC_YUVA444 = AVS_CS_PLANAR | AVS_CS_YUVA | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_1 }; // 4:4:4:A planar
AVS_CS_PLANAR_MASK = AVS_CS_PLANAR | AVS_CS_INTERLEAVED | AVS_CS_YUV | AVS_CS_BGR | AVS_CS_SAMPLE_BITS_MASK | AVS_CS_SUB_HEIGHT_MASK | AVS_CS_SUB_WIDTH_MASK,
AVS_CS_PLANAR_FILTER = ~( AVS_CS_VPLANEFIRST | AVS_CS_UPLANEFIRST )};
// Specific colorformats
enum {
AVS_CS_UNKNOWN = 0,
AVS_CS_BGR24 = 1<<0 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
AVS_CS_BGR32 = 1<<1 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
AVS_CS_BGR24 = AVS_CS_RGB_TYPE | AVS_CS_BGR | AVS_CS_INTERLEAVED,
AVS_CS_BGR32 = AVS_CS_RGBA_TYPE | AVS_CS_BGR | AVS_CS_INTERLEAVED,
AVS_CS_YUY2 = 1<<2 | AVS_CS_YUV | AVS_CS_INTERLEAVED,
// AVS_CS_YV12 = 1<<3 Reserved
// AVS_CS_I420 = 1<<4 Reserved
AVS_CS_RAW32 = 1<<5 | AVS_CS_INTERLEAVED,
AVS_CS_YV24 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_1, // YVU 4:4:4 planar
AVS_CS_YV16 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_2, // YVU 4:2:2 planar
AVS_CS_YV12 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_2 | AVS_CS_SUB_WIDTH_2, // YVU 4:2:0 planar
AVS_CS_YV24 = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_8, // YVU 4:4:4 planar
AVS_CS_YV16 = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_8, // YVU 4:2:2 planar
AVS_CS_YV12 = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_8, // YVU 4:2:0 planar
AVS_CS_I420 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_UPLANEFIRST | AVS_CS_SUB_HEIGHT_2 | AVS_CS_SUB_WIDTH_2, // YUV 4:2:0 planar
AVS_CS_IYUV = AVS_CS_I420,
AVS_CS_YV411 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_4, // YVU 4:1:1 planar
AVS_CS_YUV9 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_4 | AVS_CS_SUB_WIDTH_4, // YVU 4:1:0 planar
AVS_CS_Y8 = AVS_CS_PLANAR | AVS_CS_INTERLEAVED | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 // Y 4:0:0 planar
AVS_CS_Y8 = AVS_CS_GENERIC_Y | AVS_CS_SAMPLE_BITS_8, // Y 4:0:0 planar
//-------------------------
// AVS16: new planar constants go live! Experimental PF 160613
// 10-12-14 bit + planar RGB + BRG48/64 160725
AVS_CS_YUV444P10 = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_10, // YUV 4:4:4 10bit samples
AVS_CS_YUV422P10 = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_10, // YUV 4:2:2 10bit samples
AVS_CS_YUV420P10 = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_10, // YUV 4:2:0 10bit samples
AVS_CS_Y10 = AVS_CS_GENERIC_Y | AVS_CS_SAMPLE_BITS_10, // Y 4:0:0 10bit samples
AVS_CS_YUV444P12 = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_12, // YUV 4:4:4 12bit samples
AVS_CS_YUV422P12 = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_12, // YUV 4:2:2 12bit samples
AVS_CS_YUV420P12 = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_12, // YUV 4:2:0 12bit samples
AVS_CS_Y12 = AVS_CS_GENERIC_Y | AVS_CS_SAMPLE_BITS_12, // Y 4:0:0 12bit samples
AVS_CS_YUV444P14 = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_14, // YUV 4:4:4 14bit samples
AVS_CS_YUV422P14 = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_14, // YUV 4:2:2 14bit samples
AVS_CS_YUV420P14 = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_14, // YUV 4:2:0 14bit samples
AVS_CS_Y14 = AVS_CS_GENERIC_Y | AVS_CS_SAMPLE_BITS_14, // Y 4:0:0 14bit samples
AVS_CS_YUV444P16 = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_16, // YUV 4:4:4 16bit samples
AVS_CS_YUV422P16 = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_16, // YUV 4:2:2 16bit samples
AVS_CS_YUV420P16 = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_16, // YUV 4:2:0 16bit samples
AVS_CS_Y16 = AVS_CS_GENERIC_Y | AVS_CS_SAMPLE_BITS_16, // Y 4:0:0 16bit samples
// 32 bit samples (float)
AVS_CS_YUV444PS = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_32, // YUV 4:4:4 32bit samples
AVS_CS_YUV422PS = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_32, // YUV 4:2:2 32bit samples
AVS_CS_YUV420PS = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_32, // YUV 4:2:0 32bit samples
AVS_CS_Y32 = AVS_CS_GENERIC_Y | AVS_CS_SAMPLE_BITS_32, // Y 4:0:0 32bit samples
// RGB packed
AVS_CS_BGR48 = AVS_CS_RGB_TYPE | AVS_CS_BGR | AVS_CS_INTERLEAVED | AVS_CS_SAMPLE_BITS_16, // BGR 3x16 bit
AVS_CS_BGR64 = AVS_CS_RGBA_TYPE | AVS_CS_BGR | AVS_CS_INTERLEAVED | AVS_CS_SAMPLE_BITS_16, // BGR 4x16 bit
// no packed 32 bit (float) support for these legacy types
// RGB planar
AVS_CS_RGBP = AVS_CS_GENERIC_RGBP | AVS_CS_SAMPLE_BITS_8, // Planar RGB 8 bit samples
AVS_CS_RGBP10 = AVS_CS_GENERIC_RGBP | AVS_CS_SAMPLE_BITS_10, // Planar RGB 10bit samples
AVS_CS_RGBP12 = AVS_CS_GENERIC_RGBP | AVS_CS_SAMPLE_BITS_12, // Planar RGB 12bit samples
AVS_CS_RGBP14 = AVS_CS_GENERIC_RGBP | AVS_CS_SAMPLE_BITS_14, // Planar RGB 14bit samples
AVS_CS_RGBP16 = AVS_CS_GENERIC_RGBP | AVS_CS_SAMPLE_BITS_16, // Planar RGB 16bit samples
AVS_CS_RGBPS = AVS_CS_GENERIC_RGBP | AVS_CS_SAMPLE_BITS_32, // Planar RGB 32bit samples
// RGBA planar
AVS_CS_RGBAP = AVS_CS_GENERIC_RGBAP | AVS_CS_SAMPLE_BITS_8, // Planar RGBA 8 bit samples
AVS_CS_RGBAP10 = AVS_CS_GENERIC_RGBAP | AVS_CS_SAMPLE_BITS_10, // Planar RGBA 10bit samples
AVS_CS_RGBAP12 = AVS_CS_GENERIC_RGBAP | AVS_CS_SAMPLE_BITS_12, // Planar RGBA 12bit samples
AVS_CS_RGBAP14 = AVS_CS_GENERIC_RGBAP | AVS_CS_SAMPLE_BITS_14, // Planar RGBA 14bit samples
AVS_CS_RGBAP16 = AVS_CS_GENERIC_RGBAP | AVS_CS_SAMPLE_BITS_16, // Planar RGBA 16bit samples
AVS_CS_RGBAPS = AVS_CS_GENERIC_RGBAP | AVS_CS_SAMPLE_BITS_32, // Planar RGBA 32bit samples
// Planar YUVA
AVS_CS_YUVA444 = AVS_CS_GENERIC_YUVA444 | AVS_CS_SAMPLE_BITS_8, // YUVA 4:4:4 8bit samples
AVS_CS_YUVA422 = AVS_CS_GENERIC_YUVA422 | AVS_CS_SAMPLE_BITS_8, // YUVA 4:2:2 8bit samples
AVS_CS_YUVA420 = AVS_CS_GENERIC_YUVA420 | AVS_CS_SAMPLE_BITS_8, // YUVA 4:2:0 8bit samples
AVS_CS_YUVA444P10 = AVS_CS_GENERIC_YUVA444 | AVS_CS_SAMPLE_BITS_10, // YUVA 4:4:4 10bit samples
AVS_CS_YUVA422P10 = AVS_CS_GENERIC_YUVA422 | AVS_CS_SAMPLE_BITS_10, // YUVA 4:2:2 10bit samples
AVS_CS_YUVA420P10 = AVS_CS_GENERIC_YUVA420 | AVS_CS_SAMPLE_BITS_10, // YUVA 4:2:0 10bit samples
AVS_CS_YUVA444P12 = AVS_CS_GENERIC_YUVA444 | AVS_CS_SAMPLE_BITS_12, // YUVA 4:4:4 12bit samples
AVS_CS_YUVA422P12 = AVS_CS_GENERIC_YUVA422 | AVS_CS_SAMPLE_BITS_12, // YUVA 4:2:2 12bit samples
AVS_CS_YUVA420P12 = AVS_CS_GENERIC_YUVA420 | AVS_CS_SAMPLE_BITS_12, // YUVA 4:2:0 12bit samples
AVS_CS_YUVA444P14 = AVS_CS_GENERIC_YUVA444 | AVS_CS_SAMPLE_BITS_14, // YUVA 4:4:4 14bit samples
AVS_CS_YUVA422P14 = AVS_CS_GENERIC_YUVA422 | AVS_CS_SAMPLE_BITS_14, // YUVA 4:2:2 14bit samples
AVS_CS_YUVA420P14 = AVS_CS_GENERIC_YUVA420 | AVS_CS_SAMPLE_BITS_14, // YUVA 4:2:0 14bit samples
AVS_CS_YUVA444P16 = AVS_CS_GENERIC_YUVA444 | AVS_CS_SAMPLE_BITS_16, // YUVA 4:4:4 16bit samples
AVS_CS_YUVA422P16 = AVS_CS_GENERIC_YUVA422 | AVS_CS_SAMPLE_BITS_16, // YUVA 4:2:2 16bit samples
AVS_CS_YUVA420P16 = AVS_CS_GENERIC_YUVA420 | AVS_CS_SAMPLE_BITS_16, // YUVA 4:2:0 16bit samples
AVS_CS_YUVA444PS = AVS_CS_GENERIC_YUVA444 | AVS_CS_SAMPLE_BITS_32, // YUVA 4:4:4 32bit samples
AVS_CS_YUVA422PS = AVS_CS_GENERIC_YUVA422 | AVS_CS_SAMPLE_BITS_32, // YUVA 4:2:2 32bit samples
AVS_CS_YUVA420PS = AVS_CS_GENERIC_YUVA420 | AVS_CS_SAMPLE_BITS_32, // YUVA 4:2:0 32bit samples
};
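/* Illustrative note (not part of the header): every constant above is the OR
 * of a generic layout (family, plane order, subsampling) and a sample-bits
 * field, so a caller can decompose a pixel_type with the *_MASK values.
 * A minimal sketch: */
static int avs_sample_bits_of(int pixel_type)
{
    switch (pixel_type & AVS_CS_SAMPLE_BITS_MASK) {
    case AVS_CS_SAMPLE_BITS_8:  return 8;
    case AVS_CS_SAMPLE_BITS_10: return 10;
    case AVS_CS_SAMPLE_BITS_12: return 12;
    case AVS_CS_SAMPLE_BITS_14: return 14;
    case AVS_CS_SAMPLE_BITS_16: return 16;
    case AVS_CS_SAMPLE_BITS_32: return 32; /* float samples */
    default:                    return 0;
    }
}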
enum {
@ -247,10 +342,10 @@ AVSC_INLINE int avs_is_rgb(const AVS_VideoInfo * p)
{ return !!(p->pixel_type&AVS_CS_BGR); }
AVSC_INLINE int avs_is_rgb24(const AVS_VideoInfo * p)
{ return (p->pixel_type&AVS_CS_BGR24)==AVS_CS_BGR24; } // Clear out additional properties
{ return ((p->pixel_type&AVS_CS_BGR24)==AVS_CS_BGR24) && ((p->pixel_type & AVS_CS_SAMPLE_BITS_MASK) == AVS_CS_SAMPLE_BITS_8); }
AVSC_INLINE int avs_is_rgb32(const AVS_VideoInfo * p)
{ return (p->pixel_type & AVS_CS_BGR32) == AVS_CS_BGR32 ; }
{ return ((p->pixel_type&AVS_CS_BGR32)==AVS_CS_BGR32) && ((p->pixel_type & AVS_CS_SAMPLE_BITS_MASK) == AVS_CS_SAMPLE_BITS_8); }
AVSC_INLINE int avs_is_yuv(const AVS_VideoInfo * p)
{ return !!(p->pixel_type&AVS_CS_YUV ); }
@ -258,6 +353,10 @@ AVSC_INLINE int avs_is_yuv(const AVS_VideoInfo * p)
AVSC_INLINE int avs_is_yuy2(const AVS_VideoInfo * p)
{ return (p->pixel_type & AVS_CS_YUY2) == AVS_CS_YUY2; }
AVSC_API(int, avs_is_rgb48)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_rgb64)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_yv24)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_yv16)(const AVS_VideoInfo * p);
@ -268,6 +367,38 @@ AVSC_API(int, avs_is_yv411)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_y8)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_yuv444p16)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_yuv422p16)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_yuv420p16)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_y16)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_yuv444ps)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_yuv422ps)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_yuv420ps)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_y32)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_444)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_422)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_420)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_y)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_yuva)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_planar_rgb)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_planar_rgba)(const AVS_VideoInfo * p);
AVSC_INLINE int avs_is_property(const AVS_VideoInfo * p, int property)
{ return ((p->image_type & property)==property ); }
@ -365,6 +496,12 @@ AVSC_INLINE int avs_is_same_colorspace(AVS_VideoInfo * x, AVS_VideoInfo * y)
}
#endif
AVSC_API(int, avs_num_components)(const AVS_VideoInfo * p);
AVSC_API(int, avs_component_size)(const AVS_VideoInfo * p);
AVSC_API(int, avs_bits_per_component)(const AVS_VideoInfo * p);
/////////////////////////////////////////////////////////////////////
//
// AVS_VideoFrame
@ -528,7 +665,7 @@ AVSC_INLINE AVS_Value avs_new_value_clip(AVS_Clip * v0)
{ AVS_Value v; avs_set_to_clip(&v, v0); return v; }
#endif
AVSC_INLINE AVS_Value avs_new_value_array(AVS_Value * v0, int size)
{ AVS_Value v; v.type = 'a'; v.d.array = v0; v.array_size = size; return v; }
{ AVS_Value v; v.type = 'a'; v.d.array = v0; v.array_size = (short)size; return v; }
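/* Usage sketch (illustrative): array_size is a short, hence the (short) cast
 * added above; the array memory stays owned by the caller.
 *
 *   AVS_Value args[2];
 *   args[0] = avs_new_value_string("input.avs");
 *   args[1] = avs_new_value_bool(1);
 *   AVS_Value arr = avs_new_value_array(args, 2);  // e.g. passed to avs_invoke()
 */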
/////////////////////////////////////////////////////////////////////
//
@ -761,11 +898,28 @@ struct AVS_Library {
AVSC_DECLARE_FUNC(avs_vsprintf);
AVSC_DECLARE_FUNC(avs_get_error);
AVSC_DECLARE_FUNC(avs_is_rgb48);
AVSC_DECLARE_FUNC(avs_is_rgb64);
AVSC_DECLARE_FUNC(avs_is_yv24);
AVSC_DECLARE_FUNC(avs_is_yv16);
AVSC_DECLARE_FUNC(avs_is_yv12);
AVSC_DECLARE_FUNC(avs_is_yv411);
AVSC_DECLARE_FUNC(avs_is_y8);
AVSC_DECLARE_FUNC(avs_is_yuv444p16);
AVSC_DECLARE_FUNC(avs_is_yuv422p16);
AVSC_DECLARE_FUNC(avs_is_yuv420p16);
AVSC_DECLARE_FUNC(avs_is_y16);
AVSC_DECLARE_FUNC(avs_is_yuv444ps);
AVSC_DECLARE_FUNC(avs_is_yuv422ps);
AVSC_DECLARE_FUNC(avs_is_yuv420ps);
AVSC_DECLARE_FUNC(avs_is_y32);
AVSC_DECLARE_FUNC(avs_is_444);
AVSC_DECLARE_FUNC(avs_is_422);
AVSC_DECLARE_FUNC(avs_is_420);
AVSC_DECLARE_FUNC(avs_is_y);
AVSC_DECLARE_FUNC(avs_is_yuva);
AVSC_DECLARE_FUNC(avs_is_planar_rgb);
AVSC_DECLARE_FUNC(avs_is_planar_rgba);
AVSC_DECLARE_FUNC(avs_is_color_space);
AVSC_DECLARE_FUNC(avs_get_plane_width_subsampling);
@ -780,6 +934,11 @@ struct AVS_Library {
AVSC_DECLARE_FUNC(avs_get_read_ptr_p);
AVSC_DECLARE_FUNC(avs_is_writable);
AVSC_DECLARE_FUNC(avs_get_write_ptr_p);
AVSC_DECLARE_FUNC(avs_num_components);
AVSC_DECLARE_FUNC(avs_component_size);
AVSC_DECLARE_FUNC(avs_bits_per_component);
};
#undef AVSC_DECLARE_FUNC
@ -840,11 +999,28 @@ AVSC_INLINE AVS_Library * avs_load_library() {
AVSC_LOAD_FUNC(avs_vsprintf);
AVSC_LOAD_FUNC(avs_get_error);
AVSC_LOAD_FUNC(avs_is_rgb48);
AVSC_LOAD_FUNC(avs_is_rgb64);
AVSC_LOAD_FUNC(avs_is_yv24);
AVSC_LOAD_FUNC(avs_is_yv16);
AVSC_LOAD_FUNC(avs_is_yv12);
AVSC_LOAD_FUNC(avs_is_yv411);
AVSC_LOAD_FUNC(avs_is_y8);
AVSC_LOAD_FUNC(avs_is_yuv444p16);
AVSC_LOAD_FUNC(avs_is_yuv422p16);
AVSC_LOAD_FUNC(avs_is_yuv420p16);
AVSC_LOAD_FUNC(avs_is_y16);
AVSC_LOAD_FUNC(avs_is_yuv444ps);
AVSC_LOAD_FUNC(avs_is_yuv422ps);
AVSC_LOAD_FUNC(avs_is_yuv420ps);
AVSC_LOAD_FUNC(avs_is_y32);
AVSC_LOAD_FUNC(avs_is_444);
AVSC_LOAD_FUNC(avs_is_422);
AVSC_LOAD_FUNC(avs_is_420);
AVSC_LOAD_FUNC(avs_is_y);
AVSC_LOAD_FUNC(avs_is_yuva);
AVSC_LOAD_FUNC(avs_is_planar_rgb);
AVSC_LOAD_FUNC(avs_is_planar_rgba);
AVSC_LOAD_FUNC(avs_is_color_space);
AVSC_LOAD_FUNC(avs_get_plane_width_subsampling);
@ -860,6 +1036,12 @@ AVSC_INLINE AVS_Library * avs_load_library() {
AVSC_LOAD_FUNC(avs_is_writable);
AVSC_LOAD_FUNC(avs_get_write_ptr_p);
AVSC_LOAD_FUNC(avs_num_components);
AVSC_LOAD_FUNC(avs_component_size);
AVSC_LOAD_FUNC(avs_bits_per_component);
#undef __AVSC_STRINGIFY
#undef AVSC_STRINGIFY
#undef AVSC_LOAD_FUNC
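The two hunks above extend the same function-pointer table twice: AVSC_DECLARE_FUNC adds a member to struct AVS_Library and AVSC_LOAD_FUNC resolves it when the DLL is loaded, so every new AviSynth+ API entry has to appear in both lists. A hedged sketch of the underlying pattern (simplified; struct and function names here are hypothetical):

#include <windows.h>

typedef int (*avs_is_rgb48_func)(const AVS_VideoInfo *);

struct my_avs_library {              /* stands in for struct AVS_Library */
    HMODULE           handle;        /* from LoadLibrary("avisynth")     */
    avs_is_rgb48_func avs_is_rgb48;
};

static int load_one(struct my_avs_library *lib)
{
    /* NULL here means the installed AviSynth build predates the symbol */
    lib->avs_is_rgb48 =
        (avs_is_rgb48_func)GetProcAddress(lib->handle, "avs_is_rgb48");
    return lib->avs_is_rgb48 != NULL;
}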

compat/cuda/cuviddec.h Normal file

@ -0,0 +1,827 @@
/*
* This copyright notice applies to this header file only:
*
* Copyright (c) 2010-2016 NVIDIA Corporation
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the software, and to permit persons to whom the
* software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/**
* \file cuviddec.h
* NvCuvid API provides Video Decoding interface to NVIDIA GPU devices.
* \date 2015-2016
* This file contains constants, structure definitions and function prototypes used for decoding.
*/
#if !defined(__CUDA_VIDEO_H__)
#define __CUDA_VIDEO_H__
#ifndef __cuda_cuda_h__
#include <cuda.h>
#endif // __cuda_cuda_h__
#if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64)
#if (CUDA_VERSION >= 3020) && (!defined(CUDA_FORCE_API_VERSION) || (CUDA_FORCE_API_VERSION >= 3020))
#define __CUVID_DEVPTR64
#endif
#endif
#if defined(__cplusplus)
extern "C" {
#endif /* __cplusplus */
typedef void *CUvideodecoder;
typedef struct _CUcontextlock_st *CUvideoctxlock;
/**
* \addtogroup VIDEO_DECODER Video Decoder
* @{
*/
/*!
* \enum cudaVideoCodec
* Video Codec Enums
*/
typedef enum cudaVideoCodec_enum {
cudaVideoCodec_MPEG1=0, /**< MPEG1 */
cudaVideoCodec_MPEG2, /**< MPEG2 */
cudaVideoCodec_MPEG4, /**< MPEG4 */
cudaVideoCodec_VC1, /**< VC1 */
cudaVideoCodec_H264, /**< H264 */
cudaVideoCodec_JPEG, /**< JPEG */
cudaVideoCodec_H264_SVC, /**< H264-SVC */
cudaVideoCodec_H264_MVC, /**< H264-MVC */
cudaVideoCodec_HEVC, /**< HEVC */
cudaVideoCodec_VP8, /**< VP8 */
cudaVideoCodec_VP9, /**< VP9 */
cudaVideoCodec_NumCodecs, /**< Max Codecs */
// Uncompressed YUV
cudaVideoCodec_YUV420 = (('I'<<24)|('Y'<<16)|('U'<<8)|('V')), /**< Y,U,V (4:2:0) */
cudaVideoCodec_YV12 = (('Y'<<24)|('V'<<16)|('1'<<8)|('2')), /**< Y,V,U (4:2:0) */
cudaVideoCodec_NV12 = (('N'<<24)|('V'<<16)|('1'<<8)|('2')), /**< Y,UV (4:2:0) */
cudaVideoCodec_YUYV = (('Y'<<24)|('U'<<16)|('Y'<<8)|('V')), /**< YUYV/YUY2 (4:2:2) */
cudaVideoCodec_UYVY = (('U'<<24)|('Y'<<16)|('V'<<8)|('Y')) /**< UYVY (4:2:2) */
} cudaVideoCodec;
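/* Illustrative note (not part of the header): the uncompressed entries above
 * pack a FourCC big-endian into the enum value. A hypothetical helper showing
 * the same packing: */
#define CUDA_FOURCC(a, b, c, d) \
    (((unsigned)(a) << 24) | ((unsigned)(b) << 16) | ((unsigned)(c) << 8) | (unsigned)(d))
/* CUDA_FOURCC('N','V','1','2') == cudaVideoCodec_NV12 */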
/*!
* \enum cudaVideoSurfaceFormat
* Video Surface Formats Enums
*/
typedef enum cudaVideoSurfaceFormat_enum {
cudaVideoSurfaceFormat_NV12=0 /**< NV12 (currently the only supported output format) */
} cudaVideoSurfaceFormat;
/*!
* \enum cudaVideoDeinterlaceMode
* Deinterlacing Modes Enums
*/
typedef enum cudaVideoDeinterlaceMode_enum {
cudaVideoDeinterlaceMode_Weave=0, /**< Weave both fields (no deinterlacing) */
cudaVideoDeinterlaceMode_Bob, /**< Drop one field */
cudaVideoDeinterlaceMode_Adaptive /**< Adaptive deinterlacing */
} cudaVideoDeinterlaceMode;
/*!
* \enum cudaVideoChromaFormat
* Chroma Formats Enums
*/
typedef enum cudaVideoChromaFormat_enum {
cudaVideoChromaFormat_Monochrome=0, /**< MonoChrome */
cudaVideoChromaFormat_420, /**< 4:2:0 */
cudaVideoChromaFormat_422, /**< 4:2:2 */
cudaVideoChromaFormat_444 /**< 4:4:4 */
} cudaVideoChromaFormat;
/*!
* \enum cudaVideoCreateFlags
* Decoder Flags Enums
*/
typedef enum cudaVideoCreateFlags_enum {
cudaVideoCreate_Default = 0x00, /**< Default operation mode: use dedicated video engines */
cudaVideoCreate_PreferCUDA = 0x01, /**< Use a CUDA-based decoder if faster than dedicated engines (requires a valid vidLock object for multi-threading) */
cudaVideoCreate_PreferDXVA = 0x02, /**< Go through DXVA internally if possible (requires D3D9 interop) */
cudaVideoCreate_PreferCUVID = 0x04 /**< Use dedicated video engines directly */
} cudaVideoCreateFlags;
/*!
* \struct CUVIDDECODECREATEINFO
* Struct used in create decoder
*/
typedef struct _CUVIDDECODECREATEINFO
{
unsigned long ulWidth; /**< Coded Sequence Width */
unsigned long ulHeight; /**< Coded Sequence Height */
unsigned long ulNumDecodeSurfaces; /**< Maximum number of internal decode surfaces */
cudaVideoCodec CodecType; /**< cudaVideoCodec_XXX */
cudaVideoChromaFormat ChromaFormat; /**< cudaVideoChromaFormat_XXX (only 4:2:0 is currently supported) */
unsigned long ulCreationFlags; /**< Decoder creation flags (cudaVideoCreateFlags_XXX) */
unsigned long bitDepthMinus8;
unsigned long Reserved1[4]; /**< Reserved for future use - set to zero */
/**
* area of the frame that should be displayed
*/
struct {
short left;
short top;
short right;
short bottom;
} display_area;
cudaVideoSurfaceFormat OutputFormat; /**< cudaVideoSurfaceFormat_XXX */
cudaVideoDeinterlaceMode DeinterlaceMode; /**< cudaVideoDeinterlaceMode_XXX */
unsigned long ulTargetWidth; /**< Post-processed Output Width (Should be aligned to 2) */
unsigned long ulTargetHeight; /**< Post-processed Output Height (Should be aligned to 2) */
unsigned long ulNumOutputSurfaces; /**< Maximum number of output surfaces simultaneously mapped */
CUvideoctxlock vidLock; /**< If non-NULL, context lock used for synchronizing ownership of the cuda context */
/**
* target rectangle in the output frame (for aspect ratio conversion)
* if a null rectangle is specified, {0,0,ulTargetWidth,ulTargetHeight} will be used
*/
struct {
short left;
short top;
short right;
short bottom;
} target_rect;
unsigned long Reserved2[5]; /**< Reserved for future use - set to zero */
} CUVIDDECODECREATEINFO;
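/* Hedged usage sketch (not part of the header): filling the struct above for
 * an 8-bit H.264 stream. Dimensions and surface counts are examples only;
 * cuvidCreateDecoder is declared further down in this header. */
static CUresult create_h264_decoder(CUvideodecoder *dec, CUvideoctxlock lock)
{
    CUVIDDECODECREATEINFO info = { 0 };
    info.ulWidth             = 1920;  /* coded size from the sequence header */
    info.ulHeight            = 1080;
    info.ulNumDecodeSurfaces = 20;    /* DPB size plus some headroom */
    info.CodecType           = cudaVideoCodec_H264;
    info.ChromaFormat        = cudaVideoChromaFormat_420;
    info.OutputFormat        = cudaVideoSurfaceFormat_NV12;
    info.DeinterlaceMode     = cudaVideoDeinterlaceMode_Weave;
    info.ulTargetWidth       = info.ulWidth;
    info.ulTargetHeight      = info.ulHeight;
    info.ulNumOutputSurfaces = 1;
    info.vidLock             = lock;
    return cuvidCreateDecoder(dec, &info);
}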
/*!
* \struct CUVIDH264DPBENTRY
* H.264 DPB Entry
*/
typedef struct _CUVIDH264DPBENTRY
{
int PicIdx; /**< picture index of reference frame */
int FrameIdx; /**< frame_num(short-term) or LongTermFrameIdx(long-term) */
int is_long_term; /**< 0=short term reference, 1=long term reference */
int not_existing; /**< non-existing reference frame (corresponding PicIdx should be set to -1) */
int used_for_reference; /**< 0=unused, 1=top_field, 2=bottom_field, 3=both_fields */
int FieldOrderCnt[2]; /**< field order count of top and bottom fields */
} CUVIDH264DPBENTRY;
/*!
* \struct CUVIDH264MVCEXT
* H.264 MVC Picture Parameters Ext
*/
typedef struct _CUVIDH264MVCEXT
{
int num_views_minus1;
int view_id;
unsigned char inter_view_flag;
unsigned char num_inter_view_refs_l0;
unsigned char num_inter_view_refs_l1;
unsigned char MVCReserved8Bits;
int InterViewRefsL0[16];
int InterViewRefsL1[16];
} CUVIDH264MVCEXT;
/*!
* \struct CUVIDH264SVCEXT
* H.264 SVC Picture Parameters Ext
*/
typedef struct _CUVIDH264SVCEXT
{
unsigned char profile_idc;
unsigned char level_idc;
unsigned char DQId;
unsigned char DQIdMax;
unsigned char disable_inter_layer_deblocking_filter_idc;
unsigned char ref_layer_chroma_phase_y_plus1;
signed char inter_layer_slice_alpha_c0_offset_div2;
signed char inter_layer_slice_beta_offset_div2;
unsigned short DPBEntryValidFlag;
unsigned char inter_layer_deblocking_filter_control_present_flag;
unsigned char extended_spatial_scalability_idc;
unsigned char adaptive_tcoeff_level_prediction_flag;
unsigned char slice_header_restriction_flag;
unsigned char chroma_phase_x_plus1_flag;
unsigned char chroma_phase_y_plus1;
unsigned char tcoeff_level_prediction_flag;
unsigned char constrained_intra_resampling_flag;
unsigned char ref_layer_chroma_phase_x_plus1_flag;
unsigned char store_ref_base_pic_flag;
unsigned char Reserved8BitsA;
unsigned char Reserved8BitsB;
// For the 4 scaled_ref_layer_XX fields below,
// if (extended_spatial_scalability_idc == 1), SPS field, G.7.3.2.1.4, add prefix "seq_"
// if (extended_spatial_scalability_idc == 2), SLH field, G.7.3.3.4,
short scaled_ref_layer_left_offset;
short scaled_ref_layer_top_offset;
short scaled_ref_layer_right_offset;
short scaled_ref_layer_bottom_offset;
unsigned short Reserved16Bits;
struct _CUVIDPICPARAMS *pNextLayer; /**< Points to the picparams for the next layer to be decoded. Linked list ends at the target layer. */
int bRefBaseLayer; /**< whether to store ref base pic */
} CUVIDH264SVCEXT;
/*!
* \struct CUVIDH264PICPARAMS
* H.264 Picture Parameters
*/
typedef struct _CUVIDH264PICPARAMS
{
// SPS
int log2_max_frame_num_minus4;
int pic_order_cnt_type;
int log2_max_pic_order_cnt_lsb_minus4;
int delta_pic_order_always_zero_flag;
int frame_mbs_only_flag;
int direct_8x8_inference_flag;
int num_ref_frames; // NOTE: shall meet level 4.1 restrictions
unsigned char residual_colour_transform_flag;
unsigned char bit_depth_luma_minus8; // Must be 0 (only 8-bit supported)
unsigned char bit_depth_chroma_minus8; // Must be 0 (only 8-bit supported)
unsigned char qpprime_y_zero_transform_bypass_flag;
// PPS
int entropy_coding_mode_flag;
int pic_order_present_flag;
int num_ref_idx_l0_active_minus1;
int num_ref_idx_l1_active_minus1;
int weighted_pred_flag;
int weighted_bipred_idc;
int pic_init_qp_minus26;
int deblocking_filter_control_present_flag;
int redundant_pic_cnt_present_flag;
int transform_8x8_mode_flag;
int MbaffFrameFlag;
int constrained_intra_pred_flag;
int chroma_qp_index_offset;
int second_chroma_qp_index_offset;
int ref_pic_flag;
int frame_num;
int CurrFieldOrderCnt[2];
// DPB
CUVIDH264DPBENTRY dpb[16]; // List of reference frames within the DPB
// Quantization Matrices (raster-order)
unsigned char WeightScale4x4[6][16];
unsigned char WeightScale8x8[2][64];
// FMO/ASO
unsigned char fmo_aso_enable;
unsigned char num_slice_groups_minus1;
unsigned char slice_group_map_type;
signed char pic_init_qs_minus26;
unsigned int slice_group_change_rate_minus1;
union
{
unsigned long long slice_group_map_addr;
const unsigned char *pMb2SliceGroupMap;
} fmo;
unsigned int Reserved[12];
// SVC/MVC
union
{
CUVIDH264MVCEXT mvcext;
CUVIDH264SVCEXT svcext;
};
} CUVIDH264PICPARAMS;
/*!
* \struct CUVIDMPEG2PICPARAMS
* MPEG-2 Picture Parameters
*/
typedef struct _CUVIDMPEG2PICPARAMS
{
int ForwardRefIdx; // Picture index of forward reference (P/B-frames)
int BackwardRefIdx; // Picture index of backward reference (B-frames)
int picture_coding_type;
int full_pel_forward_vector;
int full_pel_backward_vector;
int f_code[2][2];
int intra_dc_precision;
int frame_pred_frame_dct;
int concealment_motion_vectors;
int q_scale_type;
int intra_vlc_format;
int alternate_scan;
int top_field_first;
// Quantization matrices (raster order)
unsigned char QuantMatrixIntra[64];
unsigned char QuantMatrixInter[64];
} CUVIDMPEG2PICPARAMS;
////////////////////////////////////////////////////////////////////////////////////////////////
//
// MPEG-4 Picture Parameters
//
// MPEG-4 has VOP types instead of Picture types
#define I_VOP 0
#define P_VOP 1
#define B_VOP 2
#define S_VOP 3
/*!
* \struct CUVIDMPEG4PICPARAMS
* MPEG-4 Picture Parameters
*/
typedef struct _CUVIDMPEG4PICPARAMS
{
int ForwardRefIdx; // Picture index of forward reference (P/B-frames)
int BackwardRefIdx; // Picture index of backward reference (B-frames)
// VOL
int video_object_layer_width;
int video_object_layer_height;
int vop_time_increment_bitcount;
int top_field_first;
int resync_marker_disable;
int quant_type;
int quarter_sample;
int short_video_header;
int divx_flags;
// VOP
int vop_coding_type;
int vop_coded;
int vop_rounding_type;
int alternate_vertical_scan_flag;
int interlaced;
int vop_fcode_forward;
int vop_fcode_backward;
int trd[2];
int trb[2];
// Quantization matrices (raster order)
unsigned char QuantMatrixIntra[64];
unsigned char QuantMatrixInter[64];
int gmc_enabled;
} CUVIDMPEG4PICPARAMS;
/*!
* \struct CUVIDVC1PICPARAMS
* VC1 Picture Parameters
*/
typedef struct _CUVIDVC1PICPARAMS
{
int ForwardRefIdx; /**< Picture index of forward reference (P/B-frames) */
int BackwardRefIdx; /**< Picture index of backward reference (B-frames) */
int FrameWidth; /**< Actual frame width */
int FrameHeight; /**< Actual frame height */
// PICTURE
int intra_pic_flag; /**< Set to 1 for I,BI frames */
int ref_pic_flag; /**< Set to 1 for I,P frames */
int progressive_fcm; /**< Progressive frame */
// SEQUENCE
int profile;
int postprocflag;
int pulldown;
int interlace;
int tfcntrflag;
int finterpflag;
int psf;
int multires;
int syncmarker;
int rangered;
int maxbframes;
// ENTRYPOINT
int panscan_flag;
int refdist_flag;
int extended_mv;
int dquant;
int vstransform;
int loopfilter;
int fastuvmc;
int overlap;
int quantizer;
int extended_dmv;
int range_mapy_flag;
int range_mapy;
int range_mapuv_flag;
int range_mapuv;
int rangeredfrm; // range reduction state
} CUVIDVC1PICPARAMS;
/*!
* \struct CUVIDJPEGPICPARAMS
* JPEG Picture Parameters
*/
typedef struct _CUVIDJPEGPICPARAMS
{
int Reserved;
} CUVIDJPEGPICPARAMS;
/*!
* \struct CUVIDHEVCPICPARAMS
* HEVC Picture Parameters
*/
typedef struct _CUVIDHEVCPICPARAMS
{
// sps
int pic_width_in_luma_samples;
int pic_height_in_luma_samples;
unsigned char log2_min_luma_coding_block_size_minus3;
unsigned char log2_diff_max_min_luma_coding_block_size;
unsigned char log2_min_transform_block_size_minus2;
unsigned char log2_diff_max_min_transform_block_size;
unsigned char pcm_enabled_flag;
unsigned char log2_min_pcm_luma_coding_block_size_minus3;
unsigned char log2_diff_max_min_pcm_luma_coding_block_size;
unsigned char pcm_sample_bit_depth_luma_minus1;
unsigned char pcm_sample_bit_depth_chroma_minus1;
unsigned char pcm_loop_filter_disabled_flag;
unsigned char strong_intra_smoothing_enabled_flag;
unsigned char max_transform_hierarchy_depth_intra;
unsigned char max_transform_hierarchy_depth_inter;
unsigned char amp_enabled_flag;
unsigned char separate_colour_plane_flag;
unsigned char log2_max_pic_order_cnt_lsb_minus4;
unsigned char num_short_term_ref_pic_sets;
unsigned char long_term_ref_pics_present_flag;
unsigned char num_long_term_ref_pics_sps;
unsigned char sps_temporal_mvp_enabled_flag;
unsigned char sample_adaptive_offset_enabled_flag;
unsigned char scaling_list_enable_flag;
unsigned char IrapPicFlag;
unsigned char IdrPicFlag;
unsigned char bit_depth_luma_minus8;
unsigned char bit_depth_chroma_minus8;
unsigned char reserved1[14];
// pps
unsigned char dependent_slice_segments_enabled_flag;
unsigned char slice_segment_header_extension_present_flag;
unsigned char sign_data_hiding_enabled_flag;
unsigned char cu_qp_delta_enabled_flag;
unsigned char diff_cu_qp_delta_depth;
signed char init_qp_minus26;
signed char pps_cb_qp_offset;
signed char pps_cr_qp_offset;
unsigned char constrained_intra_pred_flag;
unsigned char weighted_pred_flag;
unsigned char weighted_bipred_flag;
unsigned char transform_skip_enabled_flag;
unsigned char transquant_bypass_enabled_flag;
unsigned char entropy_coding_sync_enabled_flag;
unsigned char log2_parallel_merge_level_minus2;
unsigned char num_extra_slice_header_bits;
unsigned char loop_filter_across_tiles_enabled_flag;
unsigned char loop_filter_across_slices_enabled_flag;
unsigned char output_flag_present_flag;
unsigned char num_ref_idx_l0_default_active_minus1;
unsigned char num_ref_idx_l1_default_active_minus1;
unsigned char lists_modification_present_flag;
unsigned char cabac_init_present_flag;
unsigned char pps_slice_chroma_qp_offsets_present_flag;
unsigned char deblocking_filter_override_enabled_flag;
unsigned char pps_deblocking_filter_disabled_flag;
signed char pps_beta_offset_div2;
signed char pps_tc_offset_div2;
unsigned char tiles_enabled_flag;
unsigned char uniform_spacing_flag;
unsigned char num_tile_columns_minus1;
unsigned char num_tile_rows_minus1;
unsigned short column_width_minus1[21];
unsigned short row_height_minus1[21];
unsigned int reserved3[15];
// RefPicSets
int NumBitsForShortTermRPSInSlice;
int NumDeltaPocsOfRefRpsIdx;
int NumPocTotalCurr;
int NumPocStCurrBefore;
int NumPocStCurrAfter;
int NumPocLtCurr;
int CurrPicOrderCntVal;
int RefPicIdx[16]; // [refpic] Indices of valid reference pictures (-1 if unused for reference)
int PicOrderCntVal[16]; // [refpic]
unsigned char IsLongTerm[16]; // [refpic] 0=not a long-term reference, 1=long-term reference
unsigned char RefPicSetStCurrBefore[8]; // [0..NumPocStCurrBefore-1] -> refpic (0..15)
unsigned char RefPicSetStCurrAfter[8]; // [0..NumPocStCurrAfter-1] -> refpic (0..15)
unsigned char RefPicSetLtCurr[8]; // [0..NumPocLtCurr-1] -> refpic (0..15)
unsigned char RefPicSetInterLayer0[8];
unsigned char RefPicSetInterLayer1[8];
unsigned int reserved4[12];
// scaling lists (diag order)
unsigned char ScalingList4x4[6][16]; // [matrixId][i]
unsigned char ScalingList8x8[6][64]; // [matrixId][i]
unsigned char ScalingList16x16[6][64]; // [matrixId][i]
unsigned char ScalingList32x32[2][64]; // [matrixId][i]
unsigned char ScalingListDCCoeff16x16[6]; // [matrixId]
unsigned char ScalingListDCCoeff32x32[2]; // [matrixId]
} CUVIDHEVCPICPARAMS;
/*!
* \struct CUVIDVP8PICPARAMS
* VP8 Picture Parameters
*/
typedef struct _CUVIDVP8PICPARAMS
{
int width;
int height;
unsigned int first_partition_size;
//Frame Indexes
unsigned char LastRefIdx;
unsigned char GoldenRefIdx;
unsigned char AltRefIdx;
union {
struct {
unsigned char frame_type : 1; /**< 0 = KEYFRAME, 1 = INTERFRAME */
unsigned char version : 3;
unsigned char show_frame : 1;
unsigned char update_mb_segmentation_data : 1; /**< Must be 0 if segmentation is not enabled */
unsigned char Reserved2Bits : 2;
};
unsigned char wFrameTagFlags;
};
unsigned char Reserved1[4];
unsigned int Reserved2[3];
} CUVIDVP8PICPARAMS;
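/* Illustrative note (not part of the header): the anonymous union above lets
 * a caller fill the frame tag either bit-by-bit or as one raw byte:
 *
 *   CUVIDVP8PICPARAMS p = { 0 };
 *   p.frame_type     = 1;        // INTERFRAME, via the bit-field view
 *   p.wFrameTagFlags = tag_byte; // or copy the whole tag byte in one store
 *                                // (bit-field layout is compiler-dependent)
 */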
/*!
* \struct CUVIDVP9PICPARAMS
* VP9 Picture Parameters
*/
typedef struct _CUVIDVP9PICPARAMS
{
unsigned int width;
unsigned int height;
//Frame Indices
unsigned char LastRefIdx;
unsigned char GoldenRefIdx;
unsigned char AltRefIdx;
unsigned char colorSpace;
unsigned short profile : 3;
unsigned short frameContextIdx : 2;
unsigned short frameType : 1;
unsigned short showFrame : 1;
unsigned short errorResilient : 1;
unsigned short frameParallelDecoding : 1;
unsigned short subSamplingX : 1;
unsigned short subSamplingY : 1;
unsigned short intraOnly : 1;
unsigned short allow_high_precision_mv : 1;
unsigned short refreshEntropyProbs : 1;
unsigned short reserved2Bits : 2;
unsigned short reserved16Bits;
unsigned char refFrameSignBias[4];
unsigned char bitDepthMinus8Luma;
unsigned char bitDepthMinus8Chroma;
unsigned char loopFilterLevel;
unsigned char loopFilterSharpness;
unsigned char modeRefLfEnabled;
unsigned char log2_tile_columns;
unsigned char log2_tile_rows;
unsigned char segmentEnabled : 1;
unsigned char segmentMapUpdate : 1;
unsigned char segmentMapTemporalUpdate : 1;
unsigned char segmentFeatureMode : 1;
unsigned char reserved4Bits : 4;
unsigned char segmentFeatureEnable[8][4];
short segmentFeatureData[8][4];
unsigned char mb_segment_tree_probs[7];
unsigned char segment_pred_probs[3];
unsigned char reservedSegment16Bits[2];
int qpYAc;
int qpYDc;
int qpChDc;
int qpChAc;
unsigned int activeRefIdx[3];
unsigned int resetFrameContext;
unsigned int mcomp_filter_type;
unsigned int mbRefLfDelta[4];
unsigned int mbModeLfDelta[2];
unsigned int frameTagSize;
unsigned int offsetToDctParts;
unsigned int reserved128Bits[4];
} CUVIDVP9PICPARAMS;
/*!
* \struct CUVIDPICPARAMS
* Picture Parameters for Decoding
*/
typedef struct _CUVIDPICPARAMS
{
int PicWidthInMbs; /**< Coded Frame Size */
int FrameHeightInMbs; /**< Coded Frame Height */
int CurrPicIdx; /**< Output index of the current picture */
int field_pic_flag; /**< 0=frame picture, 1=field picture */
int bottom_field_flag; /**< 0=top field, 1=bottom field (ignored if field_pic_flag=0) */
int second_field; /**< Second field of a complementary field pair */
// Bitstream data
unsigned int nBitstreamDataLen; /**< Number of bytes in bitstream data buffer */
const unsigned char *pBitstreamData; /**< Ptr to bitstream data for this picture (slice-layer) */
unsigned int nNumSlices; /**< Number of slices in this picture */
const unsigned int *pSliceDataOffsets; /**< nNumSlices entries, contains offset of each slice within the bitstream data buffer */
int ref_pic_flag; /**< This picture is a reference picture */
int intra_pic_flag; /**< This picture is entirely intra coded */
unsigned int Reserved[30]; /**< Reserved for future use */
// Codec-specific data
union {
CUVIDMPEG2PICPARAMS mpeg2; /**< Also used for MPEG-1 */
CUVIDH264PICPARAMS h264;
CUVIDVC1PICPARAMS vc1;
CUVIDMPEG4PICPARAMS mpeg4;
CUVIDJPEGPICPARAMS jpeg;
CUVIDHEVCPICPARAMS hevc;
CUVIDVP8PICPARAMS vp8;
CUVIDVP9PICPARAMS vp9;
unsigned int CodecReserved[1024];
} CodecSpecific;
} CUVIDPICPARAMS;
/*!
* \struct CUVIDPROCPARAMS
* Picture Parameters for Postprocessing
*/
typedef struct _CUVIDPROCPARAMS
{
int progressive_frame; /**< Input is progressive (deinterlace_mode will be ignored) */
int second_field; /**< Output the second field (ignored if deinterlace mode is Weave) */
int top_field_first; /**< Input frame is top field first (1st field is top, 2nd field is bottom) */
int unpaired_field; /**< Input only contains one field (2nd field is invalid) */
// The fields below are used for raw YUV input
unsigned int reserved_flags; /**< Reserved for future use (set to zero) */
unsigned int reserved_zero; /**< Reserved (set to zero) */
unsigned long long raw_input_dptr; /**< Input CUdeviceptr for raw YUV extensions */
unsigned int raw_input_pitch; /**< pitch in bytes of raw YUV input (should be aligned appropriately) */
unsigned int raw_input_format; /**< Reserved for future use (set to zero) */
unsigned long long raw_output_dptr; /**< Reserved for future use (set to zero) */
unsigned int raw_output_pitch; /**< Reserved for future use (set to zero) */
unsigned int Reserved[48];
void *Reserved3[3];
} CUVIDPROCPARAMS;
/**
*
* To minimize decode latencies, there should always be at least 2 pictures in the decode
* queue at any time, to make sure that all decode engines are always busy.
*
* Overall data flow:
* - cuvidCreateDecoder(...)
* For each picture:
* - cuvidDecodePicture(N)
* - cuvidMapVideoFrame(N-4)
* - do some processing in cuda
* - cuvidUnmapVideoFrame(N-4)
* - cuvidDecodePicture(N+1)
* - cuvidMapVideoFrame(N-3)
* ...
* - cuvidDestroyDecoder(...)
*
* NOTE:
* - When the cuda context is created from a D3D device, the D3D device must also be created
* with the D3DCREATE_MULTITHREADED flag.
* - There is a limit to how many pictures can be mapped simultaneously (ulNumOutputSurfaces)
* - cuVidDecodePicture may block the calling thread if there are too many pictures pending
* in the decode queue
*/
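/* Hedged sketch of the pipelined flow described above; `dec`, `pic` and the
 * queue depth of 4 mirror the example sequence and are not NVIDIA-mandated.
 * This uses the 32-bit cuvidMapVideoFrame variant declared below; 64-bit
 * builds map it to cuvidMapVideoFrame64. */
static CUresult process_picture(CUvideodecoder dec, CUVIDPICPARAMS *pic, int n)
{
    CUVIDPROCPARAMS vpp = { 0 };
    unsigned int dev_ptr, pitch;
    CUresult err = cuvidDecodePicture(dec, &pic[n]);   /* submit picture N */
    if (err != CUDA_SUCCESS || n < 4)
        return err;                /* keep the queue full before mapping */
    vpp.progressive_frame = 1;
    err = cuvidMapVideoFrame(dec, n - 4, &dev_ptr, &pitch, &vpp);
    if (err == CUDA_SUCCESS) {
        /* ... CUDA processing on the mapped NV12 surface ... */
        err = cuvidUnmapVideoFrame(dec, dev_ptr);
    }
    return err;
}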
/**
* \fn CUresult CUDAAPI cuvidCreateDecoder(CUvideodecoder *phDecoder, CUVIDDECODECREATEINFO *pdci)
* Create the decoder object
*/
CUresult CUDAAPI cuvidCreateDecoder(CUvideodecoder *phDecoder, CUVIDDECODECREATEINFO *pdci);
/**
* \fn CUresult CUDAAPI cuvidDestroyDecoder(CUvideodecoder hDecoder)
* Destroy the decoder object
*/
CUresult CUDAAPI cuvidDestroyDecoder(CUvideodecoder hDecoder);
/**
* \fn CUresult CUDAAPI cuvidDecodePicture(CUvideodecoder hDecoder, CUVIDPICPARAMS *pPicParams)
* Decode a single picture (field or frame)
*/
CUresult CUDAAPI cuvidDecodePicture(CUvideodecoder hDecoder, CUVIDPICPARAMS *pPicParams);
#if !defined(__CUVID_DEVPTR64) || defined(__CUVID_INTERNAL)
/**
* \fn CUresult CUDAAPI cuvidMapVideoFrame(CUvideodecoder hDecoder, int nPicIdx, unsigned int *pDevPtr, unsigned int *pPitch, CUVIDPROCPARAMS *pVPP);
* Post-process and map a video frame for use in cuda
*/
CUresult CUDAAPI cuvidMapVideoFrame(CUvideodecoder hDecoder, int nPicIdx,
unsigned int *pDevPtr, unsigned int *pPitch,
CUVIDPROCPARAMS *pVPP);
/**
* \fn CUresult CUDAAPI cuvidUnmapVideoFrame(CUvideodecoder hDecoder, unsigned int DevPtr)
* Unmap a previously mapped video frame
*/
CUresult CUDAAPI cuvidUnmapVideoFrame(CUvideodecoder hDecoder, unsigned int DevPtr);
#endif
#if defined(WIN64) || defined(_WIN64) || defined(__x86_64) || defined(AMD64) || defined(_M_AMD64)
/**
* \fn CUresult CUDAAPI cuvidMapVideoFrame64(CUvideodecoder hDecoder, int nPicIdx, unsigned long long *pDevPtr, unsigned int *pPitch, CUVIDPROCPARAMS *pVPP);
* map a video frame
*/
CUresult CUDAAPI cuvidMapVideoFrame64(CUvideodecoder hDecoder, int nPicIdx, unsigned long long *pDevPtr,
unsigned int *pPitch, CUVIDPROCPARAMS *pVPP);
/**
* \fn CUresult CUDAAPI cuvidUnmapVideoFrame64(CUvideodecoder hDecoder, unsigned long long DevPtr);
* Unmap a previously mapped video frame
*/
CUresult CUDAAPI cuvidUnmapVideoFrame64(CUvideodecoder hDecoder, unsigned long long DevPtr);
#if defined(__CUVID_DEVPTR64) && !defined(__CUVID_INTERNAL)
#define cuvidMapVideoFrame cuvidMapVideoFrame64
#define cuvidUnmapVideoFrame cuvidUnmapVideoFrame64
#endif
#endif
/**
*
* Context-locking: to facilitate multi-threaded implementations, the following 4 functions
* provide a simple mutex-style host synchronization. If a non-NULL context is specified
* in CUVIDDECODECREATEINFO, the codec library will acquire the mutex associated with the given
* context before making any cuda calls.
* A multi-threaded application could create a lock associated with a context handle so that
* multiple threads can safely share the same cuda context:
* - use cuCtxPopCurrent immediately after context creation in order to create a 'floating' context
* that can be passed to cuvidCtxLockCreate.
* - When using a floating context, all cuda calls should only be made within a cuvidCtxLock/cuvidCtxUnlock section.
*
* NOTE: This is a safer alternative to cuCtxPushCurrent and cuCtxPopCurrent, and is not related to video
* decoder in any way (implemented as a critical section associated with cuCtx{Push|Pop}Current calls).
*/
/**
* \fn CUresult CUDAAPI cuvidCtxLockCreate(CUvideoctxlock *pLock, CUcontext ctx)
*/
CUresult CUDAAPI cuvidCtxLockCreate(CUvideoctxlock *pLock, CUcontext ctx);
/**
* \fn CUresult CUDAAPI cuvidCtxLockDestroy(CUvideoctxlock lck)
*/
CUresult CUDAAPI cuvidCtxLockDestroy(CUvideoctxlock lck);
/**
* \fn CUresult CUDAAPI cuvidCtxLock(CUvideoctxlock lck, unsigned int reserved_flags)
*/
CUresult CUDAAPI cuvidCtxLock(CUvideoctxlock lck, unsigned int reserved_flags);
/**
* \fn CUresult CUDAAPI cuvidCtxUnlock(CUvideoctxlock lck, unsigned int reserved_flags)
*/
CUresult CUDAAPI cuvidCtxUnlock(CUvideoctxlock lck, unsigned int reserved_flags);
/** @} */ /* End VIDEO_DECODER */
////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__cplusplus)
// Auto-lock helper for C++ applications
class CCtxAutoLock
{
private:
CUvideoctxlock m_ctx;
public:
CCtxAutoLock(CUvideoctxlock ctx);
~CCtxAutoLock();
};
}
#endif /* __cplusplus */
#endif // __CUDA_VIDEO_H__

321
compat/cuda/nvcuvid.h Normal file
View File

@ -0,0 +1,321 @@
/*
* This copyright notice applies to this header file only:
*
* Copyright (c) 2010-2016 NVIDIA Corporation
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the software, and to permit persons to whom the
* software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/**
* \file nvcuvid.h
* NvCuvid API provides Video Decoding interface to NVIDIA GPU devices.
* \date 2015-2015
* This file contains the interface constants, structure definitions and function prototypes.
*/
#if !defined(__NVCUVID_H__)
#define __NVCUVID_H__
#include "compat/cuda/cuviddec.h"
#if defined(__cplusplus)
extern "C" {
#endif /* __cplusplus */
/*********************************
** Initialization
*********************************/
CUresult CUDAAPI cuvidInit(unsigned int Flags);
////////////////////////////////////////////////////////////////////////////////////////////////
//
// High-level helper APIs for video sources
//
typedef void *CUvideosource;
typedef void *CUvideoparser;
typedef long long CUvideotimestamp;
/**
* \addtogroup VIDEO_PARSER Video Parser
* @{
*/
/*!
* \enum cudaVideoState
* Video Source State
*/
typedef enum {
cudaVideoState_Error = -1, /**< Error state (invalid source) */
cudaVideoState_Stopped = 0, /**< Source is stopped (or reached end-of-stream) */
cudaVideoState_Started = 1 /**< Source is running and delivering data */
} cudaVideoState;
/*!
* \enum cudaAudioCodec
* Audio compression
*/
typedef enum {
cudaAudioCodec_MPEG1=0, /**< MPEG-1 Audio */
cudaAudioCodec_MPEG2, /**< MPEG-2 Audio */
cudaAudioCodec_MP3, /**< MPEG-1 Layer III Audio */
cudaAudioCodec_AC3, /**< Dolby Digital (AC3) Audio */
cudaAudioCodec_LPCM /**< PCM Audio */
} cudaAudioCodec;
/*!
* \struct CUVIDEOFORMAT
* Video format
*/
typedef struct
{
cudaVideoCodec codec; /**< Compression format */
/**
* frame rate = numerator / denominator (for example: 30000/1001)
*/
struct {
unsigned int numerator; /**< frame rate numerator (0 = unspecified or variable frame rate) */
unsigned int denominator; /**< frame rate denominator (0 = unspecified or variable frame rate) */
} frame_rate;
unsigned char progressive_sequence; /**< 0=interlaced, 1=progressive */
unsigned char bit_depth_luma_minus8; /**< high bit depth Luma */
unsigned char bit_depth_chroma_minus8; /**< high bit depth Chroma */
unsigned char reserved1; /**< Reserved for future use */
unsigned int coded_width; /**< coded frame width */
unsigned int coded_height; /**< coded frame height */
/**
* area of the frame that should be displayed
* typical example:
* coded_width = 1920, coded_height = 1088
* display_area = { 0,0,1920,1080 }
*/
struct {
int left; /**< left position of display rect */
int top; /**< top position of display rect */
int right; /**< right position of display rect */
int bottom; /**< bottom position of display rect */
} display_area;
cudaVideoChromaFormat chroma_format; /**< Chroma format */
unsigned int bitrate; /**< video bitrate (bps, 0=unknown) */
/**
* Display Aspect Ratio = x:y (4:3, 16:9, etc)
*/
struct {
int x;
int y;
} display_aspect_ratio;
/**
* Video Signal Description
*/
struct {
unsigned char video_format : 3;
unsigned char video_full_range_flag : 1;
unsigned char reserved_zero_bits : 4;
unsigned char color_primaries;
unsigned char transfer_characteristics;
unsigned char matrix_coefficients;
} video_signal_description;
unsigned int seqhdr_data_length; /**< Additional bytes following (CUVIDEOFORMATEX) */
} CUVIDEOFORMAT;
/*!
* \struct CUVIDEOFORMATEX
* Video format including raw sequence header information
*/
typedef struct
{
CUVIDEOFORMAT format;
unsigned char raw_seqhdr_data[1024];
} CUVIDEOFORMATEX;
/*!
* \struct CUAUDIOFORMAT
* Audio Formats
*/
typedef struct
{
cudaAudioCodec codec; /**< Compression format */
unsigned int channels; /**< number of audio channels */
unsigned int samplespersec; /**< sampling frequency */
unsigned int bitrate; /**< For uncompressed, can also be used to determine bits per sample */
unsigned int reserved1; /**< Reserved for future use */
unsigned int reserved2; /**< Reserved for future use */
} CUAUDIOFORMAT;
/*!
* \enum CUvideopacketflags
* Data packet flags
*/
typedef enum {
CUVID_PKT_ENDOFSTREAM = 0x01, /**< Set when this is the last packet for this stream */
CUVID_PKT_TIMESTAMP = 0x02, /**< Timestamp is valid */
CUVID_PKT_DISCONTINUITY = 0x04 /**< Set when a discontinuity has to be signalled */
} CUvideopacketflags;
/*!
* \struct CUVIDSOURCEDATAPACKET
* Data Packet
*/
typedef struct _CUVIDSOURCEDATAPACKET
{
unsigned long flags; /**< Combination of CUVID_PKT_XXX flags */
unsigned long payload_size; /**< number of bytes in the payload (may be zero if EOS flag is set) */
const unsigned char *payload; /**< Pointer to packet payload data (may be NULL if EOS flag is set) */
CUvideotimestamp timestamp; /**< Presentation timestamp (10MHz clock), only valid if CUVID_PKT_TIMESTAMP flag is set */
} CUVIDSOURCEDATAPACKET;
// Callback for packet delivery
typedef int (CUDAAPI *PFNVIDSOURCECALLBACK)(void *, CUVIDSOURCEDATAPACKET *);
/*!
* \struct CUVIDSOURCEPARAMS
* Source Params
*/
typedef struct _CUVIDSOURCEPARAMS
{
unsigned int ulClockRate; /**< Timestamp units in Hz (0=default=10000000Hz) */
unsigned int uReserved1[7]; /**< Reserved for future use - set to zero */
void *pUserData; /**< Parameter passed in to the data handlers */
PFNVIDSOURCECALLBACK pfnVideoDataHandler; /**< Called to deliver audio packets */
PFNVIDSOURCECALLBACK pfnAudioDataHandler; /**< Called to deliver video packets */
void *pvReserved2[8]; /**< Reserved for future use - set to NULL */
} CUVIDSOURCEPARAMS;
/*!
* \enum CUvideosourceformat_flags
* CUvideosourceformat_flags
*/
typedef enum {
CUVID_FMT_EXTFORMATINFO = 0x100 /**< Return extended format structure (CUVIDEOFORMATEX) */
} CUvideosourceformat_flags;
#if !defined(__APPLE__)
/**
* \fn CUresult CUDAAPI cuvidCreateVideoSource(CUvideosource *pObj, const char *pszFileName, CUVIDSOURCEPARAMS *pParams)
* Create Video Source
*/
CUresult CUDAAPI cuvidCreateVideoSource(CUvideosource *pObj, const char *pszFileName, CUVIDSOURCEPARAMS *pParams);
/**
* \fn CUresult CUDAAPI cuvidCreateVideoSourceW(CUvideosource *pObj, const wchar_t *pwszFileName, CUVIDSOURCEPARAMS *pParams)
* Create Video Source
*/
CUresult CUDAAPI cuvidCreateVideoSourceW(CUvideosource *pObj, const wchar_t *pwszFileName, CUVIDSOURCEPARAMS *pParams);
/**
* \fn CUresult CUDAAPI cuvidDestroyVideoSource(CUvideosource obj)
* Destroy Video Source
*/
CUresult CUDAAPI cuvidDestroyVideoSource(CUvideosource obj);
/**
* \fn CUresult CUDAAPI cuvidSetVideoSourceState(CUvideosource obj, cudaVideoState state)
* Set Video Source state
*/
CUresult CUDAAPI cuvidSetVideoSourceState(CUvideosource obj, cudaVideoState state);
/**
* \fn cudaVideoState CUDAAPI cuvidGetVideoSourceState(CUvideosource obj)
* Get Video Source state
*/
cudaVideoState CUDAAPI cuvidGetVideoSourceState(CUvideosource obj);
/**
* \fn CUresult CUDAAPI cuvidGetSourceVideoFormat(CUvideosource obj, CUVIDEOFORMAT *pvidfmt, unsigned int flags)
* Get Video Source Format
*/
CUresult CUDAAPI cuvidGetSourceVideoFormat(CUvideosource obj, CUVIDEOFORMAT *pvidfmt, unsigned int flags);
/**
* \fn CUresult CUDAAPI cuvidGetSourceAudioFormat(CUvideosource obj, CUAUDIOFORMAT *paudfmt, unsigned int flags)
* Set Video Source state
*/
CUresult CUDAAPI cuvidGetSourceAudioFormat(CUvideosource obj, CUAUDIOFORMAT *paudfmt, unsigned int flags);
#endif
/**
* \struct CUVIDPARSERDISPINFO
*/
typedef struct _CUVIDPARSERDISPINFO
{
int picture_index; /**< */
int progressive_frame; /**< */
int top_field_first; /**< */
int repeat_first_field; /**< Number of additional fields (1=ivtc, 2=frame doubling, 4=frame tripling, -1=unpaired field) */
CUvideotimestamp timestamp; /**< */
} CUVIDPARSERDISPINFO;
//
// Parser callbacks
// The parser will call these synchronously from within cuvidParseVideoData(), whenever a picture is ready to
// be decoded and/or displayed.
//
typedef int (CUDAAPI *PFNVIDSEQUENCECALLBACK)(void *, CUVIDEOFORMAT *);
typedef int (CUDAAPI *PFNVIDDECODECALLBACK)(void *, CUVIDPICPARAMS *);
typedef int (CUDAAPI *PFNVIDDISPLAYCALLBACK)(void *, CUVIDPARSERDISPINFO *);
/**
* \struct CUVIDPARSERPARAMS
*/
typedef struct _CUVIDPARSERPARAMS
{
cudaVideoCodec CodecType; /**< cudaVideoCodec_XXX */
unsigned int ulMaxNumDecodeSurfaces; /**< Max # of decode surfaces (parser will cycle through these) */
unsigned int ulClockRate; /**< Timestamp units in Hz (0=default=10000000Hz) */
unsigned int ulErrorThreshold; /**< % Error threshold (0-100) for calling pfnDecodePicture (100=always call pfnDecodePicture even if picture bitstream is fully corrupted) */
unsigned int ulMaxDisplayDelay; /**< Max display queue delay (improves pipelining of decode with display) - 0=no delay (recommended values: 2..4) */
unsigned int uReserved1[5]; /**< Reserved for future use - set to 0 */
void *pUserData; /**< User data for callbacks */
PFNVIDSEQUENCECALLBACK pfnSequenceCallback; /**< Called before decoding frames and/or whenever there is a format change */
PFNVIDDECODECALLBACK pfnDecodePicture; /**< Called when a picture is ready to be decoded (decode order) */
PFNVIDDISPLAYCALLBACK pfnDisplayPicture; /**< Called whenever a picture is ready to be displayed (display order) */
void *pvReserved2[7]; /**< Reserved for future use - set to NULL */
CUVIDEOFORMATEX *pExtVideoInfo; /**< [Optional] sequence header data from system layer */
} CUVIDPARSERPARAMS;
/**
* \fn CUresult CUDAAPI cuvidCreateVideoParser(CUvideoparser *pObj, CUVIDPARSERPARAMS *pParams)
*/
CUresult CUDAAPI cuvidCreateVideoParser(CUvideoparser *pObj, CUVIDPARSERPARAMS *pParams);
/**
* \fn CUresult CUDAAPI cuvidParseVideoData(CUvideoparser obj, CUVIDSOURCEDATAPACKET *pPacket)
*/
CUresult CUDAAPI cuvidParseVideoData(CUvideoparser obj, CUVIDSOURCEDATAPACKET *pPacket);
/**
* \fn CUresult CUDAAPI cuvidDestroyVideoParser(CUvideoparser obj)
*/
CUresult CUDAAPI cuvidDestroyVideoParser(CUvideoparser obj);
/** @} */ /* END VIDEO_PARSER */
////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__cplusplus)
}
#endif /* __cplusplus */
#endif // __NVCUVID_H__

3219
compat/nvenc/nvEncodeAPI.h Normal file

File diff suppressed because it is too large Load Diff

241
configure vendored
View File

@ -108,7 +108,6 @@ Configuration options:
--enable-gray enable full grayscale support (slower color)
--disable-swscale-alpha disable alpha channel support in swscale
--disable-all disable building components, libraries and programs
--enable-incompatible-libav-abi enable incompatible Libav fork ABI [no]
--enable-raise-major increase major version numbers in sonames [no]
Program options:
@ -225,7 +224,6 @@ External library support:
and libraw1394 [no]
--enable-libebur128 enable libebur128 for EBU R128 measurement,
needed for loudnorm filter [no]
--enable-libfaac enable AAC encoding via libfaac [no]
--enable-libfdk-aac enable AAC de/encoding via libfdk-aac [no]
--enable-libflite enable flite (voice synthesis) support via libflite [no]
--enable-libfontconfig enable libfontconfig, useful for drawtext filter [no]
@ -245,6 +243,7 @@ External library support:
--enable-libopencv enable video filtering via libopencv [no]
--enable-libopenh264 enable H.264 encoding via OpenH264 [no]
--enable-libopenjpeg enable JPEG 2000 de/encoding via OpenJPEG [no]
--enable-libopenmpt enable decoding tracked files via libopenmpt [no]
--enable-libopus enable Opus de/encoding via libopus [no]
--enable-libpulse enable Pulseaudio input via libpulse [no]
--enable-librubberband enable rubberband needed for rubberband filter [no]
@ -290,7 +289,7 @@ External library support:
if gnutls is not used [no]
--disable-schannel disable SChannel SSP, needed for TLS support on
Windows if openssl and gnutls are not used [autodetect]
--disable-sdl disable sdl [autodetect]
--disable-sdl2 disable sdl2 [autodetect]
--disable-securetransport disable Secure Transport, needed for TLS support
on OSX if openssl and gnutls are not used [autodetect]
--enable-x11grab enable X11 grabbing (legacy) [no]
@ -306,7 +305,7 @@ External library support:
--enable-libmfx enable Intel MediaSDK (AKA Quick Sync Video) code via libmfx [no]
--enable-libnpp enable Nvidia Performance Primitives-based code [no]
--enable-mmal enable Broadcom Multi-Media Abstraction Layer (Raspberry Pi) via MMAL [no]
--enable-nvenc enable Nvidia video encoding code [no]
--disable-nvenc disable Nvidia video encoding code [autodetect]
--enable-omx enable OpenMAX IL code [no]
--enable-omx-rpi enable OpenMAX IL code for Raspberry Pi [no]
--disable-vaapi disable Video Acceleration API (mainly Unix/Intel) code [autodetect]
@ -984,7 +983,7 @@ check_ld(){
check_$type $($cflags_filter $flags) || return
flags=$($ldflags_filter $flags)
libs=$($ldflags_filter $libs)
check_cmd $ld $LDFLAGS $flags $(ld_o $TMPE) $TMPO $libs $extralibs
check_cmd $ld $LDFLAGS $LDEXEFLAGS $flags $(ld_o $TMPE) $TMPO $libs $extralibs
}
print_include(){
@ -1447,14 +1446,15 @@ COMPONENT_LIST="
"
EXAMPLE_LIST="
avio_reading_example
avio_dir_cmd_example
avio_reading_example
decoding_encoding_example
demuxing_decoding_example
extract_mvs_example
filter_audio_example
filtering_audio_example
filtering_video_example
http_multiclient_example
metadata_example
muxing_example
qsvdec_example
@ -1486,7 +1486,6 @@ EXTERNAL_LIBRARY_LIST="
libcelt
libdc1394
libebur128
libfaac
libfdk_aac
libflite
libfontconfig
@ -1505,6 +1504,7 @@ EXTERNAL_LIBRARY_LIST="
libopencv
libopenh264
libopenjpeg
libopenmpt
libopus
libpulse
librtmp
@ -1546,6 +1546,7 @@ EXTERNAL_LIBRARY_LIST="
openssl
schannel
sdl
sdl2
securetransport
videotoolbox
x11grab
@ -1644,7 +1645,6 @@ CONFIG_LIST="
$PROGRAM_LIST
$SUBSYSTEM_LIST
fontconfig
incompatible_libav_abi
memalign_hack
memory_poisoning
neon_clobber_test
@ -1797,7 +1797,6 @@ HAVE_LIST_CMDLINE="
HAVE_LIST_PUB="
bigendian
fast_unaligned
incompatible_libav_abi
"
HEADERS_LIST="
@ -1919,6 +1918,7 @@ SYSTEM_FUNCS="
isatty
jack_port_get_latency_range
kbhit
LoadLibrary
localtime_r
lstat
lzo1x_999_compress
@ -1935,7 +1935,6 @@ SYSTEM_FUNCS="
sched_getaffinity
SetConsoleTextAttribute
SetConsoleCtrlHandler
SetDllDirectory
setmode
setrlimit
Sleep
@ -2019,7 +2018,7 @@ HAVE_LIST="
MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS
perl
pod2man
sdl
sdl2
section_data_rel_ro
texi2html
threads
@ -2212,6 +2211,8 @@ setend_deps="arm"
map 'eval ${v}_inline_deps=inline_asm' $ARCH_EXT_LIST_ARM
loongson2_deps="mips"
loongson3_deps="mips"
mipsfpu_deps="mips"
mipsdsp_deps="mips"
mipsdspr2_deps="mips"
@ -2545,17 +2546,19 @@ videotoolbox_hwaccel_deps="videotoolbox pthreads"
videotoolbox_hwaccel_extralibs="-framework QuartzCore"
xvmc_deps="X11_extensions_XvMClib_h"
h263_cuvid_hwaccel_deps="cuda cuvid"
h263_vaapi_hwaccel_deps="vaapi"
h263_vaapi_hwaccel_select="h263_decoder"
h263_videotoolbox_hwaccel_deps="videotoolbox"
h263_videotoolbox_hwaccel_select="h263_decoder"
h264_crystalhd_decoder_select="crystalhd h264_mp4toannexb_bsf h264_parser"
h264_cuvid_hwaccel_deps="cuda cuvid CUVIDH264PICPARAMS"
h264_cuvid_hwaccel_deps="cuda cuvid"
h264_d3d11va_hwaccel_deps="d3d11va"
h264_d3d11va_hwaccel_select="h264_decoder"
h264_dxva2_hwaccel_deps="dxva2"
h264_dxva2_hwaccel_select="h264_decoder"
h264_mediacodec_decoder_deps="mediacodec"
h264_mediacodec_hwaccel_deps="mediacodec"
h264_mediacodec_decoder_select="h264_mp4toannexb_bsf h264_parser"
h264_mmal_decoder_deps="mmal"
h264_mmal_decoder_select="mmal"
@ -2576,9 +2579,12 @@ h264_vdpau_hwaccel_deps="vdpau"
h264_vdpau_hwaccel_select="h264_decoder"
h264_videotoolbox_hwaccel_deps="videotoolbox"
h264_videotoolbox_hwaccel_select="h264_decoder"
hevc_cuvid_hwaccel_deps="cuda cuvid CUVIDHEVCPICPARAMS"
hevc_cuvid_hwaccel_deps="cuda cuvid"
hevc_d3d11va_hwaccel_deps="d3d11va DXVA_PicParams_HEVC"
hevc_d3d11va_hwaccel_select="hevc_decoder"
hevc_mediacodec_decoder_deps="mediacodec"
hevc_mediacodec_hwaccel_deps="mediacodec"
hevc_mediacodec_decoder_select="hevc_mp4toannexb_bsf hevc_parser"
hevc_dxva2_hwaccel_deps="dxva2 DXVA_PicParams_HEVC"
hevc_dxva2_hwaccel_select="hevc_decoder"
hevc_qsv_hwaccel_deps="libmfx"
@ -2586,10 +2592,12 @@ hevc_vaapi_hwaccel_deps="vaapi VAPictureParameterBufferHEVC"
hevc_vaapi_hwaccel_select="hevc_decoder"
hevc_vdpau_hwaccel_deps="vdpau VdpPictureInfoHEVC"
hevc_vdpau_hwaccel_select="hevc_decoder"
mjpeg_cuvid_hwaccel_deps="cuda cuvid"
mpeg_vdpau_decoder_deps="vdpau"
mpeg_vdpau_decoder_select="mpeg2video_decoder"
mpeg_xvmc_hwaccel_deps="xvmc"
mpeg_xvmc_hwaccel_select="mpeg2video_decoder"
mpeg1_cuvid_hwaccel_deps="cuda cuvid"
mpeg1_vdpau_decoder_deps="vdpau"
mpeg1_vdpau_decoder_select="mpeg1video_decoder"
mpeg1_vdpau_hwaccel_deps="vdpau"
@ -2599,6 +2607,7 @@ mpeg1_videotoolbox_hwaccel_select="mpeg1video_decoder"
mpeg1_xvmc_hwaccel_deps="xvmc"
mpeg1_xvmc_hwaccel_select="mpeg1video_decoder"
mpeg2_crystalhd_decoder_select="crystalhd"
mpeg2_cuvid_hwaccel_deps="cuda cuvid"
mpeg2_d3d11va_hwaccel_deps="d3d11va"
mpeg2_d3d11va_hwaccel_select="mpeg2video_decoder"
mpeg2_dxva2_hwaccel_deps="dxva2"
@ -2617,6 +2626,9 @@ mpeg2_videotoolbox_hwaccel_select="mpeg2video_decoder"
mpeg2_xvmc_hwaccel_deps="xvmc"
mpeg2_xvmc_hwaccel_select="mpeg2video_decoder"
mpeg4_crystalhd_decoder_select="crystalhd"
mpeg4_cuvid_hwaccel_deps="cuda cuvid"
mpeg4_mediacodec_decoder_deps="mediacodec"
mpeg4_mediacodec_hwaccel_deps="mediacodec"
mpeg4_mmal_decoder_deps="mmal"
mpeg4_mmal_decoder_select="mmal"
mpeg4_mmal_hwaccel_deps="mmal"
@ -2631,7 +2643,7 @@ mpeg4_videotoolbox_hwaccel_deps="videotoolbox"
mpeg4_videotoolbox_hwaccel_select="mpeg4_decoder"
msmpeg4_crystalhd_decoder_select="crystalhd"
vc1_crystalhd_decoder_select="crystalhd"
vc1_cuvid_hwaccel_deps="cuda cuvid CUVIDVC1PICPARAMS"
vc1_cuvid_hwaccel_deps="cuda cuvid"
vc1_d3d11va_hwaccel_deps="d3d11va"
vc1_d3d11va_hwaccel_select="vc1_decoder"
vc1_dxva2_hwaccel_deps="dxva2"
@ -2647,12 +2659,16 @@ vc1_vdpau_decoder_deps="vdpau"
vc1_vdpau_decoder_select="vc1_decoder"
vc1_vdpau_hwaccel_deps="vdpau"
vc1_vdpau_hwaccel_select="vc1_decoder"
vp8_cuvid_hwaccel_deps="cuda cuvid CUVIDVP9PICPARAMS"
vp9_cuvid_hwaccel_deps="cuda cuvid CUVIDVP9PICPARAMS"
vp8_cuvid_hwaccel_deps="cuda cuvid"
vp9_cuvid_hwaccel_deps="cuda cuvid"
vp8_mediacodec_decoder_deps="mediacodec"
vp8_mediacodec_hwaccel_deps="mediacodec"
vp9_d3d11va_hwaccel_deps="d3d11va DXVA_PicParams_VP9"
vp9_d3d11va_hwaccel_select="vp9_decoder"
vp9_dxva2_hwaccel_deps="dxva2 DXVA_PicParams_VP9"
vp9_dxva2_hwaccel_select="vp9_decoder"
vp9_mediacodec_decoder_deps="mediacodec"
vp9_mediacodec_hwaccel_deps="mediacodec"
vp9_vaapi_hwaccel_deps="vaapi VADecPictureParameterBufferVP9"
vp9_vaapi_hwaccel_select="vp9_decoder"
wmv3_crystalhd_decoder_select="crystalhd"
@ -2672,8 +2688,11 @@ vaapi_encode_deps="vaapi"
hwupload_cuda_filter_deps="cuda"
scale_npp_filter_deps="cuda libnpp"
nvenc_deps_any="dlopen LoadLibrary"
nvenc_encoder_deps="nvenc"
h264_cuvid_decoder_deps="cuda cuvid CUVIDH264PICPARAMS"
h263_cuvid_decoder_deps="cuda cuvid"
h263_cuvid_decoder_select="h263_cuvid_hwaccel"
h264_cuvid_decoder_deps="cuda cuvid"
h264_cuvid_decoder_select="h264_mp4toannexb_bsf h264_cuvid_hwaccel"
h264_nvenc_encoder_deps="nvenc"
h264_qsv_decoder_deps="libmfx"
@ -2683,7 +2702,7 @@ h264_qsv_encoder_select="qsvenc"
h264_vaapi_encoder_deps="VAEncPictureParameterBufferH264"
h264_vaapi_encoder_select="vaapi_encode golomb"
hevc_cuvid_decoder_deps="cuda cuvid CUVIDHEVCPICPARAMS"
hevc_cuvid_decoder_deps="cuda cuvid"
hevc_cuvid_decoder_select="hevc_mp4toannexb_bsf hevc_cuvid_hwaccel"
hevc_nvenc_encoder_deps="nvenc"
hevc_qsv_decoder_deps="libmfx"
@ -2692,18 +2711,26 @@ hevc_qsv_encoder_deps="libmfx"
hevc_qsv_encoder_select="qsvenc"
hevc_vaapi_encoder_deps="VAEncPictureParameterBufferHEVC"
hevc_vaapi_encoder_select="vaapi_encode golomb"
mjpeg_cuvid_decoder_deps="cuda cuvid"
mjpeg_cuvid_decoder_select="mjpeg_cuvid_hwaccel"
mpeg1_cuvid_decoder_deps="cuda cuvid"
mpeg1_cuvid_decoder_select="mpeg1_cuvid_hwaccel"
mpeg2_cuvid_decoder_deps="cuda cuvid"
mpeg2_cuvid_decoder_select="mpeg2_cuvid_hwaccel"
mpeg2_qsv_decoder_deps="libmfx"
mpeg2_qsv_decoder_select="qsvdec mpeg2_qsv_hwaccel"
mpeg2_qsv_encoder_deps="libmfx"
mpeg2_qsv_encoder_select="qsvenc"
mpeg4_cuvid_decoder_deps="cuda cuvid"
mpeg4_cuvid_decoder_select="mpeg4_cuvid_hwaccel"
nvenc_h264_encoder_deps="nvenc"
nvenc_hevc_encoder_deps="nvenc"
vc1_cuvid_decoder_deps="cuda cuvid CUVIDVC1PICPARAMS"
vc1_cuvid_decoder_deps="cuda cuvid"
vc1_cuvid_decoder_select="vc1_cuvid_hwaccel"
vp8_cuvid_decoder_deps="cuda cuvid CUVIDVP9PICPARAMS"
vp8_cuvid_decoder_deps="cuda cuvid"
vp8_cuvid_decoder_select="vp8_cuvid_hwaccel"
vp9_cuvid_decoder_deps="cuda cuvid CUVIDVP9PICPARAMS"
vp9_cuvid_decoder_deps="cuda cuvid"
vp9_cuvid_decoder_select="vp9_cuvid_hwaccel"
# parsers
@ -2747,8 +2774,6 @@ pcm_mulaw_at_encoder_select="audio_frame_queue"
chromaprint_muxer_deps="chromaprint"
h264_videotoolbox_encoder_deps="videotoolbox_encoder pthreads"
libcelt_decoder_deps="libcelt"
libfaac_encoder_deps="libfaac"
libfaac_encoder_select="audio_frame_queue"
libfdk_aac_decoder_deps="libfdk_aac"
libfdk_aac_encoder_deps="libfdk_aac"
libfdk_aac_encoder_select="audio_frame_queue"
@ -2767,9 +2792,12 @@ libopencore_amrnb_decoder_deps="libopencore_amrnb"
libopencore_amrnb_encoder_deps="libopencore_amrnb"
libopencore_amrnb_encoder_select="audio_frame_queue"
libopencore_amrwb_decoder_deps="libopencore_amrwb"
libopenh264_decoder_deps="libopenh264"
libopenh264_decoder_select="h264_mp4toannexb_bsf"
libopenh264_encoder_deps="libopenh264"
libopenjpeg_decoder_deps="libopenjpeg"
libopenjpeg_encoder_deps="libopenjpeg"
libopenmpt_demuxer_deps="libopenmpt"
libopus_decoder_deps="libopus"
libopus_encoder_deps="libopus"
libopus_encoder_select="audio_frame_queue"
@ -2827,6 +2855,7 @@ dv_muxer_select="dvprofile"
dxa_demuxer_select="riffdec"
eac3_demuxer_select="ac3_parser"
f4v_muxer_select="mov_muxer"
fifo_muxer_deps="threads"
flac_demuxer_select="flac_parser"
hds_muxer_select="flv_muxer"
hls_muxer_select="mpegts_muxer"
@ -2855,6 +2884,7 @@ nut_muxer_select="riffenc"
nuv_demuxer_select="riffdec"
oga_muxer_select="ogg_muxer"
ogg_demuxer_select="dirac_parse"
ogv_muxer_select="ogg_muxer"
opus_muxer_select="ogg_muxer"
psp_muxer_select="mov_muxer"
rtp_demuxer_select="sdp_demuxer"
@ -2918,7 +2948,7 @@ pulse_indev_deps="libpulse"
pulse_outdev_deps="libpulse"
qtkit_indev_extralibs="-framework QTKit -framework Foundation -framework QuartzCore"
qtkit_indev_select="qtkit"
sdl_outdev_deps="sdl"
sdl2_outdev_deps="sdl2"
sndio_indev_deps="sndio_h"
sndio_outdev_deps="sndio_h"
v4l_indev_deps="linux_videodev_h"
@ -3066,6 +3096,7 @@ tinterlace_filter_deps="gpl"
tinterlace_merge_test_deps="tinterlace_filter"
tinterlace_pad_test_deps="tinterlace_filter"
uspp_filter_deps="gpl avcodec"
vaguedenoiser_filter_deps="gpl"
vidstabdetect_filter_deps="libvidstab"
vidstabtransform_filter_deps="libvidstab"
zmq_filter_deps="libzmq"
@ -3074,15 +3105,15 @@ zscale_filter_deps="libzimg"
scale_vaapi_filter_deps="vaapi VAProcPipelineParameterBuffer"
# examples
avcodec_example_deps="avcodec avutil"
avio_dir_cmd="avformat avutil"
avio_reading="avformat avcodec avutil"
avio_dir_cmd_deps="avformat avutil"
avio_reading_deps="avformat avcodec avutil"
decoding_encoding_example_deps="avcodec avformat avutil"
demuxing_decoding_example_deps="avcodec avformat avutil"
extract_mvs_example_deps="avcodec avformat avutil"
filter_audio_example_deps="avfilter avutil"
filtering_audio_example_deps="avfilter avcodec avformat avutil"
filtering_video_example_deps="avfilter avcodec avformat avutil"
http_multiclient_example_deps="avformat avutil"
metadata_example_deps="avformat avutil"
muxing_example_deps="avcodec avformat avutil swscale"
qsvdec_example_deps="avcodec avutil libmfx h264_qsv_decoder vaapi_x11"
@ -3107,8 +3138,8 @@ ffmpeg_deps="avcodec avfilter avformat swresample"
ffmpeg_select="aformat_filter anull_filter atrim_filter format_filter
null_filter
setpts_filter trim_filter"
ffplay_deps="avcodec avformat swscale swresample sdl"
ffplay_libs='$sdl_libs'
ffplay_deps="avcodec avformat swscale swresample sdl2"
ffplay_libs='$sdl2_libs'
ffplay_select="rdft crop_filter transpose_filter hflip_filter vflip_filter rotate_filter"
ffprobe_deps="avcodec avformat"
ffserver_deps="avformat fork sarestart"
@ -3149,6 +3180,7 @@ objformat="elf"
pkg_config_default=pkg-config
ranlib_default="ranlib"
strip_default="strip"
version_script='--version-script'
yasmexe_default="yasm"
windres_default="windres"
@ -3194,7 +3226,7 @@ enable audiotoolbox
enable d3d11va dxva2 vaapi vda vdpau videotoolbox_hwaccel xvmc
enable xlib
enable vda_framework videotoolbox videotoolbox_encoder
enable nvenc vda_framework videotoolbox videotoolbox_encoder
# build settings
SHFLAGS='-shared -Wl,-soname,$$(@F)'
@ -3210,6 +3242,7 @@ SLIBNAME_WITH_MAJOR='$(SLIBNAME).$(LIBMAJOR)'
LIB_INSTALL_EXTRA_CMD='$$(RANLIB) "$(LIBDIR)/$(LIBNAME)"'
SLIB_INSTALL_NAME='$(SLIBNAME_WITH_VERSION)'
SLIB_INSTALL_LINKS='$(SLIBNAME_WITH_MAJOR) $(SLIBNAME)'
VERSION_SCRIPT_POSTPROCESS_CMD="cat"
asflags_filter=echo
cflags_filter=echo
@ -3545,6 +3578,8 @@ case "$toolchain" in
add_cppflags -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2
add_cflags -fno-strict-overflow -fstack-protector-all
add_ldflags -Wl,-z,relro -Wl,-z,now
add_cflags -fPIE
add_ldexeflags -fPIE -pie
;;
?*)
die "Unknown toolchain $toolchain"
@ -3794,11 +3829,11 @@ suncc_flags(){
westmere) echo -xtarget=westmere ;;
silvermont) echo -xarch=sse4_2 ;;
corei7-avx|sandybridge) echo -xtarget=sandybridge ;;
core-avx*|ivybridge|haswell|broadwell)
core-avx*|ivybridge|haswell|broadwell|skylake*|knl)
echo -xarch=avx ;;
amdfam10|barcelona) echo -xtarget=barcelona ;;
btver1) echo -xarch=amdsse4a ;;
btver2|bdver*) echo -xarch=avx ;;
btver2|bdver*|znver*) echo -xarch=avx ;;
athlon-4|athlon-[mx]p) echo -xarch=ssea ;;
k8|opteron|athlon64|athlon-fx)
echo -xarch=sse2a ;;
@ -4202,7 +4237,7 @@ case "$arch" in
sh4|sh)
arch="sh4"
;;
sun4u|sparc*)
sun4*|sparc*)
arch="sparc"
;;
tilegx|tile-gx)
@ -4350,6 +4385,9 @@ elif enabled mips; then
enable fast_cmov
enable fast_unaligned
disable aligned_stack
disable mipsfpu
disable mipsdsp
disable mipsdspr2
case $cpu in
loongson3*)
cpuflags="-march=loongson3a -mhard-float -fno-expensive-optimizations"
@ -4498,7 +4536,8 @@ elif enabled x86; then
;;
# targets that do support nopl and conditional mov (cmov)
i686|pentiumpro|pentium[23]|pentium-m|athlon|athlon-tbird|athlon-4|athlon-[mx]p|athlon64*|k8*|opteron*|athlon-fx\
|core*|atom|bonnell|nehalem|westmere|silvermont|sandybridge|ivybridge|haswell|broadwell|amdfam10|barcelona|b[dt]ver*)
|core*|atom|bonnell|nehalem|westmere|silvermont|sandybridge|ivybridge|haswell|broadwell|skylake*|knl\
|amdfam10|barcelona|b[dt]ver*|znver*)
cpuflags="-march=$cpu"
enable i686
enable fast_cmov
@ -4632,7 +4671,8 @@ case $target_os in
echo "hwcap_1 = OVERRIDE;" > mapfile &&
add_ldflags -Wl,-M,mapfile
nm_default='nm -P -g'
SLIB_CREATE_DEF_CMD='$(Q)perl $(SRC_PATH)/compat/solaris/make_sunver.pl $$(filter %.ver,$$^) $(OBJS) | grep -v @ > $(SUBDIR)lib$(NAME).ver-sol2'
version_script='-M'
VERSION_SCRIPT_POSTPROCESS_CMD='perl $(SRC_PATH)/compat/solaris/make_sunver.pl - $(OBJS)'
;;
netbsd)
disable symver
@ -4673,6 +4713,8 @@ case $target_os in
{ check_cflags -mdynamic-no-pic && add_asflags -mdynamic-no-pic; }
check_header dispatch/dispatch.h &&
add_cppflags '-I\$(SRC_PATH)/compat/dispatch_semaphore'
version_script='-exported_symbols_list'
VERSION_SCRIPT_POSTPROCESS_CMD='tr " " "\n" | sed -n /global:/,/local:/p | grep ";" | tr ";" "\n" | sed -E "s/(.+)/_\1/g" | sed -E "s/(.+[^*])$$$$/\1*/"'
;;
msys*)
die "Native MSYS builds are discouraged, please use the MINGW environment."
@ -4770,6 +4812,7 @@ case $target_os in
objformat="win32"
enable dos_paths
enabled shared && ! enabled small && check_cmd $windres --version && enable gnu_windres
add_cppflags -D_POSIX_C_SOURCE=200112 -D_XOPEN_SOURCE=600
;;
*-dos|freedos|opendos)
network_extralibs="-lsocket"
@ -4907,6 +4950,8 @@ probe_libc(){
(__MINGW32_MAJOR_VERSION == 3 && __MINGW32_MINOR_VERSION >= 15)" ||
die "ERROR: MinGW32 runtime version must be >= 3.15."
add_${pfx}cppflags -U__STRICT_ANSI__ -D__USE_MINGW_ANSI_STDIO=1
check_${pfx}cpp_condition _mingw.h "defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x0502" ||
add_${pfx}cppflags -D_WIN32_WINNT=0x0502
eval test \$${pfx_no_}cc_type = "gcc" &&
add_${pfx}cppflags -D__printf__=__gnu_printf__
elif check_${pfx}cpp_condition crtversion.h "defined _VC_CRT_MAJOR_VERSION"; then
@ -5034,7 +5079,6 @@ die_license_disabled gpl x11grab
die_license_disabled nonfree cuda
die_license_disabled nonfree cuvid
die_license_disabled nonfree libfaac
die_license_disabled nonfree libnpp
enabled gpl && die_license_disabled_gpl nonfree libfdk_aac
enabled gpl && die_license_disabled_gpl nonfree openssl
@ -5361,9 +5405,9 @@ check_code cc arm_neon.h "int16x8_t test = vdupq_n_s16(0)" && enable intrinsics_
check_ldflags -Wl,--as-needed
check_ldflags -Wl,-z,noexecstack
if check_func dlopen; then
if check_func dlopen && check_func dlsym; then
ldl=
elif check_func dlopen -ldl; then
elif check_func dlopen -ldl && check_func dlsym -ldl; then
ldl=-ldl
fi
@ -5372,7 +5416,7 @@ decklink_indev_extralibs="$decklink_indev_extralibs $ldl"
frei0r_filter_extralibs='$ldl'
frei0r_src_filter_extralibs='$ldl'
ladspa_filter_extralibs='$ldl'
nvenc_encoder_extralibs='$ldl'
nvenc_extralibs='$ldl'
coreimage_filter_extralibs="-framework QuartzCore -framework AppKit -framework OpenGL"
coreimagesrc_filter_extralibs="-framework QuartzCore -framework AppKit -framework OpenGL"
@ -5443,7 +5487,7 @@ check_func ${malloc_prefix}memalign && enable memalign
check_func ${malloc_prefix}posix_memalign && enable posix_memalign
check_func access
check_func arc4random
check_func_headers stdlib.h arc4random
check_func_headers time.h clock_gettime || { check_func_headers time.h clock_gettime -lrt && add_extralibs -lrt && LIBRT="-lrt"; }
check_func fcntl
check_func fork
@ -5476,11 +5520,11 @@ check_func_headers windows.h CoTaskMemFree -lole32
check_func_headers windows.h GetProcessAffinityMask
check_func_headers windows.h GetProcessTimes
check_func_headers windows.h GetSystemTimeAsFileTime
check_func_headers windows.h LoadLibrary
check_func_headers windows.h MapViewOfFile
check_func_headers windows.h PeekNamedPipe
check_func_headers windows.h SetConsoleTextAttribute
check_func_headers windows.h SetConsoleCtrlHandler
check_func_headers windows.h SetDllDirectory
check_func_headers windows.h Sleep
check_func_headers windows.h VirtualAlloc
check_struct windows.h "CONDITION_VARIABLE" Ptr
@ -5542,11 +5586,6 @@ check_type "va/va.h va/va_enc_jpeg.h" "VAEncPictureParameterBufferJPEG"
check_type "vdpau/vdpau.h" "VdpPictureInfoHEVC"
check_type "cuviddec.h" "CUVIDH264PICPARAMS"
check_type "cuviddec.h" "CUVIDHEVCPICPARAMS"
check_type "cuviddec.h" "CUVIDVC1PICPARAMS"
check_type "cuviddec.h" "CUVIDVP9PICPARAMS"
check_cpp_condition windows.h "!WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)" && enable winrt || disable winrt
if ! disabled w32threads && ! enabled pthreads; then
@ -5624,14 +5663,16 @@ enabled avisynth && { { check_lib2 "windows.h" LoadLibrary; } ||
die "ERROR: LoadLibrary/dlopen not found for avisynth"; }
enabled cuda && { check_lib cuda.h cuInit -lcuda ||
die "ERROR: CUDA not found"; }
enabled cuvid && { check_lib cuviddec.h cuvidCreateDecoder -lnvcuvid ||
enabled cuvid && { add_cflags -I$source_path;
check_lib "compat/cuda/cuviddec.h" cuvidCreateDecoder -lnvcuvid ||
die "ERROR: CUVID not found"; } &&
{ enabled cuda ||
die "ERROR: CUVID requires CUDA"; }
enabled chromaprint && require chromaprint chromaprint.h chromaprint_get_version -lchromaprint
enabled coreimage_filter && { check_header_objcc QuartzCore/CoreImage.h || disable coreimage_filter; }
enabled coreimagesrc_filter && { check_header_objcc QuartzCore/CoreImage.h || disable coreimagesrc_filter; }
enabled decklink && { check_header DeckLinkAPI.h || die "ERROR: DeckLinkAPI.h header not found"; }
enabled decklink && { { check_header DeckLinkAPI.h || die "ERROR: DeckLinkAPI.h header not found"; } &&
{ check_cpp_condition DeckLinkAPIVersion.h "BLACKMAGIC_DECKLINK_API_VERSION >= 0x0a060100" || die "ERROR: Decklink API version must be >= 10.6.1."; } }
enabled frei0r && { check_header frei0r.h || die "ERROR: frei0r.h header not found"; }
enabled gmp && require2 gmp gmp.h mpz_export -lgmp
enabled gnutls && require_pkg_config gnutls gnutls/gnutls.h gnutls_global_init
@ -5647,7 +5688,6 @@ enabled libcelt && require libcelt celt/celt.h celt_decode -lcelt0 &&
die "ERROR: libcelt must be installed and version must be >= 0.11.0."; }
enabled libcaca && require_pkg_config caca caca.h caca_create_canvas
enabled libebur128 && require ebur128 ebur128.h ebur128_relative_threshold -lebur128
enabled libfaac && require2 libfaac "stdint.h faac.h" faacEncGetVersion -lfaac
enabled libfdk_aac && { use_pkg_config fdk-aac "fdk-aac/aacenc_lib.h" aacEncOpen ||
{ require libfdk_aac fdk-aac/aacenc_lib.h aacEncOpen -lfdk-aac &&
warn "using libfdk without pkg-config"; } }
@ -5681,6 +5721,7 @@ enabled libopenjpeg && { { check_lib2 openjpeg-2.1/openjpeg.h opj_version
{ check_lib2 openjpeg-1.5/openjpeg.h opj_version -lopenjpeg -DOPJ_STATIC && add_cppflags -DOPJ_STATIC; } ||
{ check_lib2 openjpeg.h opj_version -lopenjpeg -DOPJ_STATIC && add_cppflags -DOPJ_STATIC; } ||
die "ERROR: libopenjpeg not found"; }
enabled libopenmpt && require_pkg_config "libopenmpt >= 0.2.6557" libopenmpt/libopenmpt.h openmpt_module_create
enabled libopus && require_pkg_config opus opus_multistream.h opus_multistream_decoder_create
enabled libpulse && require_pkg_config libpulse pulse/pulseaudio.h pa_context_new
enabled librtmp && require_pkg_config librtmp librtmp/rtmp.h RTMP_Socket
@ -5761,10 +5802,6 @@ enabled mmal && { check_lib interface/mmal/mmal.h mmal_port_connect
enabled mmal && check_func_headers interface/mmal/mmal.h "MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS"
enabled netcdf && require_pkg_config netcdf netcdf.h nc_inq_libvers
enabled nvenc && { check_header nvEncodeAPI.h || die "ERROR: nvEncodeAPI.h not found."; } &&
{ check_cpp_condition nvEncodeAPI.h "NVENCAPI_MAJOR_VERSION >= 6" ||
die "ERROR: NVENC API version 5 or older is not supported"; } &&
{ [ $target_os != cygwin ] || die "ERROR: NVENC is not supported on Cygwin currently."; }
enabled openal && { { for al_libs in "${OPENAL_LIBS}" "-lopenal" "-lOpenAL32"; do
check_lib 'AL/al.h' alGetError "${al_libs}" && break; done } ||
die "ERROR: openal not found"; } &&
@ -5788,7 +5825,8 @@ enabled omx && { check_header OMX_Core.h ||
add_cflags -isystem/opt/vc/include/IL ; }
check_header OMX_Core.h ; } ||
die "ERROR: OpenMAX IL headers not found"; }
enabled openssl && { use_pkg_config openssl openssl/ssl.h SSL_library_init ||
enabled openssl && { use_pkg_config openssl openssl/ssl.h OPENSSL_init_ssl ||
use_pkg_config openssl openssl/ssl.h SSL_library_init ||
check_lib openssl/ssl.h SSL_library_init -lssl -lcrypto ||
check_lib openssl/ssl.h SSL_library_init -lssl32 -leay32 ||
check_lib openssl/ssl.h SSL_library_init -lssl -lcrypto -lws2_32 -lgdi32 ||
@ -5817,31 +5855,27 @@ if enabled gcrypt; then
fi
fi
if ! disabled sdl; then
SDL_CONFIG="${cross_prefix}sdl-config"
if check_pkg_config sdl SDL_events.h SDL_PollEvent; then
check_cpp_condition SDL.h "(SDL_MAJOR_VERSION<<16 | SDL_MINOR_VERSION<<8 | SDL_PATCHLEVEL) >= 0x010201" $sdl_cflags &&
check_cpp_condition SDL.h "(SDL_MAJOR_VERSION<<16 | SDL_MINOR_VERSION<<8 | SDL_PATCHLEVEL) < 0x010300" $sdl_cflags &&
enable sdl
disabled sdl && disable sdl2
if ! disabled sdl2; then
SDL2_CONFIG="${cross_prefix}sdl2-config"
if check_pkg_config sdl2 SDL_events.h SDL_PollEvent; then
check_cpp_condition SDL.h "(SDL_MAJOR_VERSION<<16 | SDL_MINOR_VERSION<<8 | SDL_PATCHLEVEL) >= 0x020001" $sdl2_cflags &&
check_cpp_condition SDL.h "(SDL_MAJOR_VERSION<<16 | SDL_MINOR_VERSION<<8 | SDL_PATCHLEVEL) < 0x020100" $sdl2_cflags &&
check_func SDL_Init $sdl2_libs $sdl2_cflags && enable sdl2
else
if "${SDL_CONFIG}" --version > /dev/null 2>&1; then
sdl_cflags=$("${SDL_CONFIG}" --cflags)
sdl_libs=$("${SDL_CONFIG}" --libs)
check_func_headers SDL_version.h SDL_Linked_Version $sdl_cflags $sdl_libs &&
check_cpp_condition SDL.h "(SDL_MAJOR_VERSION<<16 | SDL_MINOR_VERSION<<8 | SDL_PATCHLEVEL) >= 0x010201" $sdl_cflags &&
check_cpp_condition SDL.h "(SDL_MAJOR_VERSION<<16 | SDL_MINOR_VERSION<<8 | SDL_PATCHLEVEL) < 0x010300" $sdl_cflags &&
enable sdl
elif enabled sdl ; then
die "ERROR: SDL not found"
else
disable sdl
fi
if "${SDL2_CONFIG}" --version > /dev/null 2>&1; then
sdl2_cflags=$("${SDL2_CONFIG}" --cflags)
sdl2_libs=$("${SDL2_CONFIG}" --libs)
check_cpp_condition SDL.h "(SDL_MAJOR_VERSION<<16 | SDL_MINOR_VERSION<<8 | SDL_PATCHLEVEL) >= 0x020001" $sdl2_cflags &&
check_cpp_condition SDL.h "(SDL_MAJOR_VERSION<<16 | SDL_MINOR_VERSION<<8 | SDL_PATCHLEVEL) < 0x020100" $sdl2_cflags &&
check_func SDL_Init $sdl2_libs $sdl2_cflags && enable sdl2
fi
fi
if test $target_os = "mingw32"; then
sdl_libs="$sdl_libs -mconsole"
sdl2_libs="$sdl2_libs -mconsole"
fi
fi
enabled sdl && add_cflags $sdl_cflags && add_extralibs $sdl_libs
enabled sdl2 && add_cflags $sdl2_cflags && add_extralibs $sdl2_libs
disabled securetransport || { check_func SecIdentityCreate "-Wl,-framework,CoreFoundation -Wl,-framework,Security" &&
check_lib2 "Security/SecureTransport.h Security/Security.h" "SSLCreateContext SecItemImport" "-Wl,-framework,CoreFoundation -Wl,-framework,Security" &&
@ -5980,6 +6014,26 @@ enabled vdpau && enabled xlib &&
check_lib2 "vdpau/vdpau.h vdpau/vdpau_x11.h" vdp_device_create_x11 -lvdpau &&
enable vdpau_x11
if enabled x86; then
case $target_os in
mingw32*|mingw64*|win32|win64|linux|cygwin*)
;;
*)
disable nvenc
;;
esac
else
disable nvenc
fi
enabled nvenc &&
check_cc -I$source_path <<EOF || disable nvenc
#include "compat/nvenc/nvEncodeAPI.h"
NV_ENCODE_API_FUNCTION_LIST flist;
void f(void) { struct { const GUID guid; } s[] = { { NV_ENC_PRESET_HQ_GUID } }; }
int main(void) { return 0; }
EOF
# Funny iconv installations are not unusual, so check it after all flags have been set
disabled iconv || check_func_headers iconv.h iconv || check_lib2 iconv.h iconv -liconv || disable iconv
@ -6030,6 +6084,10 @@ enabled neon_clobber_test &&
-Wl,--wrap,avcodec_decode_subtitle2 \
-Wl,--wrap,avcodec_encode_audio2 \
-Wl,--wrap,avcodec_encode_video2 \
-Wl,--wrap,avcodec_send_packet \
-Wl,--wrap,avcodec_receive_frame \
-Wl,--wrap,avcodec_send_frame \
-Wl,--wrap,avcodec_receive_packet \
-Wl,--wrap,avcodec_encode_subtitle \
-Wl,--wrap,swr_convert \
-Wl,--wrap,avresample_convert ||
@ -6043,26 +6101,35 @@ enabled xmm_clobber_test &&
-Wl,--wrap,avcodec_encode_audio2 \
-Wl,--wrap,avcodec_encode_video2 \
-Wl,--wrap,avcodec_encode_subtitle \
-Wl,--wrap,avcodec_send_packet \
-Wl,--wrap,avcodec_receive_frame \
-Wl,--wrap,avcodec_send_frame \
-Wl,--wrap,avcodec_receive_packet \
-Wl,--wrap,swr_convert \
-Wl,--wrap,avresample_convert \
-Wl,--wrap,sws_scale ||
disable xmm_clobber_test
echo "X { local: *; };" > $TMPV
if test_ldflags -Wl,--version-script,$TMPV; then
append SHFLAGS '-Wl,--version-script,\$(SUBDIR)lib\$(NAME).ver'
elif test_ldflags -Wl,-M,$TMPV; then
append SHFLAGS '-Wl,-M,\$(SUBDIR)lib\$(NAME).ver-sol2'
fi
check_ld "cc" <<EOF && enable proper_dce
extern const int array[512];
static inline int func(void) { return array[0]; }
int main(void) { return 0; }
EOF
check_cc <<EOF && enable symver_asm_label
if enabled proper_dce; then
echo "X { local: *; };" > $TMPV
if test_ldflags -Wl,${version_script},$TMPV; then
append SHFLAGS '-Wl,${version_script},\$(SUBDIR)lib\$(NAME).ver'
check_cc <<EOF && enable symver_asm_label
void ff_foo(void) __asm__ ("av_foo@VERSION");
void ff_foo(void) { ${inline_asm+__asm__($quotes);} }
EOF
check_cc <<EOF && enable symver_gnu_asm
check_cc <<EOF && enable symver_gnu_asm
__asm__(".symver ff_foo,av_foo@VERSION");
void ff_foo(void) {}
EOF
fi
fi
if [ -z "$optflags" ]; then
if enabled small; then
@ -6145,6 +6212,7 @@ elif enabled llvm_gcc; then
check_cflags -mllvm -stack-alignment=16
elif enabled clang; then
check_cflags -mllvm -stack-alignment=16
check_cflags -mstack-alignment=16
check_cflags -Qunused-arguments
check_cflags -Werror=implicit-function-declaration
check_cflags -Werror=missing-prototypes
@ -6197,6 +6265,10 @@ __declspec($_restrict) void* foo(int);
EOF
fi
check_func strtoll || add_cflags -Dstrtoll=_strtoi64
# the new SSA optimzer in VS2015 U3 is mis-optimizing some parts of the code
# this flag should be re-checked on newer compiler releases and put under a
# version check once its fixed
check_cflags -d2SSAOptimizer-
fi
for pfx in "" host_; do
@ -6413,7 +6485,7 @@ echo "new filter support ${avfilter-no}"
echo "network support ${network-no}"
echo "threading support ${thread_type-no}"
echo "safe bitstream reader ${safe_bitstream_reader-no}"
echo "SDL support ${sdl-no}"
echo "SDL2 support ${sdl2-no}"
echo "opencl enabled ${opencl-no}"
echo "JNI support ${jni-no}"
echo "texi2html enabled ${texi2html-no}"
@ -6573,7 +6645,7 @@ HOSTLD_O=$HOSTLD_O
TARGET_EXEC=$target_exec $target_exec_args
TARGET_PATH=$target_path
TARGET_SAMPLES=${target_samples:-\$(SAMPLES)}
CFLAGS-ffplay=$sdl_cflags
CFLAGS-ffplay=${sdl2_cflags}
ZLIB=$($ldflags_filter -lz)
LIB_INSTALL_EXTRA_CMD=$LIB_INSTALL_EXTRA_CMD
EXTRALIBS=$extralibs
@ -6590,6 +6662,7 @@ SLIB_INSTALL_NAME=${SLIB_INSTALL_NAME}
SLIB_INSTALL_LINKS=${SLIB_INSTALL_LINKS}
SLIB_INSTALL_EXTRA_LIB=${SLIB_INSTALL_EXTRA_LIB}
SLIB_INSTALL_EXTRA_SHLIB=${SLIB_INSTALL_EXTRA_SHLIB}
VERSION_SCRIPT_POSTPROCESS_CMD=${VERSION_SCRIPT_POSTPROCESS_CMD}
SAMPLES:=${samples:-\$(FATE_SAMPLES)}
NOREDZONE_FLAGS=$noredzone_flags
EOF

View File

@ -15,6 +15,63 @@ libavutil: 2015-08-28
API changes, most recent first:
-------- 8< --------- FFmpeg 3.2 was cut here -------- 8< ---------
2016-10-24 - 73ead47 - lavf 57.55.100 - avformat.h
Add AV_DISPOSITION_TIMED_THUMBNAILS
2016-10-24 - a246fef - lavf 57.54.100 - avformat.h
Add avformat_init_output() and AVSTREAM_INIT_IN_ macros
2016-10-22 - f5495c9 - lavu 55.33.100 - avassert.h
Add av_assert0_fpu() / av_assert2_fpu()
2016-10-07 - 3f9137c / 32c8359 - lavc 57.61.100 / 57.24.0 - avcodec.h
Decoders now export the frame timestamp as AVFrame.pts. It was
previously exported as AVFrame.pkt_pts, which is now deprecated.
Note: When decoding, AVFrame.pts uses the stream/packet timebase,
and not the codec timebase.
2016-09-28 - eba0414 - lavu 55.32.100 / 55.16.0 - hwcontext.h hwcontext_qsv.h
Add AV_HWDEVICE_TYPE_QSV and a new installed header with QSV-specific
hwcontext definitions.
2016-09-26 - 32c25f0 - lavc 57.59.100 / 57.23.0 - avcodec.h
AVCodecContext.hw_frames_ctx now may be used by decoders.
2016-09-27 - f0b6f72 - lavf 57.51.100 - avformat.h
Add av_stream_get_codec_timebase()
2016-09-27 - 23c0779 - lswr 2.2.100 - swresample.h
Add swr_build_matrix().
2016-09-23 - 30d3e36 - lavc 57.58.100 - avcodec.h
Add AV_CODEC_CAP_AVOID_PROBING codec capability flag.
2016-09-14 - ae1dd0c - lavf 57.49.100 - avformat.h
Add avformat_transfer_internal_stream_timing_info helper to help with stream
copy.
2016-08-29 - 4493390 - lavfi 6.58.100 - avfilter.h
Add AVFilterContext.nb_threads.
2016-08-15 - c3c4c72 - lavc 57.53.100 - avcodec.h
Add trailing_padding to AVCodecContext to match the corresponding
field in AVCodecParameters.
2016-08-15 - b746ed7 - lavc 57.52.100 - avcodec.h
Add a new API for chained BSF filters and passthrough (null) BSF --
av_bsf_list_alloc(), av_bsf_list_free(), av_bsf_list_append(),
av_bsf_list_append2(), av_bsf_list_finalize(), av_bsf_list_parse_str()
and av_bsf_get_null_filter().
2016-08-04 - 82a33c8 - lavf 57.46.100 - avformat.h
Add av_get_frame_filename2()
2016-07-09 - 775389f / 90f469a - lavc 57.50.100 / 57.20.0 - avcodec.h
Add FF_PROFILE_H264_MULTIVIEW_HIGH and FF_PROFILE_H264_STEREO_HIGH.
2016-06-30 - c1c7e0ab - lavf 57.41.100 - avformat.h
Moved codecpar field from AVStream to the end of the struct, so that
the following private fields are in the same location as in FFmpeg 3.0 (lavf 57.25.100).

File diff suppressed because it is too large Load Diff

View File

@ -38,13 +38,13 @@ DOCS = $(DOCS-yes)
DOC_EXAMPLES-$(CONFIG_AVIO_DIR_CMD_EXAMPLE) += avio_dir_cmd
DOC_EXAMPLES-$(CONFIG_AVIO_READING_EXAMPLE) += avio_reading
DOC_EXAMPLES-$(CONFIG_AVCODEC_EXAMPLE) += avcodec
DOC_EXAMPLES-$(CONFIG_DECODING_ENCODING_EXAMPLE) += decoding_encoding
DOC_EXAMPLES-$(CONFIG_DEMUXING_DECODING_EXAMPLE) += demuxing_decoding
DOC_EXAMPLES-$(CONFIG_EXTRACT_MVS_EXAMPLE) += extract_mvs
DOC_EXAMPLES-$(CONFIG_FILTER_AUDIO_EXAMPLE) += filter_audio
DOC_EXAMPLES-$(CONFIG_FILTERING_AUDIO_EXAMPLE) += filtering_audio
DOC_EXAMPLES-$(CONFIG_FILTERING_VIDEO_EXAMPLE) += filtering_video
DOC_EXAMPLES-$(CONFIG_HTTP_MULTICLIENT_EXAMPLE) += http_multiclient
DOC_EXAMPLES-$(CONFIG_METADATA_EXAMPLE) += metadata
DOC_EXAMPLES-$(CONFIG_MUXING_EXAMPLE) += muxing
DOC_EXAMPLES-$(CONFIG_QSVDEC_EXAMPLE) += qsvdec
@ -125,7 +125,7 @@ $(DOC_EXAMPLES:%$(EXESUF)=%.o): | doc/examples
OBJDIRS += doc/examples
DOXY_INPUT = $(INSTHEADERS) $(DOC_EXAMPLES:%$(EXESUF)=%.c) $(LIB_EXAMPLES:%$(EXESUF)=%.c)
DOXY_INPUT_DEPS = $(addprefix $(SRC_PATH)/, $(DOXY_INPUT))
DOXY_INPUT_DEPS = $(addprefix $(SRC_PATH)/, $(DOXY_INPUT)) config.mak
doc/doxy/html: TAG = DOXY
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(SRC_PATH)/doc/doxy-wrapper.sh $(DOXY_INPUT_DEPS)

View File

@ -18,7 +18,7 @@ comma-separated list of filters, whose parameters follow the filter
name after a '='.
@example
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1:opt2=str2][,filter2] OUTPUT
@end example
Below is a description of the currently available bitstream filters,

View File

@ -1049,7 +1049,31 @@ Possible values:
@item rc_max_vbv_use @var{float} (@emph{encoding,video})
@item rc_min_vbv_use @var{float} (@emph{encoding,video})
@item ticks_per_frame @var{integer} (@emph{decoding/encoding,audio,video})
@item color_primaries @var{integer} (@emph{decoding/encoding,video})
Possible values:
@table @samp
@item bt709
BT.709
@item bt470m
BT.470 M
@item bt470bg
BT.470 BG
@item smpte170m
SMPTE 170 M
@item smpte240m
SMPTE 240 M
@item film
Film
@item bt2020
BT.2020
@item smpte428_1
SMPTE ST 428-1
@item smpte431
SMPTE 431-2
@item smpte432
SMPTE 432-1
@end table
@item color_trc @var{integer} (@emph{decoding/encoding,video})
Possible values:
@ -1060,29 +1084,58 @@ BT.709
BT.470 M
@item gamma28
BT.470 BG
@item linear
@item smpte170m
SMPTE 170 M
@item log
@item smpte240m
SMPTE 240 M
@item log_sqrt
@item linear
Linear
@item iec61966_2_4
@item log
Log
@item bt1361
@item log_sqrt
Log square root
@item iec61966_2_1
@item iec61966_2_4
IEC 61966-2-4
@item bt2020_10bit
@item bt1361
BT.1361
@item bt2020_12bit
@item iec61966_2_1
IEC 61966-2-1
@item smpte2084
@item bt2020_10bit
BT.2020 - 10 bit
@item smpte428_1
@item bt2020_12bit
BT.2020 - 12 bit
@item smpte2084
SMPTE ST 2084
@item smpte428_1
SMPTE ST 428-1
@item arib-std-b67
ARIB STD-B67
@end table
@item colorspace @var{integer} (@emph{decoding/encoding,video})
Possible values:
@table @samp
@item rgb
RGB
@item bt709
BT.709
@item fcc
FCC
@item bt470bg
BT.470 BG
@item smpte170m
SMPTE 170 M
@item smpte240m
SMPTE 240 M
@item ycocg
YCOCG
@item bt2020_ncl
BT.2020 NCL
@item bt2020_cl
BT.2020 CL
@item smpte2085
SMPTE 2085
@end table
@item color_range @var{integer} (@emph{decoding/encoding,video})
If used as input parameter, it serves as a hint to the decoder, which

View File

@ -279,7 +279,7 @@ present between the subtitle lines because of double-sized teletext charactes.
Default value is 1.
@item txt_duration
Sets the display duration of the decoded teletext pages or subtitles in
miliseconds. Default value is 30000 which is 30 seconds.
milliseconds. Default value is 30000 which is 30 seconds.
@item txt_transparent
Force transparent background of the generated teletext bitmaps. Default value
is 0 which means an opaque background.

View File

@ -254,19 +254,6 @@ This demuxer is used to demux FLV files and RTMP network streams.
Allocate the streams according to the onMetaData array content.
@end table
@section libgme
The Game Music Emu library is a collection of video game music file emulators.
See @url{http://code.google.com/p/game-music-emu/} for more information.
Some files have multiple tracks. The demuxer will pick the first track by
default. The @option{track_index} option can be used to select a different
track. Track indexes start at 0. The demuxer exports the number of tracks as
@var{tracks} meta data entry.
For very large files, the @option{max_size} option may have to be adjusted.
@section gif
Animated GIF demuxer.
@ -441,6 +428,46 @@ ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
@end example
@end itemize
@section libgme
The Game Music Emu library is a collection of video game music file emulators.
See @url{http://code.google.com/p/game-music-emu/} for more information.
Some files have multiple tracks. The demuxer will pick the first track by
default. The @option{track_index} option can be used to select a different
track. Track indexes start at 0. The demuxer exports the number of tracks as
@var{tracks} meta data entry.
For very large files, the @option{max_size} option may have to be adjusted.
@section libopenmpt
libopenmpt based module demuxer
See @url{https://lib.openmpt.org/libopenmpt/} for more information.
Some files have multiple subsongs (tracks) this can be set with the @option{subsong}
option.
It accepts the following options:
@table @option
@item subsong
Set the subsong index. This can be either 'all', 'auto', or the index of the
subsong. Subsong indexes start at 0. The default is 'auto'.
The default value is to let libopenmpt choose.
@item layout
Set the channel layout. Valid values are 1, 2, and 4 channel layouts.
The default value is STEREO.
@item sample_rate
Set the sample rate for libopenmpt to output.
Range is from 1000 to INT_MAX. The value default is 48000.
@end table
@section mov/mp4/3gp/QuickTime
QuickTime / MP4 demuxer.

View File

@ -246,8 +246,8 @@ For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
@section Development Policy
@enumerate
@item
@subsection Patches/Committing
@subheading Licenses for patches must be compatible with FFmpeg.
Contributions should be licensed under the
@uref{http://www.gnu.org/licenses/lgpl-2.1.html, LGPL 2.1},
including an "or any later version" clause, or, if you prefer
@ -260,15 +260,15 @@ preferred.
If you add a new file, give it a proper license header. Do not copy and
paste it from a random place, use an existing file as template.
@item
You must not commit code which breaks FFmpeg! (Meaning unfinished but
enabled code which breaks compilation or compiles but does not work or
breaks the regression tests)
You can commit unfinished stuff (for testing etc), but it must be disabled
(#ifdef etc) by default so it does not interfere with other developers'
work.
@subheading You must not commit code which breaks FFmpeg!
This means unfinished code which is enabled and breaks compilation,
or compiles but does not work/breaks the regression tests. Code which
is unfinished but disabled may be permitted under-circumstances, like
missing samples or an implementation with a small subset of features.
Always check the mailing list for any reviewers with issues and test
FATE before you push.
@item
@subheading Keep the main commit message short with an extended description below.
The commit message should have a short first line in the form of
a @samp{topic: short description} as a header, separated by a newline
from the body consisting of an explanation of why the change is necessary.
@ -276,30 +276,24 @@ If the commit fixes a known bug on the bug tracker, the commit message
should include its bug ID. Referring to the issue on the bug tracker does
not exempt you from writing an excerpt of the bug in the commit message.
@item
You do not have to over-test things. If it works for you, and you think it
should work for others, then commit. If your code has problems
(portability, triggers compiler bugs, unusual environment etc) they will be
reported and eventually fixed.
@subheading Testing must be adequate but not excessive.
If it works for you, others, and passes FATE then it should be OK to commit
it, provided it fits the other committing criteria. You should not worry about
over-testing things. If your code has problems (portability, triggers
compiler bugs, unusual environment etc) they will be reported and eventually
fixed.
@item
Do not commit unrelated changes together, split them into self-contained
pieces. Also do not forget that if part B depends on part A, but A does not
depend on B, then A can and should be committed first and separate from B.
Keeping changes well split into self-contained parts makes reviewing and
understanding them on the commit log mailing list easier. This also helps
in case of debugging later on.
@subheading Do not commit unrelated changes together.
They should be split them into self-contained pieces. Also do not forget
that if part B depends on part A, but A does not depend on B, then A can
and should be committed first and separate from B. Keeping changes well
split into self-contained parts makes reviewing and understanding them on
the commit log mailing list easier. This also helps in case of debugging
later on.
Also if you have doubts about splitting or not splitting, do not hesitate to
ask/discuss it on the developer mailing list.
@item
Do not change behavior of the programs (renaming options etc) or public
API or ABI without first discussing it on the ffmpeg-devel mailing list.
Do not remove functionality from the code. Just improve!
Note: Redundant code can be removed.
@item
@subheading Ask before you change the build system (configure, etc).
Do not commit changes to the build system (Makefiles, configure script)
which change behavior, defaults etc, without asking first. The same
applies to compiler warning fixes, trivial looking fixes and to code
@ -308,7 +302,7 @@ the way we do. Send your changes as patches to the ffmpeg-devel mailing
list, and if the code maintainers say OK, you may commit. This does not
apply to files you wrote and/or maintain.
@item
@subheading Cosmetic changes should be kept in separate patches.
We refuse source indentation and other cosmetic changes if they are mixed
with functional changes; such commits will be rejected and removed. Every
developer has their own indentation style; you should not change it. Of course
@ -322,7 +316,7 @@ NOTE: If you had to put if()@{ .. @} over a large (> 5 lines) chunk of code,
then either do NOT change the indentation of the inner part within (do not
move it to the right)! or do so in a separate commit
@item
@subheading Commit messages should always be filled out properly.
Always fill out the commit log message. Describe in a few lines what you
changed and why. You can refer to mailing list postings if you fix a
particular bug. Comments such as "fixed!" or "Changed it." are unacceptable.
@ -334,47 +328,31 @@ area changed: Short 1 line description
details describing what and why and giving references.
@end example
@item
@subheading Credit the author of the patch.
Make sure the author of the commit is set correctly. (see git commit --author)
If you apply a patch, send an
answer to ffmpeg-devel (or wherever you got the patch from) saying that
you applied the patch.
@item
@subheading Complex patches should refer to discussion surrounding them.
When applying patches that have been discussed (at length) on the mailing
list, reference the thread in the log message.
@item
@subheading Always wait long enough before pushing changes
Do NOT commit to code actively maintained by others without permission.
Send a patch to ffmpeg-devel instead. If no one answers within a reasonable
timeframe (12h for build failures and security fixes, 3 days small changes,
Send a patch to ffmpeg-devel. If no one answers within a reasonable
time-frame (12h for build failures and security fixes, 3 days small changes,
1 week for big patches) then commit your patch if you think it is OK.
Also note, the maintainer can simply ask for more time to review!
@item
Subscribe to the ffmpeg-cvslog mailing list. The diffs of all commits
are sent there and reviewed by all the other developers. Bugs and possible
improvements or general questions regarding commits are discussed there. We
expect you to react if problems with your code are uncovered.
@subsection Code
@subheading API/ABI changes should be discussed before they are made.
Do not change behavior of the programs (renaming options etc) or public
API or ABI without first discussing it on the ffmpeg-devel mailing list.
Do not remove widely used functionality or features (redundant code can be removed).
@item
Update the documentation if you change behavior or add features. If you are
unsure how best to do this, send a patch to ffmpeg-devel, the documentation
maintainer(s) will review and commit your stuff.
@item
Try to keep important discussions and requests (also) on the public
developer mailing list, so that all developers can benefit from them.
@item
Never write to unallocated memory, never write over the end of arrays,
always check values read from some untrusted source before using them
as array index or other risky things.
@item
Remember to check if you need to bump versions for the specific libav*
parts (libavutil, libavcodec, libavformat) you are changing. You need
to change the version integer.
@subheading Remember to check if you need to bump versions for libav*.
Depending on the change, you may need to change the version integer.
Incrementing the first component means no backward compatibility to
previous versions (e.g. removal of a function from the public API).
Incrementing the second component means backward compatible change
@ -384,7 +362,7 @@ Incrementing the third component means a noteworthy binary compatible
change (e.g. encoder bug fix that matters for the decoder). The third
component always starts at 100 to distinguish FFmpeg from Libav.
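For illustration, a micro-version bump in @file{libavcodec/version.h} might
look like this (a sketch; the numbers shown are hypothetical):
@example
#define LIBAVCODEC_VERSION_MAJOR  57
#define LIBAVCODEC_VERSION_MINOR  64
#define LIBAVCODEC_VERSION_MICRO 101
@end example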
@item
@subheading Warnings for correct code may be disabled if there is no other option.
Compiler warnings indicate potential bugs or code with bad style. If a type of
warning always points to correct and clean code, that warning should
be disabled, not the code changed.
@ -393,13 +371,33 @@ If it is a bug, the bug has to be fixed. If it is not, the code should
be changed to not generate a warning unless that causes a slowdown
or obfuscates the code.
@item
@subheading Check untrusted input properly.
Never write to unallocated memory, never write over the end of arrays,
always check values read from some untrusted source before using them
as array index or other risky things.
@subsection Documentation/Other
@subheading Subscribe to the ffmpeg-cvslog mailing list.
It is important to do this as the diffs of all commits are sent there and
reviewed by all the other developers. Bugs and possible improvements or
general questions regarding commits are discussed there. We expect you to
react if problems with your code are uncovered.
@subheading Keep the documentation up to date.
Update the documentation if you change behavior or add features. If you are
unsure how best to do this, send a patch to ffmpeg-devel, the documentation
maintainer(s) will review and commit your stuff.
@subheading Important discussions should be accessible to all.
Try to keep important discussions and requests (also) on the public
developer mailing list, so that all developers can benefit from them.
@subheading Check your entries in MAINTAINERS.
Make sure that no parts of the codebase that you maintain are missing from the
@file{MAINTAINERS} file. If something that you want to maintain is missing, add it with
your name after it.
If at some point you no longer want to maintain some code, then please help in
finding a new maintainer and also don't forget to update the @file{MAINTAINERS} file.
@end enumerate
We think our rules are not too hard. If you have comments, contact us.
@ -466,7 +464,11 @@ Patches should be posted to the
mailing list. Use @code{git send-email} when possible since it will properly
send patches without requiring extra care. If you cannot, then send patches
as base64-encoded attachments, so your patch is not trashed during
transmission.
transmission. Also ensure the correct mime type is used
(text/x-diff or text/x-patch or at least text/plain) and that only one
patch is inline or attached per mail.
You can check @url{https://patchwork.ffmpeg.org}; if your patch does not show up,
its mime type likely was wrong.
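For reference, a typical submission could look like this (a sketch; the patch
file name is hypothetical):
@example
git format-patch -1
git send-email --to=ffmpeg-devel@@ffmpeg.org 0001-lavc-fix-foo.patch
@end example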
Your patch will be reviewed on the mailing list. You will likely be asked
to make some changes and are expected to send in an improved version that

View File

@ -61,8 +61,9 @@ Two loop searching (TLS) method.
This method first sets quantizers depending on band thresholds and then tries
to find an optimal combination by adding or subtracting a specific value from
all quantizers and adjusting some individual quantizer a little.
Will tune itself based on whether aac_is/aac_ms/aac_pns are enabled.
all quantizers and adjusting some individual quantizer a little. Will tune
itself based on whether @option{aac_is}, @option{aac_ms} and @option{aac_pns}
are enabled.
This is the default choice for a coder.
@item anmr
@ -84,7 +85,7 @@ Not recommended.
@end table
@item aac_ms
Sets mid/side coding mode. The default value of auto will automatically use
Sets mid/side coding mode. The default value of "auto" will automatically use
M/S with bands which will benefit from such coding. Can be forced for all bands
using the value "enable", which is mainly useful for debugging or disabled using
"disable".
@ -130,19 +131,19 @@ The default, AAC "Low-complexity" profile. Is the most compatible and produces
decent quality.
@item mpeg2_aac_low
Equivalent to -profile:a aac_low -aac_pns 0. PNS was introduced with the MPEG4
specifications.
Equivalent to @code{-profile:a aac_low -aac_pns 0}. PNS was introduced with the
MPEG4 specifications.
@item aac_ltp
Long term prediction profile, is enabled by and will enable the aac_ltp option.
Introduced in MPEG4.
Long term prediction profile, is enabled by and will enable the @option{aac_ltp}
option. Introduced in MPEG4.
@item aac_main
Main-type prediction profile, is enabled by and will enable the aac_pred option.
Introduced in MPEG2.
Main-type prediction profile, is enabled by and will enable the @option{aac_pred}
option. Introduced in MPEG2.
If this option is unspecified it is set to @samp{aac_low}.
@end table
If this option is unspecified it is set to @samp{aac_low}.
@end table
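For example, the main-type prediction profile could be selected like this (a
sketch; file names are hypothetical):
@example
ffmpeg -i input.wav -c:a aac -profile:a aac_main output.m4a
@end example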
@section ac3 and ac3_fixed
@ -611,111 +612,6 @@ and slightly improves compression.
@end table
@anchor{libfaac}
@section libfaac
libfaac AAC (Advanced Audio Coding) encoder wrapper.
This encoder is of much lower quality and is more unstable than any other AAC
encoders, so it's highly recommended to instead use other encoders, like
@ref{aacenc,,the native FFmpeg AAC encoder}.
This encoder also requires the presence of the libfaac headers and library
during configuration. You need to explicitly configure the build with
@code{--enable-libfaac --enable-nonfree}.
@subsection Options
The following shared FFmpeg codec options are recognized.
The following options are supported by the libfaac wrapper. The
@command{faac}-equivalent of the options are listed in parentheses.
@table @option
@item b (@emph{-b})
Set bit rate in bits/s for ABR (Average Bit Rate) mode. If the bit rate
is not explicitly specified, it is automatically set to a suitable
value depending on the selected profile. @command{faac} bitrate is
expressed in kilobits/s.
Note that libfaac does not support CBR (Constant Bit Rate) but only
ABR (Average Bit Rate).
If VBR mode is enabled this option is ignored.
@item ar (@emph{-R})
Set audio sampling rate (in Hz).
@item ac (@emph{-c})
Set the number of audio channels.
@item cutoff (@emph{-C})
Set cutoff frequency. If not specified (or explicitly set to 0) it
will use a value automatically computed by the library. Default value
is 0.
@item profile
Set audio profile.
The following profiles are recognized:
@table @samp
@item aac_main
Main AAC (Main)
@item aac_low
Low Complexity AAC (LC)
@item aac_ssr
Scalable Sample Rate (SSR)
@item aac_ltp
Long Term Prediction (LTP)
@end table
If not specified it is set to @samp{aac_low}.
@item flags +qscale
Set constant quality VBR (Variable Bit Rate) mode.
@item global_quality
Set quality in VBR mode as an integer number of lambda units.
Only relevant when VBR mode is enabled with @code{flags +qscale}. The
value is converted to QP units by dividing it by @code{FF_QP2LAMBDA},
and used to set the quality value used by libfaac. A reasonable range
for the option value in QP units is [10-500], the higher the value the
higher the quality.
@item q (@emph{-q})
Enable VBR mode when set to a non-negative value, and set constant
quality value as a double floating point value in QP units.
The value sets the quality value used by libfaac. A reasonable range
for the option value is [10-500], the higher the value the higher the
quality.
This option is valid only using the @command{ffmpeg} command-line
tool. For library interface users, use @option{global_quality}.
@end table
@subsection Examples
@itemize
@item
Use @command{ffmpeg} to convert an audio file to ABR 128 kbps AAC in an M4A (MP4)
container:
@example
ffmpeg -i input.wav -codec:a libfaac -b:a 128k -output.m4a
@end example
@item
Use @command{ffmpeg} to convert an audio file to VBR AAC, using the
LTP AAC profile:
@example
ffmpeg -i input.wav -c:a libfaac -profile:a aac_ltp -q:a 100 output.m4a
@end example
@end itemize
@anchor{libfdk-aac-enc}
@section libfdk_aac
@ -1184,6 +1080,17 @@ following: 4000, 6000, 8000, 12000, or 20000, corresponding to
narrowband, mediumband, wideband, super wideband, and fullband
respectively. The default is 0 (cutoff disabled).
@item mapping_family (@emph{mapping_family})
Set channel mapping family to be used by the encoder. The default value of -1
uses mapping family 0 for mono and stereo inputs, and mapping family 1
otherwise. The default also disables the surround masking and LFE bandwidth
optimizations in libopus, and requires that the input contains 8 channels or
fewer.
Other values include 0 for mono and stereo, 1 for surround sound with masking
and LFE bandwidth optimizations, and 255 for independent streams with an
unspecified channel layout.
@end table
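For example, surround input could be encoded with the channel mapping family
forced to 1 like this (a sketch; file names are hypothetical):
@example
ffmpeg -i surround.wav -c:a libopus -mapping_family 1 output.opus
@end example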
@section libvorbis
@ -1390,7 +1297,7 @@ is 0. This is only used when @option{slice_mode} is set to
@samp{fixed}.
@item slice_mode
Set slice mode. Can assume one of the follwing possible values:
Set slice mode. Can assume one of the following possible values:
@table @samp
@item fixed
@ -1866,7 +1773,7 @@ Enable CAVLC and disable CABAC. It generates the same effect as
@end table
@item cmp
Set full pixel motion estimation comparation algorithm. Possible values:
Set full pixel motion estimation comparison algorithm. Possible values:
@table @samp
@item chroma
@ -2097,7 +2004,7 @@ ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
@item a53cc @var{boolean}
Import closed captions (which must be ATSC compatible format) into output.
Only the mpeg2 and h264 decoders provide these. Default is 0 (off).
Only the mpeg2 and h264 decoders provide these. Default is 1 (on).
@item x264-params (N.A.)
Override the x264 configuration using a :-separated list of key=value

View File

@ -33,12 +33,12 @@
#include <libavutil/opt.h>
#include <unistd.h>
void process_client(AVIOContext *client, const char *in_uri)
static void process_client(AVIOContext *client, const char *in_uri)
{
AVIOContext *input = NULL;
uint8_t buf[1024];
int ret, n, reply_code;
char *resource = NULL;
uint8_t *resource = NULL;
while ((ret = avio_handshake(client)) > 0) {
av_opt_get(client, "resource", AV_OPT_SEARCH_CHILDREN, &resource);
// check for strlen(resource) is necessary, because av_opt_get()
@ -97,11 +97,11 @@ end:
int main(int argc, char **argv)
{
av_log_set_level(AV_LOG_TRACE);
AVDictionary *options = NULL;
AVIOContext *client = NULL, *server = NULL;
const char *in_uri, *out_uri;
int ret, pid;
av_log_set_level(AV_LOG_TRACE);
if (argc < 3) {
printf("usage: %s input http://hostname[:port]\n"
"API example program to serve http to multiple clients.\n"

View File

@ -311,18 +311,18 @@ invoking ffmpeg with several @option{-i} options.
For audio, to put all channels together in a single stream (example: two
mono streams into one stereo stream): this is sometimes called
@emph{merging} them, and can be done using the
@url{https://ffmpeg.org/ffmpeg-filters.html#amerge, @code{amerge}} filter.
@url{ffmpeg-filters.html#amerge, @code{amerge}} filter.
@item
For audio, to play one on top of the other: this is called @emph{mixing}
them, and can be done by first merging them into a single stream and then
using the @url{https://ffmpeg.org/ffmpeg-filters.html#pan, @code{pan}} filter to mix
using the @url{ffmpeg-filters.html#pan, @code{pan}} filter to mix
the channels at will.
@item
For video, to display both together, side by side or one on top of a part of
the other; it can be done using the
@url{https://ffmpeg.org/ffmpeg-filters.html#overlay, @code{overlay}} video filter.
@url{ffmpeg-filters.html#overlay, @code{overlay}} video filter.
@end itemize
@ -333,19 +333,19 @@ There are several solutions, depending on the exact circumstances.
@subsection Concatenating using the concat @emph{filter}
FFmpeg has a @url{https://ffmpeg.org/ffmpeg-filters.html#concat,
FFmpeg has a @url{ffmpeg-filters.html#concat,
@code{concat}} filter designed specifically for that, with examples in the
documentation. This operation is recommended if you need to re-encode.
@subsection Concatenating using the concat @emph{demuxer}
FFmpeg has a @url{https://www.ffmpeg.org/ffmpeg-formats.html#concat,
FFmpeg has a @url{ffmpeg-formats.html#concat,
@code{concat}} demuxer which you can use when you want to avoid a re-encode and
your format doesn't support file level concatenation.
@subsection Concatenating using the concat @emph{protocol} (file level)
FFmpeg has a @url{https://ffmpeg.org/ffmpeg-protocols.html#concat,
FFmpeg has a @url{ffmpeg-protocols.html#concat,
@code{concat}} protocol designed specifically for that, with examples in the
documentation.
@ -485,7 +485,7 @@ scaling adjusts the SAR to keep the DAR constant.
If you want to stretch, or “unstretch”, the image, you need to override the
information with the
@url{https://ffmpeg.org/ffmpeg-filters.html#setdar_002c-setsar, @code{setdar or setsar filters}}.
@url{ffmpeg-filters.html#setdar_002c-setsar, @code{setdar or setsar filters}}.
Do not forget to examine carefully the original video to check whether the
stretching comes from the image or from the aspect ratio information.

View File

@ -223,7 +223,7 @@ with the highest resolution, for audio, it is the stream with the most channels,
for subtitles, it is the first subtitle stream. In the case where several streams of
the same type rate equally, the stream with the lowest index is chosen.
You can disable some of those defaults by using the @code{-vn/-an/-sn} options. For
You can disable some of those defaults by using the @code{-vn/-an/-sn/-dn} options. For
full manual control, use the @code{-map} option, which disables the defaults just
described.
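For example, the video and data streams could be dropped while keeping the
remaining defaults (a sketch; file names are hypothetical):
@example
ffmpeg -i input.mkv -vn -dn output.mkv
@end example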
@ -1279,6 +1279,15 @@ No packets were passed to the muxer, the output is empty.
@item -xerror (@emph{global})
Stop and exit on error
@item -max_muxing_queue_size @var{packets} (@emph{output,per-stream})
When transcoding audio and/or video streams, ffmpeg will not begin writing into
the output until it has one packet for each such stream. While waiting for that
to happen, packets for other streams are buffered. This option sets the size of
this buffer, in packets, for the matching output stream.
The default value of this option should be high enough for most uses, so only
touch this option if you are sure that you need it.
@end table
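For example, the buffer could be enlarged for an output whose streams start
far apart (a sketch; the value and file names are hypothetical):
@example
ffmpeg -i input.mkv -max_muxing_queue_size 1024 output.mp4
@end example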
As a special exception, you can use a bitmap subtitle stream as input: it

View File

@ -245,7 +245,7 @@ continue reading from that.
Each interval is specified by two optional parts, separated by "%".
The first part specifies the interval start position. It is
interpreted as an abolute position, or as a relative offset from the
interpreted as an absolute position, or as a relative offset from the
current position if it is preceded by the "+" character. If this first
part is not specified, no seeking will be performed when reading this
interval.

View File

@ -129,6 +129,7 @@
<xsd:complexType name="frameSideDataType">
<xsd:attribute name="side_data_type" type="xsd:string"/>
<xsd:attribute name="side_data_size" type="xsd:int" />
<xsd:attribute name="timecode" type="xsd:string"/>
</xsd:complexType>
<xsd:complexType name="subtitleType">
@ -165,6 +166,7 @@
<xsd:attribute name="visual_impaired" type="xsd:int" use="required" />
<xsd:attribute name="clean_effects" type="xsd:int" use="required" />
<xsd:attribute name="attached_pic" type="xsd:int" use="required" />
<xsd:attribute name="timed_thumbnails" type="xsd:int" use="required" />
</xsd:complexType>
<xsd:complexType name="streamType">
@ -200,6 +202,7 @@
<xsd:attribute name="color_transfer" type="xsd:string"/>
<xsd:attribute name="color_primaries" type="xsd:string"/>
<xsd:attribute name="chroma_location" type="xsd:string"/>
<xsd:attribute name="field_order" type="xsd:string"/>
<xsd:attribute name="timecode" type="xsd:string"/>
<xsd:attribute name="refs" type="xsd:int"/>

View File

@ -317,7 +317,7 @@ StartSendOnKey
#AVPresetVideo baseline
#AVOptionVideo flags +global_header
#
#AudioCodec libfaac
#AudioCodec aac
#AudioBitRate 32
#AudioChannels 2
#AudioSampleRate 22050

File diff suppressed because it is too large Load Diff

View File

@ -61,6 +61,10 @@ Reduce the latency introduced by optional buffering
Only write platform-, build- and time-independent data.
This ensures that file and data checksums are reproducible and match between
platforms. Its primary use is for regression testing.
@item shortest
Stop muxing at the end of the shortest stream.
It may be necessary to increase max_interleave_delta to avoid flushing the longer
streams before EOF.
@end table
@item seek2any @var{integer} (@emph{input})

View File

@ -103,12 +103,19 @@ enable it.
@section OpenH264
FFmpeg can make use of the OpenH264 library for H.264 encoding.
FFmpeg can make use of the OpenH264 library for H.264 encoding and decoding.
Go to @url{http://www.openh264.org/} and follow the instructions for
installing the library. Then pass @code{--enable-libopenh264} to configure to
enable it.
For decoding, this library is much more limited than the built-in decoder
in libavcodec; currently, this library lacks support for decoding B-frames
and some other main/high profile features. (It currently only supports
constrained baseline profile and CABAC.) Using it is mostly useful for
testing and for taking advantage of Cisco's patent portfolio license
(@url{http://www.openh264.org/BINARY_LICENSE.txt}).
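For example, decoding through the wrapper instead of the built-in decoder
could be requested like this (a sketch; it assumes a build configured with
@code{--enable-libopenh264}, and the file names are hypothetical):
@example
ffmpeg -c:v libopenh264 -i input.h264 output.y4m
@end example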
@section x264
FFmpeg can make use of the x264 library for H.264 encoding.
@ -502,7 +509,7 @@ library:
@tab Used on the Nintendo GameCube.
@item Tiertex Limited SEQ @tab @tab X
@tab Tiertex .seq files used in the DOS CD-ROM version of the game Flashback.
@item True Audio @tab @tab X
@item True Audio @tab X @tab X
@item VAG @tab @tab X
@tab Audio format used in many Sony PS2 games.
@item VC-1 test bitstream @tab X @tab X
@ -679,6 +686,8 @@ following image formats are supported:
@tab fourcc: DUCK
@item Duck TrueMotion 2.0 @tab @tab X
@tab fourcc: TM20
@item Duck TrueMotion 2.0 RT @tab @tab X
@tab fourcc: TR20
@item DV (Digital Video) @tab X @tab X
@item Dxtory capture format @tab @tab X
@item Feeble Files/ScummVM DXA @tab @tab X
@ -745,7 +754,7 @@ following image formats are supported:
@item LucasArts SANM/Smush @tab @tab X
@tab Used in LucasArts games / SMUSH animations.
@item lossless MJPEG @tab X @tab X
@item MagicYUV Lossless Video @tab @tab X
@item MagicYUV Video @tab @tab X
@item Microsoft ATC Screen @tab @tab X
@tab Also known as Microsoft Screen 3.
@item Microsoft Expression Encoder Screen @tab @tab X
@ -878,7 +887,7 @@ following image formats are supported:
@item 8SVX exponential @tab @tab X
@item 8SVX fibonacci @tab @tab X
@item AAC @tab EX @tab X
@tab encoding supported through internal encoder and external libraries libfaac and libfdk-aac
@tab encoding supported through internal encoder and external library libfdk-aac
@item AAC+ @tab E @tab IX
@tab encoding supported through external library libfdk-aac
@item AC-3 @tab IX @tab IX
@ -988,7 +997,7 @@ following image formats are supported:
@item Interplay ACM @tab @tab X
@item MACE (Macintosh Audio Compression/Expansion) 3:1 @tab @tab X
@item MACE (Macintosh Audio Compression/Expansion) 6:1 @tab @tab X
@item MLP (Meridian Lossless Packing) @tab @tab X
@item MLP (Meridian Lossless Packing) @tab X @tab X
@tab Used in DVD-Audio discs.
@item Monkey's Audio @tab @tab X
@item MP1 (MPEG audio layer 1) @tab @tab IX
@ -1055,7 +1064,7 @@ following image formats are supported:
@tab supported through external library libspeex
@item TAK (Tom's lossless Audio Kompressor) @tab @tab X
@item True Audio (TTA) @tab X @tab X
@item TrueHD @tab @tab X
@item TrueHD @tab X @tab X
@tab Used in HD-DVD and Blu-Ray discs.
@item TwinVQ (VQF flavor) @tab @tab X
@item VIMA @tab @tab X

View File

@ -251,6 +251,32 @@ To use this option, ffmpeg needs to be compiled with @code{--enable-libzvbi}.
Defines number of audio channels to capture. Must be @samp{2}, @samp{8} or @samp{16}.
Defaults to @samp{2}.
@item duplex_mode
Sets the decklink device duplex mode. Must be @samp{unset}, @samp{half} or @samp{full}.
Defaults to @samp{unset}.
@item video_input
Sets the video input source. Must be @samp{unset}, @samp{sdi}, @samp{hdmi},
@samp{optical_sdi}, @samp{component}, @samp{composite} or @samp{s_video}.
Defaults to @samp{unset}.
@item audio_input
Sets the audio input source. Must be @samp{unset}, @samp{embedded},
@samp{aes_ebu}, @samp{analog}, @samp{analog_xlr}, @samp{analog_rca} or
@samp{microphone}. Defaults to @samp{unset}.
@item video_pts
Sets the video packet timestamp source. Must be @samp{video}, @samp{audio},
@samp{reference} or @samp{wallclock}. Defaults to @samp{video}.
@item audio_pts
Sets the audio packet timestamp source. Must be @samp{video}, @samp{audio},
@samp{reference} or @samp{wallclock}. Defaults to @samp{audio}.
@item draw_bars
If set to @samp{true}, color bars are drawn in the event of a signal loss.
Defaults to @samp{true}.
@end table
@subsection Examples
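@itemize
@item
Capture from an SDI input with embedded audio (a sketch; the device name and
mode number are hypothetical):
@example
ffmpeg -f decklink -video_input sdi -audio_input embedded -i 'DeckLink Mini Recorder@@9' output.avi
@end example
@end itemize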

98
doc/libav-merge.txt Normal file
View File

@ -0,0 +1,98 @@
CONTEXT
=======
The FFmpeg project merges all the changes from the Libav project
(https://libav.org) since the origin of the fork (around 2011).
With the exceptions of some commits due to technical/political disagreements or
issues, the changes are merged on a more or less regular schedule (daily for
years thanks to Michael, but more sparsely nowadays).
WHY
===
The majority of the active developers believe the project needs to keep this
policy for various reasons.
The most important one is that we don't want our users to have to choose
between two distributors of libraries of the exact same name in order to have a
different set of features and bugfixes. By taking the responsibility of
unifying the two codebases, we allow users to benefit from the changes from the
two teams.
Today, FFmpeg has a much larger user base (we are distributed by every
major distribution), so we consider this mission a priority.
A different approach to the merge could have been to pick the changes we are
interested in and drop most of the cosmetics and other less important changes.
Unfortunately, this makes the following picks much harder, especially since the
Libav project is involved in various deep API changes. As a result, we decide
to virtually take everything done there.
Any Libav developer is of course welcome anytime to contribute directly to the
FFmpeg tree. Of course, we fully understand and are forced to accept that very
few Libav developers are interested in doing so, but we still want to recognize
their work. This leads us to create merge commits for every single one from
Libav. The original commit appears totally unchanged with full authorship in
our history (and the conflicts are solved in the merge one). That way, not a
single thing from Libav will be lost in the future in case some reunification
happens, or if that project disappears one way or another.
DOWNSIDES
=========
Of course, there are many downsides to this approach.
- It causes non-negligible merge commit pollution. We make sure there are
not several levels of merges entangled (we do a 1:1 merge/commit), but it's
still a non-linear history.
- Much duplicated work. For instance, we added libavresample in our tree to
keep compatibility with Libav when our libswresample was already covering the
exact same purpose. The same thing happened for various elements such as the
ProRes support (but with differences in features, bugs, licenses, ...). There is
much work to do to unify them, and any help is very much welcome.
- So much manpower from both FFmpeg and Libav is lost because of this mess. We
know it, and we don't know how to fix it. It takes an incredible amount of time
to do these merges, so we have even less time to work on things we personally
care about. The bad vibes also do not help with keeping our developers motivated.
- There is a growing technical risk factor with the merges due to the codebase
differing more and more.
MERGE GUIDELINES
================
The following gives developer guidelines on how to proceed when merging Libav commits.
Before starting, you can reduce the risk of errors on merge conflicts by using
a different merge conflict style:
$ git config --global merge.conflictstyle diff3
tools/libav-merge-next-commit is a script to help merging the next commit in
the queue. It assumes a remote named libav. It has two modes: merge, and noop.
The noop mode creates a merge with no change to the HEAD. You can pass a hash
as extra argument to reference a justification (it is common that we already
have the change done in FFmpeg).
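A hypothetical session could look like this (the hash below is made up):

$ tools/libav-merge-next-commit merge
$ tools/libav-merge-next-commit noop 0123456789abcdef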
Also see tools/murge: you can copy and paste a 3-way conflict into its stdin
and it will display colored diffs. Any arguments to murge (like ones to suppress
whitespace differences) are passed into colordiff.
TODO/FIXME/UNMERGED
===================
Stuff that didn't reach the codebase:
-------------------------------------
- HEVC DSP and x86 MC SIMD improvements from Libav (see https://ffmpeg.org/pipermail/ffmpeg-devel/2015-December/184777.html)
- QSV HWContext integration (04b17ff and 130e1f1), QSV scaling filter (62c58c5)
Collateral damage that needs work locally:
------------------------------------------
- Merge proresdec2.c and proresdec_lgpl.c
- Merge proresenc_anatoliy.c and proresenc_kostya.c
- Remove ADVANCED_PARSER in libavcodec/hevc_parser.c

View File

@ -129,6 +129,27 @@ and the input video converted to MPEG-2 video, use the command:
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f crc -
@end example
@section flv
Adobe Flash Video Format muxer.
This muxer accepts the following options:
@table @option
@item flvflags @var{flags}
Possible values:
@table @samp
@item aac_seq_header_detect
Place AAC sequence header based on audio stream data.
@item no_sequence_end
Disable sequence end tag.
@end table
@end table
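For example, a remux to FLV without a sequence end tag could look like this
(a sketch; file names are hypothetical):
@example
ffmpeg -i input.mp4 -c copy -f flv -flvflags no_sequence_end output.flv
@end example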
@anchor{framecrc}
@section framecrc
@ -357,6 +378,12 @@ segmentation.
This muxer supports the following options:
@table @option
@item hls_init_time @var{seconds}
Set the initial target segment length in seconds. Default value is @var{0}.
Segments will be cut on the next key frame after this time has passed on the
first m3u8 list. After the initial playlist is filled, @command{ffmpeg} will
cut segments at durations equal to @code{hls_time}.
@item hls_time @var{seconds}
Set the target segment length in seconds. Default value is 2.
Segment will be cut on the next key frame after this time has passed.
@ -396,10 +423,10 @@ which can be cyclic, for example if the @option{wrap} option is
specified.
@item hls_segment_filename @var{filename}
Set the segment filename. Unless hls_flags single_file is set @var{filename}
is used as a string format with the segment number:
Set the segment filename. Unless @code{hls_flags single_file} is set,
@var{filename} is used as a string format with the segment number:
@example
ffmpeg in.nut -hls_segment_filename 'file%03d.ts' out.m3u8
ffmpeg -i in.nut -hls_segment_filename 'file%03d.ts' out.m3u8
@end example
This example will produce the playlist, @file{out.m3u8}, and segment files:
@file{file000.ts}, @file{file001.ts}, @file{file002.ts}, etc.
@ -408,7 +435,7 @@ This example will produce the playlist, @file{out.m3u8}, and segment files:
Use strftime on @var{filename} to expand the segment filename with localtime.
The segment number (%d) is not available in this mode.
@example
ffmpeg in.nut -use_localtime 1 -hls_segment_filename 'file-%Y%m%d-%s.ts' out.m3u8
ffmpeg -i in.nut -use_localtime 1 -hls_segment_filename 'file-%Y%m%d-%s.ts' out.m3u8
@end example
This example will produce the playlist, @file{out.m3u8}, and segment files:
@file{file-20160215-1455569023.ts}, @file{file-20160215-1455569024.ts}, etc.
@ -417,7 +444,7 @@ This example will produce the playlist, @file{out.m3u8}, and segment files:
Used together with -use_localtime, it will create up to one subdirectory which
is expanded in @var{filename}.
@example
ffmpeg in.nut -use_localtime 1 -use_localtime_mkdir 1 -hls_segment_filename '%Y%m%d/file-%Y%m%d-%s.ts' out.m3u8
ffmpeg -i in.nut -use_localtime 1 -use_localtime_mkdir 1 -hls_segment_filename '%Y%m%d/file-%Y%m%d-%s.ts' out.m3u8
@end example
This example will create a directory 20160215 (if it does not exist), and then
produce the playlist, @file{out.m3u8}, and segment files:
@ -495,6 +522,30 @@ Will produce the playlist, @file{out.m3u8}, and a single segment file,
Segment files removed from the playlist are deleted after a period of time
equal to the duration of the segment plus the duration of the playlist.
@item hls_flags append_list
Append new segments to the end of the old segment list,
and remove the @code{#EXT-X-ENDLIST} from the old segment list.
@item hls_flags round_durations
Round the duration info in the playlist file segment info to integer
values, instead of using floating point.
@item hls_flags discont_start
Add the @code{#EXT-X-DISCONTINUITY} tag to the playlist, before the
first segment's information.
@item hls_flags omit_endlist
Do not append the @code{EXT-X-ENDLIST} tag at the end of the playlist.
@item hls_flags split_by_time
Allow segments to start on frames other than keyframes. This improves
behavior on some players when the time between keyframes is inconsistent,
but may make things worse on others, and can cause some oddities during
seeking. This flag should be used with the @code{hls_time} option.
@item hls_flags program_date_time
Generate @code{EXT-X-PROGRAM-DATE-TIME} tags.
@item hls_playlist_type event
Emit @code{#EXT-X-PLAYLIST-TYPE:EVENT} in the m3u8 header. Forces
@option{hls_list_size} to 0; the playlist can only be appended to.
@ -502,6 +553,17 @@ Emit @code{#EXT-X-PLAYLIST-TYPE:EVENT} in the m3u8 header. Forces
@item hls_playlist_type vod
Emit @code{#EXT-X-PLAYLIST-TYPE:VOD} in the m3u8 header. Forces
@option{hls_list_size} to 0; the playlist must not change.
@item method
Use the given HTTP method to create the hls files.
@example
ffmpeg -re -i in.ts -f hls -method PUT http://example.com/live/out.m3u8
@end example
This example will upload all the mpegts segment files to the HTTP
server using the HTTP PUT method, and update the m3u8 playlist files
using the same method whenever they are refreshed.
Note that the HTTP server must support the given method for uploading
files.
@end table
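For example, a rolling live playlist that keeps only the most recent segments
on disk could be produced like this (a sketch; the values and file names are
hypothetical):
@example
ffmpeg -i input.mkv -f hls -hls_time 4 -hls_list_size 5 -hls_flags delete_segments out.m3u8
@end example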
@anchor{ico}
@ -822,6 +884,9 @@ the new default-base-is-moof flag instead. This flag is new from
14496-12:2012. This may make the fragments easier to parse in certain
circumstances (avoiding basing track fragment location calculations
on the implicit end of the previous track fragment).
@item -write_tmcd
Specify @code{on} to force writing a timecode track, @code{off} to disable it
and @code{auto} to write a timecode track only for mov and mp4 output (default).
@end table
@subsection Example
@ -1372,9 +1437,9 @@ ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_fr
@item
Convert the @file{in.mkv} to TS segments using the @code{libx264}
and @code{libfaac} encoders:
and @code{aac} encoders:
@example
ffmpeg -i in.mkv -map 0 -codec:v libx264 -codec:a libfaac -f ssegment -segment_list out.list out%03d.ts
ffmpeg -i in.mkv -map 0 -codec:v libx264 -codec:a aac -f ssegment -segment_list out.list out%03d.ts
@end example
@item
@ -1408,6 +1473,101 @@ Specify whether to remove all fragments when finished. Default 0 (do not remove)
@end table
@section fifo
The fifo pseudo-muxer allows the separation of encoding and muxing by using a
first-in-first-out queue and running the actual muxer in a separate thread. This
is especially useful in combination with the @ref{tee} muxer and can be used to
send data to several destinations with different reliability/writing speed/latency.
API users should be aware that callback functions (interrupt_callback,
io_open and io_close) used within its AVFormatContext must be thread-safe.
The behavior of the fifo muxer if the queue fills up or if the output fails is
selectable:
@itemize @bullet
@item
output can be transparently restarted with configurable delay between retries
based on real time or time of the processed stream.
@item
encoding can be blocked during temporary failure, or continue transparently
dropping packets in case the fifo queue fills up.
@end itemize
@table @option
@item fifo_format
Specify the format name. Useful if it cannot be guessed from the
output name suffix.
@item queue_size
Specify size of the queue (number of packets). Default value is 60.
@item format_opts
Specify format options for the underlying muxer. Muxer options can be specified
as a list of @var{key}=@var{value} pairs separated by ':'.
@item drop_pkts_on_overflow @var{bool}
If set to 1 (true), in case the fifo queue fills up, packets will be dropped
rather than blocking the encoder. This makes it possible to continue streaming without
delaying the input, at the cost of omitting part of the stream. By default
this option is set to 0 (false), so in such cases the encoder will be blocked
until the muxer processes some of the packets and none of them is lost.
@item attempt_recovery @var{bool}
If failure occurs, attempt to recover the output. This is especially useful
when used with network output, since it makes it possible to restart streaming transparently.
By default this option is set to 0 (false).
@item max_recovery_attempts
Sets maximum number of successive unsuccessful recovery attempts after which
the output fails permanently. By default this option is set to 0 (unlimited).
@item recovery_wait_time @var{duration}
Waiting time before the next recovery attempt after previous unsuccessful
recovery attempt. Default value is 5 seconds.
@item recovery_wait_streamtime @var{bool}
If set to 0 (false), the real time is used when waiting for the recovery
attempt (i.e. the recovery will be attempted after at least
recovery_wait_time seconds).
If set to 1 (true), the time of the processed stream is taken into account
instead (i.e. the recovery will be attempted after at least @var{recovery_wait_time}
seconds of the stream is omitted).
By default, this option is set to 0 (false).
@item recover_any_error @var{bool}
If set to 1 (true), recovery will be attempted regardless of type of the error
causing the failure. By default this option is set to 0 (false) and in case of
certain (usually permanent) errors the recovery is not attempted even when
@var{attempt_recovery} is set to 1.
@item restart_with_keyframe @var{bool}
Specify whether to wait for the keyframe after recovering from
queue overflow or failure. This option is set to 0 (false) by default.
@end table
@subsection Examples
@itemize
@item
Stream something to an rtmp server, continue processing the stream at real-time
rate even in case of temporary failure (network outage) and attempt to recover
streaming every second indefinitely.
@example
ffmpeg -re -i ... -c:v libx264 -c:a aac -f fifo -fifo_format flv -map 0:v -map 0:a
-drop_pkts_on_overflow 1 -attempt_recovery 1 -recovery_wait_time 1 rtmp://example.com/live/stream_name
@end example
@end itemize
@anchor{tee}
@section tee
The tee muxer can be used to write the same data to several files or any

10
doc/patchwork Normal file
View File

@ -0,0 +1,10 @@
Patchwork states
NEW: Initial state of new patches
Accepted: The patch was pushed to the main master repository
Rejected: The patch has been rejected
Withdrawn: The patch was withdrawn by the author
Not Applicable: The patch does not apply to the main master repository
Superseded: A newer version of the patch has been posted
Changes Requested: The patch has been or is under review and changes have been requested
RFC: The patch is not intended to be applied but only for comments

View File

@ -314,7 +314,7 @@ These library packages are only available from
@uref{http://sourceware.org/cygwinports/, Cygwin Ports}:
@example
yasm, libSDL-devel, libfaac-devel, libgsm-devel, libmp3lame-devel,
yasm, libSDL-devel, libgsm-devel, libmp3lame-devel,
libschroedinger1.0-devel, speex-devel, libtheora-devel, libxvidcore-devel
@end example

View File

@ -276,7 +276,7 @@ value is -1.
If set to 1 use chunked Transfer-Encoding for posts, default is 1.
@item content_type
Set a specific content type for the POST messages.
Set a specific content type for the POST messages or for listen mode.
@item http_proxy
set HTTP proxy to tunnel through e.g. http://example.com:1234
@ -291,11 +291,13 @@ Use persistent connections if set to 1, default is 0.
@item post_data
Set custom HTTP post data.
@item user-agent
@item user_agent
Override the User-Agent header. If not specified the protocol will use a
string describing the libavformat build. ("Lavf/<version>")
@item user-agent
This is a deprecated option; you can use user_agent instead.
@item timeout
Set timeout in microseconds of socket I/O operations used by the underlying low level
operation. By default it is set to -1, which means that the timeout is
@ -695,7 +697,7 @@ This protocol accepts the following options.
@table @option
@item timeout
Set timeout in miliseconds of socket I/O operations used by the underlying
Set timeout in milliseconds of socket I/O operations used by the underlying
low level operation. By default it is set to -1, which means that the timeout
is not specified.
@ -1159,6 +1161,15 @@ Play an AVI file directly from a TAR archive:
subfile,,start,183241728,end,366490624,,:archive.tar
@end example
@section tee
Writes the output to multiple protocols. The individual outputs are separated
by |
@example
tee:file://path/to/local/this.avi|file://path/to/local/that.avi
@end example
@section tcp
Transmission Control Protocol.

View File

@ -22,7 +22,7 @@ EOT
my $TEMPLATE_HEADER2 = $ENV{"FFMPEG_HEADER2"} || <<EOT;
</head>
<body>
<div style="width: 95%; margin: auto">
<div class="container">
EOT
my $TEMPLATE_FOOTER = $ENV{"FFMPEG_FOOTER"} || <<EOT;

View File

@ -174,7 +174,7 @@ EOT
<link rel="stylesheet" type="text/css" href="style.min.css">
</head>
<body>
<div style="width: 95%; margin: auto">
<div class="container">
<h1>
EOT

1103
ffmpeg.c

File diff suppressed because it is too large Load Diff

View File

@ -212,6 +212,8 @@ typedef struct OptionsContext {
int nb_pass;
SpecifierOpt *passlogfiles;
int nb_passlogfiles;
SpecifierOpt *max_muxing_queue_size;
int nb_max_muxing_queue_size;
SpecifierOpt *guess_layout_max;
int nb_guess_layout_max;
SpecifierOpt *apad;
@ -287,7 +289,6 @@ typedef struct InputStream {
double ts_scale;
int saw_first_ts;
int showed_multi_packet_warning;
AVDictionary *decoder_opts;
AVRational framerate; /* framerate forced with -r */
int top_field_first;
@ -349,6 +350,9 @@ typedef struct InputStream {
// number of frames/samples retrieved from the decoder
uint64_t frames_decoded;
uint64_t samples_decoded;
int64_t *dts_buffer;
int nb_dts_buffer;
} InputStream;
typedef struct InputFile {
@ -416,8 +420,13 @@ typedef struct OutputStream {
int64_t first_pts;
/* dts of the last packet sent to the muxer */
int64_t last_mux_dts;
AVBitStreamFilterContext *bitstream_filters;
int nb_bitstream_filters;
uint8_t *bsf_extradata_updated;
AVBSFContext **bsf_ctx;
AVCodecContext *enc_ctx;
AVCodecParameters *ref_par; /* associated input codec parameters with encoders options applied */
AVCodec *enc;
int64_t max_frames;
AVFrame *filtered_frame;
@ -464,6 +473,12 @@ typedef struct OutputStream {
OSTFinished finished; /* no more packets should be written for this stream */
int unavailable; /* true if the steram is unavailable (possibly temporarily) */
int stream_copy;
// init_output_stream() has been called for this stream
// The encoder and the bitstream filters have been initialized and the stream
// parameters are set in the AVStream.
int initialized;
const char *attachment_filename;
int copy_initial_nonkeyframes;
int copy_prior_start;
@ -472,6 +487,7 @@ typedef struct OutputStream {
int keep_pix_fmt;
AVCodecParserContext *parser;
AVCodecContext *parser_avctx;
/* stats */
// combined size of all the packets written
@ -485,6 +501,11 @@ typedef struct OutputStream {
/* packet quality factor */
int quality;
int max_muxing_queue_size;
/* the packets are buffered here until the muxer is ready to be initialized */
AVFifoBuffer *muxing_queue;
/* packet picture type */
int pict_type;
@ -501,6 +522,8 @@ typedef struct OutputFile {
uint64_t limit_filesize; /* filesize limit expressed in bytes */
int shortest;
int header_written;
} OutputFile;
extern InputStream **input_streams;
@ -573,7 +596,8 @@ void choose_sample_fmt(AVStream *st, AVCodec *codec);
int configure_filtergraph(FilterGraph *fg);
int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out);
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist);
FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost);
int filtergraph_is_simple(FilterGraph *fg);
int init_simple_filtergraph(InputStream *ist, OutputStream *ost);
int init_complex_filtergraph(FilterGraph *fg);
int ffmpeg_parse_options(int argc, char **argv);

View File

@ -17,13 +17,9 @@
*/
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_cuda.h"
#include "ffmpeg.h"
#include <cuda.h>
#include <nvcuvid.h>
typedef struct CUVIDContext {
AVBufferRef *hw_frames_ctx;
} CUVIDContext;
@ -61,24 +57,13 @@ int cuvid_init(AVCodecContext *avctx)
return 0;
}
static void cuvid_ctx_free(AVHWDeviceContext *ctx)
{
AVCUDADeviceContext *hwctx = ctx->hwctx;
cuCtxDestroy(hwctx->cuda_ctx);
}
int cuvid_transcode_init(OutputStream *ost)
{
InputStream *ist;
const enum AVPixelFormat *pix_fmt;
AVCUDADeviceContext *device_hwctx;
AVHWDeviceContext *device_ctx;
AVHWFramesContext *hwframe_ctx;
AVBufferRef *device_ref = NULL;
CUVIDContext *ctx = NULL;
CUdevice device;
CUcontext cuda_ctx = NULL;
CUcontext dummy;
CUresult err;
int ret = 0;
av_log(NULL, AV_LOG_TRACE, "Initializing cuvid transcoding\n");
@ -118,118 +103,51 @@ int cuvid_transcode_init(OutputStream *ost)
}
}
if (!hw_device_ctx) {
hw_device_ctx = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_CUDA);
if (!hw_device_ctx) {
av_log(NULL, AV_LOG_ERROR, "av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_CUDA) failed\n");
ret = AVERROR(ENOMEM);
goto error;
}
err = cuInit(0);
if (err != CUDA_SUCCESS) {
av_log(NULL, AV_LOG_ERROR, "Could not initialize the CUDA driver API\n");
ret = AVERROR_UNKNOWN;
goto error;
}
err = cuDeviceGet(&device, 0); ///TODO: Make device index configurable
if (err != CUDA_SUCCESS) {
av_log(NULL, AV_LOG_ERROR, "Could not get the device number %d\n", 0);
ret = AVERROR_UNKNOWN;
goto error;
}
err = cuCtxCreate(&cuda_ctx, CU_CTX_SCHED_BLOCKING_SYNC, device);
if (err != CUDA_SUCCESS) {
av_log(NULL, AV_LOG_ERROR, "Error creating a CUDA context\n");
ret = AVERROR_UNKNOWN;
goto error;
}
device_ctx = (AVHWDeviceContext*)hw_device_ctx->data;
device_ctx->free = cuvid_ctx_free;
device_hwctx = device_ctx->hwctx;
device_hwctx->cuda_ctx = cuda_ctx;
err = cuCtxPopCurrent(&dummy);
if (err != CUDA_SUCCESS) {
av_log(NULL, AV_LOG_ERROR, "cuCtxPopCurrent failed\n");
ret = AVERROR_UNKNOWN;
goto error;
}
ret = av_hwdevice_ctx_init(hw_device_ctx);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "av_hwdevice_ctx_init failed\n");
goto error;
}
} else {
device_ctx = (AVHWDeviceContext*)hw_device_ctx->data;
device_hwctx = device_ctx->hwctx;
cuda_ctx = device_hwctx->cuda_ctx;
}
if (device_ctx->type != AV_HWDEVICE_TYPE_CUDA) {
av_log(NULL, AV_LOG_ERROR, "Hardware device context is already initialized for a diffrent hwaccel.\n");
ret = AVERROR(EINVAL);
goto error;
}
if (!ctx->hw_frames_ctx) {
ctx->hw_frames_ctx = av_hwframe_ctx_alloc(hw_device_ctx);
ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_CUDA,
ist->hwaccel_device, NULL, 0);
if (ret < 0)
goto error;
ctx->hw_frames_ctx = av_hwframe_ctx_alloc(device_ref);
if (!ctx->hw_frames_ctx) {
av_log(NULL, AV_LOG_ERROR, "av_hwframe_ctx_alloc failed\n");
ret = AVERROR(ENOMEM);
goto error;
}
}
av_buffer_unref(&device_ref);
/* This is a bit hacky, av_hwframe_ctx_init is called by the cuvid decoder
* once it has probed the neccesary format information. But as filters/nvenc
* need to know the format/sw_format, set them here so they are happy.
* This is fine as long as CUVID doesn't add another supported pix_fmt.
*/
hwframe_ctx = (AVHWFramesContext*)ctx->hw_frames_ctx->data;
hwframe_ctx->format = AV_PIX_FMT_CUDA;
hwframe_ctx->sw_format = AV_PIX_FMT_NV12;
ost->hwaccel_ctx = ctx;
ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ctx->hw_frames_ctx);
ost->enc_ctx->pix_fmt = AV_PIX_FMT_CUDA;
if (!ost->enc_ctx->hw_frames_ctx) {
av_log(NULL, AV_LOG_ERROR, "av_buffer_ref failed\n");
ret = AVERROR(ENOMEM);
goto error;
}
if (!ist->hwaccel_ctx) {
ist->hwaccel_ctx = ctx;
ist->hw_frames_ctx = av_buffer_ref(ctx->hw_frames_ctx);
ist->dec_ctx->hw_frames_ctx = av_buffer_ref(ctx->hw_frames_ctx);
ist->dec_ctx->pix_fmt = AV_PIX_FMT_CUDA;
ist->resample_pix_fmt = AV_PIX_FMT_CUDA;
ist->hwaccel_uninit = cuvid_uninit;
if (!ist->hw_frames_ctx || !ist->dec_ctx->hw_frames_ctx) {
if (!ist->hw_frames_ctx) {
av_log(NULL, AV_LOG_ERROR, "av_buffer_ref failed\n");
ret = AVERROR(ENOMEM);
goto error;
}
ist->hwaccel_ctx = ctx;
ist->resample_pix_fmt = AV_PIX_FMT_CUDA;
ist->hwaccel_uninit = cuvid_uninit;
/* This is a bit hacky, av_hwframe_ctx_init is called by the cuvid decoder
* once it has probed the necessary format information. But as filters/nvenc
* need to know the format/sw_format, set them here so they are happy.
* This is fine as long as CUVID doesn't add another supported pix_fmt.
*/
hwframe_ctx = (AVHWFramesContext*)ctx->hw_frames_ctx->data;
hwframe_ctx->format = AV_PIX_FMT_CUDA;
hwframe_ctx->sw_format = AV_PIX_FMT_NV12;
}
return 0;
error:
av_freep(&ctx);
av_buffer_unref(&device_ref);
return ret;
cancel:
if (ist->hwaccel_id == HWACCEL_CUVID) {
av_log(NULL, AV_LOG_ERROR, "CUVID hwaccel requested, but impossible to achive.\n");
av_log(NULL, AV_LOG_ERROR, "CUVID hwaccel requested, but impossible to achieve.\n");
return AVERROR(EINVAL);
}

View File

@ -94,19 +94,19 @@ void choose_sample_fmt(AVStream *st, AVCodec *codec)
if (codec && codec->sample_fmts) {
const enum AVSampleFormat *p = codec->sample_fmts;
for (; *p != -1; p++) {
if (*p == st->codec->sample_fmt)
if (*p == st->codecpar->format)
break;
}
if (*p == -1) {
if((codec->capabilities & AV_CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codec->sample_fmt) > av_get_sample_fmt_name(codec->sample_fmts[0]))
if((codec->capabilities & AV_CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codecpar->format) > av_get_sample_fmt_name(codec->sample_fmts[0]))
av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n");
if(av_get_sample_fmt_name(st->codec->sample_fmt))
if(av_get_sample_fmt_name(st->codecpar->format))
av_log(NULL, AV_LOG_WARNING,
"Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
av_get_sample_fmt_name(st->codec->sample_fmt),
av_get_sample_fmt_name(st->codecpar->format),
codec->name,
av_get_sample_fmt_name(codec->sample_fmts[0]));
st->codec->sample_fmt = codec->sample_fmts[0];
st->codecpar->format = codec->sample_fmts[0];
}
}
}
@ -193,7 +193,7 @@ DEF_CHOOSE_FORMAT(int, sample_rate, supported_samplerates, 0,
DEF_CHOOSE_FORMAT(uint64_t, channel_layout, channel_layouts, 0,
GET_CH_LAYOUT_NAME)
FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
{
FilterGraph *fg = av_mallocz(sizeof(*fg));
@ -221,7 +221,7 @@ FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
GROW_ARRAY(filtergraphs, nb_filtergraphs);
filtergraphs[nb_filtergraphs - 1] = fg;
return fg;
return 0;
}
static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
@ -251,7 +251,7 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
s = input_files[file_idx]->ctx;
for (i = 0; i < s->nb_streams; i++) {
enum AVMediaType stream_type = s->streams[i]->codec->codec_type;
enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
if (stream_type != type &&
!(stream_type == AVMEDIA_TYPE_SUBTITLE &&
type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
@ -611,7 +611,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
int i;
for (i=0; i<of->ctx->nb_streams; i++)
if (of->ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
if (of->ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
break;
if (i<of->ctx->nb_streams) {
@ -673,15 +673,15 @@ static int sub2video_prepare(InputStream *ist)
int i, w, h;
/* Compute the size of the canvas for the subtitles stream.
If the subtitles codec has set a size, use it. Otherwise use the
If the subtitles codecpar has set a size, use it. Otherwise use the
maximum dimensions of the video streams in the same file. */
w = ist->dec_ctx->width;
h = ist->dec_ctx->height;
if (!(w && h)) {
for (i = 0; i < avf->nb_streams; i++) {
if (avf->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
w = FFMAX(w, avf->streams[i]->codec->width);
h = FFMAX(h, avf->streams[i]->codec->height);
if (avf->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
w = FFMAX(w, avf->streams[i]->codecpar->width);
h = FFMAX(h, avf->streams[i]->codecpar->height);
}
}
if (!(w && h)) {
@ -979,7 +979,7 @@ static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
int configure_filtergraph(FilterGraph *fg)
{
AVFilterInOut *inputs, *outputs, *cur;
int ret, i, simple = !fg->graph_desc;
int ret, i, simple = filtergraph_is_simple(fg);
const char *graph_desc = simple ? fg->outputs[0]->ost->avfilter :
fg->graph_desc;
@ -1081,7 +1081,7 @@ int configure_filtergraph(FilterGraph *fg)
/* identical to the same check in ffmpeg.c, needed because
complex filter graphs are initialized earlier */
av_log(NULL, AV_LOG_ERROR, "Encoder (codec %s) not found for output stream #%d:%d\n",
avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
avcodec_get_name(ost->st->codecpar->codec_id), ost->file_index, ost->index);
return AVERROR(EINVAL);
}
if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
@ -1102,3 +1102,7 @@ int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
return 0;
}
int filtergraph_is_simple(FilterGraph *fg)
{
return !fg->graph_desc;
}

View File

@ -429,12 +429,12 @@ static int opt_map_channel(void *optctx, const char *opt, const char *arg)
exit_program(1);
}
st = input_files[m->file_idx]->ctx->streams[m->stream_idx];
if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
if (st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO) {
av_log(NULL, AV_LOG_FATAL, "mapchan: stream #%d.%d is not an audio stream.\n",
m->file_idx, m->stream_idx);
exit_program(1);
}
if (m->channel_idx < 0 || m->channel_idx >= st->codec->channels) {
if (m->channel_idx < 0 || m->channel_idx >= st->codecpar->channels) {
av_log(NULL, AV_LOG_FATAL, "mapchan: invalid audio channel #%d.%d.%d\n",
m->file_idx, m->stream_idx, m->channel_idx);
exit_program(1);
@ -634,11 +634,11 @@ static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *
MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
if (codec_name) {
AVCodec *codec = find_codec_or_die(codec_name, st->codec->codec_type, 0);
st->codec->codec_id = codec->id;
AVCodec *codec = find_codec_or_die(codec_name, st->codecpar->codec_type, 0);
st->codecpar->codec_id = codec->id;
return codec;
} else
return avcodec_find_decoder(st->codec->codec_id);
return avcodec_find_decoder(st->codecpar->codec_id);
}
/* Add all the streams from the given input file to the global
@ -649,14 +649,15 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
for (i = 0; i < ic->nb_streams; i++) {
AVStream *st = ic->streams[i];
AVCodecContext *dec = st->codec;
AVCodecParameters *par = st->codecpar;
InputStream *ist = av_mallocz(sizeof(*ist));
char *framerate = NULL, *hwaccel = NULL, *hwaccel_device = NULL;
char *hwaccel_output_format = NULL;
char *codec_tag = NULL;
char *next;
char *discard_str = NULL;
const AVOption *discard_opt = av_opt_find(dec, "skip_frame", NULL, 0, 0);
const AVClass *cc = avcodec_get_class();
const AVOption *discard_opt = av_opt_find(&cc, "skip_frame", NULL, 0, 0);
if (!ist)
exit_program(1);
@ -683,18 +684,18 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
uint32_t tag = strtol(codec_tag, &next, 0);
if (*next)
tag = AV_RL32(codec_tag);
st->codec->codec_tag = tag;
st->codecpar->codec_tag = tag;
}
ist->dec = choose_decoder(o, ic, st);
ist->decoder_opts = filter_codec_opts(o->g->codec_opts, ist->st->codec->codec_id, ic, st, ist->dec);
ist->decoder_opts = filter_codec_opts(o->g->codec_opts, ist->st->codecpar->codec_id, ic, st, ist->dec);
ist->reinit_filters = -1;
MATCH_PER_STREAM_OPT(reinit_filters, i, ist->reinit_filters, ic, st);
MATCH_PER_STREAM_OPT(discard, str, discard_str, ic, st);
ist->user_set_discard = AVDISCARD_NONE;
if (discard_str && av_opt_eval_int(dec, discard_opt, discard_str, &ist->user_set_discard) < 0) {
if (discard_str && av_opt_eval_int(&cc, discard_opt, discard_str, &ist->user_set_discard) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error parsing discard %s.\n",
discard_str);
exit_program(1);
@ -708,22 +709,30 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
exit_program(1);
}
ret = avcodec_copy_context(ist->dec_ctx, dec);
ret = avcodec_parameters_to_context(ist->dec_ctx, par);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error initializing the decoder context.\n");
exit_program(1);
}
switch (dec->codec_type) {
switch (par->codec_type) {
case AVMEDIA_TYPE_VIDEO:
if(!ist->dec)
ist->dec = avcodec_find_decoder(dec->codec_id);
ist->dec = avcodec_find_decoder(par->codec_id);
#if FF_API_EMU_EDGE
if (av_codec_get_lowres(dec)) {
dec->flags |= CODEC_FLAG_EMU_EDGE;
if (av_codec_get_lowres(st->codec)) {
av_codec_set_lowres(ist->dec_ctx, av_codec_get_lowres(st->codec));
ist->dec_ctx->width = st->codec->width;
ist->dec_ctx->height = st->codec->height;
ist->dec_ctx->coded_width = st->codec->coded_width;
ist->dec_ctx->coded_height = st->codec->coded_height;
ist->dec_ctx->flags |= CODEC_FLAG_EMU_EDGE;
}
#endif
// avformat_find_stream_info() doesn't set this for us anymore.
ist->dec_ctx->framerate = st->avg_frame_rate;
ist->resample_height = ist->dec_ctx->height;
ist->resample_width = ist->dec_ctx->width;
ist->resample_pix_fmt = ist->dec_ctx->pix_fmt;
@ -803,7 +812,7 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
case AVMEDIA_TYPE_SUBTITLE: {
char *canvas_size = NULL;
if(!ist->dec)
ist->dec = avcodec_find_decoder(dec->codec_id);
ist->dec = avcodec_find_decoder(par->codec_id);
MATCH_PER_STREAM_OPT(fix_sub_duration, i, ist->fix_sub_duration, ic, st);
MATCH_PER_STREAM_OPT(canvas_sizes, str, canvas_size, ic, st);
if (canvas_size &&
@ -819,6 +828,12 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
default:
abort();
}
ret = avcodec_parameters_from_context(par, ist->dec_ctx);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error initializing the decoder context.\n");
exit_program(1);
}
}
}
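
Note: the block added above mirrors the decoder context back into codecpar once per-stream overrides (lowres dimensions, framerate) have been applied. In isolation, and assuming the same field names, the sync-back step is:

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

/* Illustrative only: propagate decoder-context changes back into the
 * stream parameters so later consumers see the effective values. */
static int sync_codecpar(AVStream *st, const AVCodecContext *dec_ctx)
{
    return avcodec_parameters_from_context(st->codecpar, dec_ctx);
}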
@ -857,7 +872,7 @@ static void dump_attachment(AVStream *st, const char *filename)
AVIOContext *out = NULL;
AVDictionaryEntry *e;
if (!st->codec->extradata_size) {
if (!st->codecpar->extradata_size) {
av_log(NULL, AV_LOG_WARNING, "No extradata to dump in stream #%d:%d.\n",
nb_input_files - 1, st->index);
return;
@ -878,7 +893,7 @@ static void dump_attachment(AVStream *st, const char *filename)
exit_program(1);
}
avio_write(out, st->codec->extradata, st->codec->extradata_size);
avio_write(out, st->codecpar->extradata, st->codecpar->extradata_size);
avio_flush(out);
avio_close(out);
}
@ -982,6 +997,8 @@ static int open_input_file(OptionsContext *o, const char *filename)
err = avformat_open_input(&ic, filename, file_iformat, &o->g->format_opts);
if (err < 0) {
print_error(filename, err);
if (err == AVERROR_PROTOCOL_NOT_FOUND)
av_log(NULL, AV_LOG_ERROR, "Did you mean file:%s?\n", filename);
exit_program(1);
}
if (scan_all_pmts_set)
@ -1026,8 +1043,8 @@ static int open_input_file(OptionsContext *o, const char *filename)
if (!(ic->iformat->flags & AVFMT_SEEK_TO_PTS)) {
int dts_heuristic = 0;
for (i=0; i<ic->nb_streams; i++) {
AVCodecContext *avctx = ic->streams[i]->codec;
if (avctx->has_b_frames)
const AVCodecParameters *par = ic->streams[i]->codecpar;
if (par->video_delay)
dts_heuristic = 1;
}
if (dts_heuristic) {
@ -1172,21 +1189,39 @@ static int get_preset_file_2(const char *preset_name, const char *codec_name, AV
return ret;
}
static void choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost)
static int choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost)
{
enum AVMediaType type = ost->st->codecpar->codec_type;
char *codec_name = NULL;
MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
if (!codec_name) {
ost->st->codec->codec_id = av_guess_codec(s->oformat, NULL, s->filename,
NULL, ost->st->codec->codec_type);
ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
} else if (!strcmp(codec_name, "copy"))
ost->stream_copy = 1;
else {
ost->enc = find_codec_or_die(codec_name, ost->st->codec->codec_type, 1);
ost->st->codec->codec_id = ost->enc->id;
if (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO || type == AVMEDIA_TYPE_SUBTITLE) {
MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
if (!codec_name) {
ost->st->codecpar->codec_id = av_guess_codec(s->oformat, NULL, s->filename,
NULL, ost->st->codecpar->codec_type);
ost->enc = avcodec_find_encoder(ost->st->codecpar->codec_id);
if (!ost->enc) {
av_log(NULL, AV_LOG_FATAL, "Automatic encoder selection failed for "
"output stream #%d:%d. Default encoder for format %s (codec %s) is "
"probably disabled. Please choose an encoder manually.\n",
ost->file_index, ost->index, s->oformat->name,
avcodec_get_name(ost->st->codecpar->codec_id));
return AVERROR_ENCODER_NOT_FOUND;
}
} else if (!strcmp(codec_name, "copy"))
ost->stream_copy = 1;
else {
ost->enc = find_codec_or_die(codec_name, ost->st->codecpar->codec_type, 1);
ost->st->codecpar->codec_id = ost->enc->id;
}
ost->encoding_needed = !ost->stream_copy;
} else {
/* no encoding supported for other media types */
ost->stream_copy = 1;
ost->encoding_needed = 0;
}
return 0;
}
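
Note: the fallback added above fails loudly when a container's default codec is compiled out, instead of crashing later. A condensed, illustrative version of the lookup (helper name assumed):

#include <libavformat/avformat.h>

/* Illustrative only: resolve the default encoder for an output context;
 * returns NULL when the muxer's default codec is disabled in this build. */
static AVCodec *default_encoder(AVFormatContext *oc, enum AVMediaType type)
{
    enum AVCodecID id = av_guess_codec(oc->oformat, NULL, oc->filename,
                                       NULL, type);
    return id == AV_CODEC_ID_NONE ? NULL : avcodec_find_encoder(id);
}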
static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVMediaType type, int source_index)
@ -1194,8 +1229,8 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
OutputStream *ost;
AVStream *st = avformat_new_stream(oc, NULL);
int idx = oc->nb_streams - 1, ret = 0;
char *bsf = NULL, *next, *codec_tag = NULL;
AVBitStreamFilterContext *bsfc, *bsfc_prev = NULL;
const char *bsfs = NULL;
char *next, *codec_tag = NULL;
double qscale = -1;
int i;
@ -1215,8 +1250,14 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
ost->file_index = nb_output_files - 1;
ost->index = idx;
ost->st = st;
st->codec->codec_type = type;
choose_encoder(o, oc, ost);
st->codecpar->codec_type = type;
ret = choose_encoder(o, oc, ost);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "Error selecting an encoder for stream "
"%d:%d\n", ost->file_index, ost->index);
exit_program(1);
}
ost->enc_ctx = avcodec_alloc_context3(ost->enc);
if (!ost->enc_ctx) {
@ -1225,6 +1266,12 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
}
ost->enc_ctx->codec_type = type;
ost->ref_par = avcodec_parameters_alloc();
if (!ost->ref_par) {
av_log(NULL, AV_LOG_ERROR, "Error allocating the encoding parameters.\n");
exit_program(1);
}
if (ost->enc) {
AVIOContext *s = NULL;
char *buf = NULL, *arg = NULL, *preset = NULL;
@ -1272,29 +1319,62 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
ost->copy_prior_start = -1;
MATCH_PER_STREAM_OPT(copy_prior_start, i, ost->copy_prior_start, oc ,st);
MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
while (bsf) {
char *arg = NULL;
if (next = strchr(bsf, ','))
*next++ = 0;
if (arg = strchr(bsf, '='))
*arg++ = 0;
if (!(bsfc = av_bitstream_filter_init(bsf))) {
av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
MATCH_PER_STREAM_OPT(bitstream_filters, str, bsfs, oc, st);
while (bsfs && *bsfs) {
const AVBitStreamFilter *filter;
char *bsf, *bsf_options_str, *bsf_name;
bsf = av_get_token(&bsfs, ",");
if (!bsf)
exit_program(1);
bsf_name = av_strtok(bsf, "=", &bsf_options_str);
if (!bsf_name)
exit_program(1);
filter = av_bsf_get_by_name(bsf_name);
if (!filter) {
av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf_name);
exit_program(1);
}
if (bsfc_prev)
bsfc_prev->next = bsfc;
else
ost->bitstream_filters = bsfc;
if (arg)
if (!(bsfc->args = av_strdup(arg))) {
av_log(NULL, AV_LOG_FATAL, "Bitstream filter memory allocation failed\n");
ost->bsf_ctx = av_realloc_array(ost->bsf_ctx,
ost->nb_bitstream_filters + 1,
sizeof(*ost->bsf_ctx));
if (!ost->bsf_ctx)
exit_program(1);
ret = av_bsf_alloc(filter, &ost->bsf_ctx[ost->nb_bitstream_filters]);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error allocating a bitstream filter context\n");
exit_program(1);
}
ost->nb_bitstream_filters++;
if (bsf_options_str && filter->priv_class) {
const AVOption *opt = av_opt_next(ost->bsf_ctx[ost->nb_bitstream_filters-1]->priv_data, NULL);
const char * shorthand[2] = {NULL};
if (opt)
shorthand[0] = opt->name;
ret = av_opt_set_from_string(ost->bsf_ctx[ost->nb_bitstream_filters-1]->priv_data, bsf_options_str, shorthand, "=", ":");
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error parsing options for bitstream filter %s\n", bsf_name);
exit_program(1);
}
}
av_freep(&bsf);
bsfc_prev = bsfc;
bsf = next;
if (*bsfs)
bsfs++;
}
if (ost->nb_bitstream_filters) {
ost->bsf_extradata_updated = av_mallocz_array(ost->nb_bitstream_filters, sizeof(*ost->bsf_extradata_updated));
if (!ost->bsf_extradata_updated) {
av_log(NULL, AV_LOG_FATAL, "Bitstream filter memory allocation failed\n");
exit_program(1);
}
}
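
Note: the rewrite above replaces the legacy AVBitStreamFilterContext chain with the AVBSFContext API. A minimal, illustrative single-packet flow with that API (helper name and error handling assumed, not taken from this commit):

#include <libavcodec/avcodec.h>

/* Illustrative only: run one packet through a named bitstream filter. */
static int apply_bsf(const char *name, const AVCodecParameters *par,
                     AVRational time_base, AVPacket *pkt)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name(name);
    AVBSFContext *bsf = NULL;
    int ret;

    if (!f)
        return AVERROR_BSF_NOT_FOUND;
    if ((ret = av_bsf_alloc(f, &bsf)) < 0)
        return ret;
    avcodec_parameters_copy(bsf->par_in, par);
    bsf->time_base_in = time_base;
    if ((ret = av_bsf_init(bsf)) < 0)
        goto end;
    if ((ret = av_bsf_send_packet(bsf, pkt)) < 0)
        goto end;
    while ((ret = av_bsf_receive_packet(bsf, pkt)) == 0)
        av_packet_unref(pkt); /* a real caller would write pkt out first */
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        ret = 0;
end:
    av_bsf_free(&bsf);
    return ret;
}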
MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
@ -1302,7 +1382,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
uint32_t tag = strtol(codec_tag, &next, 0);
if (*next)
tag = AV_RL32(codec_tag);
ost->st->codec->codec_tag =
ost->st->codecpar->codec_tag =
ost->enc_ctx->codec_tag = tag;
}
@ -1315,6 +1395,10 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
MATCH_PER_STREAM_OPT(disposition, str, ost->disposition, oc, st);
ost->disposition = av_strdup(ost->disposition);
ost->max_muxing_queue_size = 128;
MATCH_PER_STREAM_OPT(max_muxing_queue_size, i, ost->max_muxing_queue_size, oc, st);
ost->max_muxing_queue_size *= sizeof(AVPacket);
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
ost->enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
@ -1334,6 +1418,10 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
}
ost->last_mux_dts = AV_NOPTS_VALUE;
ost->muxing_queue = av_fifo_alloc(8 * sizeof(AVPacket));
if (!ost->muxing_queue)
exit_program(1);
return ost;
}
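
Note: the muxing queue allocated above is a plain AVFifoBuffer of AVPacket structs, sized by the new max_muxing_queue_size option. Illustrative producer/consumer helpers for that pattern (names assumed):

#include <libavutil/fifo.h>
#include <libavcodec/avcodec.h>

/* Illustrative only: grow-on-demand packet queueing, as used for streams
 * that produce packets before the muxer is fully initialized. */
static int queue_packet(AVFifoBuffer *fifo, AVPacket *pkt)
{
    if (!av_fifo_space(fifo)) {
        int ret = av_fifo_realloc2(fifo, 2 * av_fifo_size(fifo));
        if (ret < 0)
            return ret;
    }
    av_fifo_generic_write(fifo, pkt, sizeof(*pkt), NULL);
    return 0;
}

static int dequeue_packet(AVFifoBuffer *fifo, AVPacket *pkt)
{
    if (av_fifo_size(fifo) < sizeof(*pkt))
        return AVERROR(EAGAIN);
    av_fifo_generic_read(fifo, pkt, sizeof(*pkt), NULL);
    return 0;
}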
@ -1399,7 +1487,7 @@ static char *get_ost_filters(OptionsContext *o, AVFormatContext *oc,
else if (ost->filters)
return av_strdup(ost->filters);
return av_strdup(st->codec->codec_type == AVMEDIA_TYPE_VIDEO ?
return av_strdup(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ?
"null" : "anull");
}
@ -1831,9 +1919,9 @@ static int read_ffserver_streams(OptionsContext *o, AVFormatContext *s, const ch
AVCodec *codec;
const char *enc_config;
codec = avcodec_find_encoder(ic->streams[i]->codec->codec_id);
codec = avcodec_find_encoder(ic->streams[i]->codecpar->codec_id);
if (!codec) {
av_log(s, AV_LOG_ERROR, "no encoder found for codec id %i\n", ic->streams[i]->codec->codec_id);
av_log(s, AV_LOG_ERROR, "no encoder found for codec id %i\n", ic->streams[i]->codecpar->codec_id);
return AVERROR(EINVAL);
}
if (codec->type == AVMEDIA_TYPE_AUDIO)
@ -1852,10 +1940,10 @@ static int read_ffserver_streams(OptionsContext *o, AVFormatContext *s, const ch
av_dict_free(&opts);
}
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && !ost->stream_copy)
if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && !ost->stream_copy)
choose_sample_fmt(st, codec);
else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && !ost->stream_copy)
choose_pixel_fmt(st, st->codec, codec, st->codec->pix_fmt);
else if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && !ost->stream_copy)
choose_pixel_fmt(st, st->codec, codec, st->codecpar->format);
avcodec_copy_context(ost->enc_ctx, st->codec);
if (enc_config)
av_dict_parse_string(&ost->encoder_opts, enc_config, "=", ",", 0);
@ -1923,7 +2011,7 @@ static int configure_complex_filters(void)
int i, ret = 0;
for (i = 0; i < nb_filtergraphs; i++)
if (!filtergraphs[i]->graph &&
if (!filtergraph_is_simple(filtergraphs[i]) &&
(ret = configure_filtergraph(filtergraphs[i])) < 0)
return ret;
return 0;
@ -2027,18 +2115,18 @@ static int open_output_file(OptionsContext *o, const char *filename)
ost = output_streams[j];
for (i = 0; i < nb_input_streams; i++) {
ist = input_streams[i];
if(ist->st->codec->codec_type == ost->st->codec->codec_type){
if(ist->st->codecpar->codec_type == ost->st->codecpar->codec_type){
ost->sync_ist= ist;
ost->source_index= i;
if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) ost->avfilter = av_strdup("anull");
if(ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) ost->avfilter = av_strdup("null");
if(ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) ost->avfilter = av_strdup("anull");
if(ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) ost->avfilter = av_strdup("null");
ist->discard = 0;
ist->st->discard = ist->user_set_discard;
break;
}
}
if(!ost->sync_ist){
av_log(NULL, AV_LOG_FATAL, "Missing %s stream which is required by this ffm\n", av_get_media_type_string(ost->st->codec->codec_type));
av_log(NULL, AV_LOG_FATAL, "Missing %s stream which is required by this ffm\n", av_get_media_type_string(ost->st->codecpar->codec_type));
exit_program(1);
}
}
@ -2053,10 +2141,10 @@ static int open_output_file(OptionsContext *o, const char *filename)
for (i = 0; i < nb_input_streams; i++) {
int new_area;
ist = input_streams[i];
new_area = ist->st->codec->width * ist->st->codec->height + 100000000*!!ist->st->codec_info_nb_frames;
new_area = ist->st->codecpar->width * ist->st->codecpar->height + 100000000*!!ist->st->codec_info_nb_frames;
if((qcr!=MKTAG('A', 'P', 'I', 'C')) && (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC))
new_area = 1;
if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
if (ist->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
new_area > area) {
if((qcr==MKTAG('A', 'P', 'I', 'C')) && !(ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC))
continue;
@ -2074,8 +2162,8 @@ static int open_output_file(OptionsContext *o, const char *filename)
for (i = 0; i < nb_input_streams; i++) {
int score;
ist = input_streams[i];
score = ist->st->codec->channels + 100000000*!!ist->st->codec_info_nb_frames;
if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
score = ist->st->codecpar->channels + 100000000*!!ist->st->codec_info_nb_frames;
if (ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&
score > best_score) {
best_score = score;
idx = i;
@ -2089,9 +2177,9 @@ static int open_output_file(OptionsContext *o, const char *filename)
MATCH_PER_TYPE_OPT(codec_names, str, subtitle_codec_name, oc, "s");
if (!o->subtitle_disable && (avcodec_find_encoder(oc->oformat->subtitle_codec) || subtitle_codec_name)) {
for (i = 0; i < nb_input_streams; i++)
if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
if (input_streams[i]->st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) {
AVCodecDescriptor const *input_descriptor =
avcodec_descriptor_get(input_streams[i]->st->codec->codec_id);
avcodec_descriptor_get(input_streams[i]->st->codecpar->codec_id);
AVCodecDescriptor const *output_descriptor = NULL;
AVCodec const *output_codec =
avcodec_find_encoder(oc->oformat->subtitle_codec);
@ -2117,8 +2205,8 @@ static int open_output_file(OptionsContext *o, const char *filename)
if (!o->data_disable ) {
enum AVCodecID codec_id = av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_DATA);
for (i = 0; codec_id != AV_CODEC_ID_NONE && i < nb_input_streams; i++) {
if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_DATA
&& input_streams[i]->st->codec->codec_id == codec_id )
if (input_streams[i]->st->codecpar->codec_type == AVMEDIA_TYPE_DATA
&& input_streams[i]->st->codecpar->codec_id == codec_id )
new_data_stream(o, oc, i);
}
}
@ -2155,17 +2243,17 @@ loop_end:
int src_idx = input_files[map->file_index]->ist_index + map->stream_index;
ist = input_streams[input_files[map->file_index]->ist_index + map->stream_index];
if(o->subtitle_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE)
if(o->subtitle_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE)
continue;
if(o-> audio_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
if(o-> audio_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
continue;
if(o-> video_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
if(o-> video_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
continue;
if(o-> data_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_DATA)
if(o-> data_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_DATA)
continue;
ost = NULL;
switch (ist->st->codec->codec_type) {
switch (ist->st->codecpar->codec_type) {
case AVMEDIA_TYPE_VIDEO: ost = new_video_stream (o, oc, src_idx); break;
case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream (o, oc, src_idx); break;
case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream (o, oc, src_idx); break;
@ -2220,17 +2308,17 @@ loop_end:
avio_read(pb, attachment, len);
ost = new_attachment_stream(o, oc, -1);
ost->stream_copy = 1;
ost->stream_copy = 0;
ost->attachment_filename = o->attachments[i];
ost->finished = 1;
ost->st->codec->extradata = attachment;
ost->st->codec->extradata_size = len;
ost->st->codecpar->extradata = attachment;
ost->st->codecpar->extradata_size = len;
p = strrchr(o->attachments[i], '/');
av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
avio_closep(&pb);
}
#if FF_API_LAVF_AVCTX
for (i = nb_output_streams - oc->nb_streams; i < nb_output_streams; i++) { //for all streams of this output file
AVDictionaryEntry *e;
ost = output_streams[i];
@ -2241,6 +2329,7 @@ loop_end:
if (av_opt_set(ost->st->codec, "flags", e->value, 0) < 0)
exit_program(1);
}
#endif
if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
av_dump_format(oc, nb_output_files - 1, oc->filename, 1);
@ -2290,14 +2379,25 @@ loop_end:
}
av_dict_free(&unused_opts);
/* set the encoding/decoding_needed flags */
/* set the decoding_needed flags and create simple filtergraphs */
for (i = of->ost_index; i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
ost->encoding_needed = !ost->stream_copy;
if (ost->encoding_needed && ost->source_index >= 0) {
InputStream *ist = input_streams[ost->source_index];
ist->decoding_needed |= DECODING_FOR_OST;
if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
err = init_simple_filtergraph(ist, ost);
if (err < 0) {
av_log(NULL, AV_LOG_ERROR,
"Error initializing a simple filtergraph between streams "
"%d:%d->%d:%d\n", ist->file_index, ost->source_index,
nb_output_files - 1, ost->st->index);
exit_program(1);
}
}
}
}
@ -2535,11 +2635,10 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
int i, j, fr;
for (j = 0; j < nb_input_files; j++) {
for (i = 0; i < input_files[j]->nb_streams; i++) {
AVCodecContext *c = input_files[j]->ctx->streams[i]->codec;
if (c->codec_type != AVMEDIA_TYPE_VIDEO ||
!c->time_base.num)
AVStream *st = input_files[j]->ctx->streams[i];
if (st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
continue;
fr = c->time_base.den * 1000 / c->time_base.num;
fr = st->time_base.den * 1000 / st->time_base.num;
if (fr == 25000) {
norm = PAL;
break;
@ -3069,6 +3168,9 @@ int ffmpeg_parse_options(int argc, char **argv)
goto fail;
}
/* configure terminal and setup signal handlers */
term_init();
/* open input files */
ret = open_files(&octx.groups[GROUP_INFILE], "input", open_input_file);
if (ret < 0) {
@ -3384,9 +3486,6 @@ const OptionDef options[] = {
{ "hwaccel_output_format", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_output_formats) },
"select output format used with HW accelerated decoding", "format" },
{ "hwaccel_output_format", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_output_formats) },
"select output format used with HW accelerated decoding", "format" },
#if CONFIG_VDA || CONFIG_VIDEOTOOLBOX
{ "videotoolbox_pixfmt", HAS_ARG | OPT_STRING | OPT_EXPERT, { &videotoolbox_pixfmt}, "" },
#endif
@ -3474,6 +3573,10 @@ const OptionDef options[] = {
"set the subtitle options to the indicated preset", "preset" },
{ "fpre", HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
"set options from indicated preset file", "filename" },
{ "max_muxing_queue_size", HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(max_muxing_queue_size) },
"maximum number of packets that can be buffered while waiting for all streams to initialize", "packets" },
/* data codec support */
{ "dcodec", HAS_ARG | OPT_DATA | OPT_PERFILE | OPT_EXPERT | OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_data_codec },
"force data codec ('copy' to copy stream)", "codec" },

ffmpeg_qsv.c

@ -210,8 +210,7 @@ int qsv_transcode_init(OutputStream *ost)
/* check if the decoder supports QSV and the output only goes to this stream */
ist = input_streams[ost->source_index];
if (ist->nb_filters || ist->hwaccel_id != HWACCEL_QSV ||
!ist->dec || !ist->dec->pix_fmts)
if (ist->hwaccel_id != HWACCEL_QSV || !ist->dec || !ist->dec->pix_fmts)
return 0;
for (pix_fmt = ist->dec->pix_fmts; *pix_fmt != AV_PIX_FMT_NONE; pix_fmt++)
if (*pix_fmt == AV_PIX_FMT_QSV)

ffmpeg_vaapi.c

@ -302,7 +302,7 @@ static int vaapi_build_decoder_config(VAAPIDecoderContext *ctx,
if (ctx->output_format != AV_PIX_FMT_NONE &&
ctx->output_format != AV_PIX_FMT_VAAPI) {
for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) {
if (constraints->valid_sw_formats[i] == ctx->decode_format) {
if (constraints->valid_sw_formats[i] == ctx->output_format) {
ctx->decode_format = ctx->output_format;
av_log(ctx, AV_LOG_DEBUG, "Using decode format %s (output "
"format).\n", av_get_pix_fmt_name(ctx->decode_format));

ffplay.c

@ -105,6 +105,8 @@ const int program_birth_year = 2003;
#define CURSOR_HIDE_DELAY 1000000
#define USE_ONEPASS_SUBTITLE_RENDER 1
static unsigned sws_flags = SWS_BICUBIC;
typedef struct MyAVPacketList {
@ -152,17 +154,17 @@ typedef struct Clock {
typedef struct Frame {
AVFrame *frame;
AVSubtitle sub;
AVSubtitleRect **subrects; /* rescaled subtitle rectangles in yuva */
int serial;
double pts; /* presentation timestamp for the frame */
double duration; /* estimated duration of the frame */
int64_t pos; /* byte position of the frame in the input file */
SDL_Overlay *bmp;
SDL_Texture *bmp;
int allocated;
int reallocate;
int width;
int height;
int format;
AVRational sar;
int uploaded;
} Frame;
typedef struct FrameQueue {
@ -228,9 +230,6 @@ typedef struct VideoState {
Decoder viddec;
Decoder subdec;
int viddec_width;
int viddec_height;
int audio_stream;
int av_sync_type;
@ -272,6 +271,8 @@ typedef struct VideoState {
FFTSample *rdft_data;
int xpos;
double last_vis_time;
SDL_Texture *vis_texture;
SDL_Texture *sub_texture;
int subtitle_stream;
AVStream *subtitle_st;
@ -284,11 +285,8 @@ typedef struct VideoState {
AVStream *video_st;
PacketQueue videoq;
double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
#if !CONFIG_AVFILTER
struct SwsContext *img_convert_ctx;
#endif
struct SwsContext *sub_convert_ctx;
SDL_Rect last_display_rect;
int eof;
char *filename;
@ -313,8 +311,6 @@ typedef struct VideoState {
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int default_width = 640;
static int default_height = 480;
static int screen_width = 0;
@ -362,7 +358,8 @@ static AVPacket flush_pkt;
#define FF_ALLOC_EVENT (SDL_USEREVENT)
#define FF_QUIT_EVENT (SDL_USEREVENT + 2)
static SDL_Surface *screen;
static SDL_Window *window;
static SDL_Renderer *renderer;
#if CONFIG_AVFILTER
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
@ -591,9 +588,7 @@ static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
if (got_frame) {
if (decoder_reorder_pts == -1) {
frame->pts = av_frame_get_best_effort_timestamp(frame);
} else if (decoder_reorder_pts) {
frame->pts = frame->pkt_pts;
} else {
} else if (!decoder_reorder_pts) {
frame->pts = frame->pkt_dts;
}
}
@ -603,9 +598,7 @@ static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
if (got_frame) {
AVRational tb = (AVRational){1, frame->sample_rate};
if (frame->pts != AV_NOPTS_VALUE)
frame->pts = av_rescale_q(frame->pts, d->avctx->time_base, tb);
else if (frame->pkt_pts != AV_NOPTS_VALUE)
frame->pts = av_rescale_q(frame->pkt_pts, av_codec_get_pkt_timebase(d->avctx), tb);
frame->pts = av_rescale_q(frame->pts, av_codec_get_pkt_timebase(d->avctx), tb);
else if (d->next_pts != AV_NOPTS_VALUE)
frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
if (frame->pts != AV_NOPTS_VALUE) {
@ -650,12 +643,6 @@ static void decoder_destroy(Decoder *d) {
static void frame_queue_unref_item(Frame *vp)
{
int i;
for (i = 0; i < vp->sub.num_rects; i++) {
av_freep(&vp->subrects[i]->data[0]);
av_freep(&vp->subrects[i]);
}
av_freep(&vp->subrects);
av_frame_unref(vp->frame);
avsubtitle_free(&vp->sub);
}
@ -798,113 +785,47 @@ static void decoder_abort(Decoder *d, FrameQueue *fq)
packet_queue_flush(d->queue);
}
static inline void fill_rectangle(SDL_Surface *screen,
int x, int y, int w, int h, int color, int update)
static inline void fill_rectangle(int x, int y, int w, int h)
{
SDL_Rect rect;
rect.x = x;
rect.y = y;
rect.w = w;
rect.h = h;
SDL_FillRect(screen, &rect, color);
if (update && w > 0 && h > 0)
SDL_UpdateRect(screen, x, y, w, h);
}
/* draw only the border of a rectangle */
static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
{
int w1, w2, h1, h2;
/* fill the background */
w1 = x;
if (w1 < 0)
w1 = 0;
w2 = width - (x + w);
if (w2 < 0)
w2 = 0;
h1 = y;
if (h1 < 0)
h1 = 0;
h2 = height - (y + h);
if (h2 < 0)
h2 = 0;
fill_rectangle(screen,
xleft, ytop,
w1, height,
color, update);
fill_rectangle(screen,
xleft + width - w2, ytop,
w2, height,
color, update);
fill_rectangle(screen,
xleft + w1, ytop,
width - w1 - w2, h1,
color, update);
fill_rectangle(screen,
xleft + w1, ytop + height - h2,
width - w1 - w2, h2,
color, update);
}
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
#define BPP 1
static void blend_subrect(uint8_t **data, int *linesize, const AVSubtitleRect *rect, int imgw, int imgh)
{
int x, y, Y, U, V, A;
uint8_t *lum, *cb, *cr;
int dstx, dsty, dstw, dsth;
const AVSubtitleRect *src = rect;
dstw = av_clip(rect->w, 0, imgw);
dsth = av_clip(rect->h, 0, imgh);
dstx = av_clip(rect->x, 0, imgw - dstw);
dsty = av_clip(rect->y, 0, imgh - dsth);
lum = data[0] + dstx + dsty * linesize[0];
cb = data[1] + dstx/2 + (dsty >> 1) * linesize[1];
cr = data[2] + dstx/2 + (dsty >> 1) * linesize[2];
for (y = 0; y<dsth; y++) {
for (x = 0; x<dstw; x++) {
Y = src->data[0][x + y*src->linesize[0]];
A = src->data[3][x + y*src->linesize[3]];
lum[0] = ALPHA_BLEND(A, lum[0], Y, 0);
lum++;
}
lum += linesize[0] - dstw;
}
for (y = 0; y<dsth/2; y++) {
for (x = 0; x<dstw/2; x++) {
U = src->data[1][x + y*src->linesize[1]];
V = src->data[2][x + y*src->linesize[2]];
A = src->data[3][2*x + 2*y *src->linesize[3]]
+ src->data[3][2*x + 1 + 2*y *src->linesize[3]]
+ src->data[3][2*x + 1 + (2*y+1)*src->linesize[3]]
+ src->data[3][2*x + (2*y+1)*src->linesize[3]];
cb[0] = ALPHA_BLEND(A>>2, cb[0], U, 0);
cr[0] = ALPHA_BLEND(A>>2, cr[0], V, 0);
cb++;
cr++;
}
cb += linesize[1] - dstw/2;
cr += linesize[2] - dstw/2;
}
if (w && h)
SDL_RenderFillRect(renderer, &rect);
}
static void free_picture(Frame *vp)
{
if (vp->bmp) {
SDL_FreeYUVOverlay(vp->bmp);
SDL_DestroyTexture(vp->bmp);
vp->bmp = NULL;
}
}
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
{
Uint32 format;
int access, w, h;
if (SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
void *pixels;
int pitch;
SDL_DestroyTexture(*texture);
if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
return -1;
if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
return -1;
if (init_texture) {
if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
return -1;
memset(pixels, 0, pitch * new_height);
SDL_UnlockTexture(*texture);
}
}
return 0;
}
static void calculate_display_rect(SDL_Rect *rect,
int scr_xleft, int scr_ytop, int scr_width, int scr_height,
int pic_width, int pic_height, AVRational pic_sar)
@ -936,12 +857,44 @@ static void calculate_display_rect(SDL_Rect *rect,
rect->h = FFMAX(height, 1);
}
static int upload_texture(SDL_Texture *tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
int ret = 0;
switch (frame->format) {
case AV_PIX_FMT_YUV420P:
ret = SDL_UpdateYUVTexture(tex, NULL, frame->data[0], frame->linesize[0],
frame->data[1], frame->linesize[1],
frame->data[2], frame->linesize[2]);
break;
case AV_PIX_FMT_BGRA:
ret = SDL_UpdateTexture(tex, NULL, frame->data[0], frame->linesize[0]);
break;
default:
/* This should only happen if we are not using avfilter... */
*img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
frame->width, frame->height, frame->format, frame->width, frame->height,
AV_PIX_FMT_BGRA, sws_flags, NULL, NULL, NULL);
if (*img_convert_ctx != NULL) {
uint8_t *pixels;
int pitch;
if (!SDL_LockTexture(tex, NULL, (void **)&pixels, &pitch)) {
sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
0, frame->height, &pixels, &pitch);
SDL_UnlockTexture(tex);
}
} else {
av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
ret = -1;
}
break;
}
return ret;
}
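
Note: together with realloc_texture() above, this forms ffplay's new SDL2 render path: (re)create a streaming texture to match the frame, upload, then copy to the renderer. A condensed, illustrative composition of the two functions defined in this file (assuming a YUV420P frame):

#include <SDL.h>
#include <libavutil/frame.h>
#include <libswscale/swscale.h>

/* Illustrative only: one-frame render pass built from the helpers above. */
static void render_frame(SDL_Renderer *ren, SDL_Texture **tex,
                         AVFrame *frame, struct SwsContext **sws)
{
    if (realloc_texture(tex, SDL_PIXELFORMAT_YV12, frame->width,
                        frame->height, SDL_BLENDMODE_NONE, 0) < 0)
        return;
    if (upload_texture(*tex, frame, sws) < 0)
        return;
    SDL_RenderClear(ren);
    SDL_RenderCopy(ren, *tex, NULL, &(SDL_Rect){0, 0, frame->width, frame->height});
    SDL_RenderPresent(ren);
}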
static void video_image_display(VideoState *is)
{
Frame *vp;
Frame *sp;
Frame *sp = NULL;
SDL_Rect rect;
int i;
vp = frame_queue_peek_last(&is->pictq);
if (vp->bmp) {
@ -950,36 +903,71 @@ static void video_image_display(VideoState *is)
sp = frame_queue_peek(&is->subpq);
if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
uint8_t *data[4];
int linesize[4];
if (!sp->uploaded) {
uint8_t *pixels;
int pitch;
int i;
if (!sp->width || !sp->height) {
sp->width = vp->width;
sp->height = vp->height;
}
if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
return;
SDL_LockYUVOverlay (vp->bmp);
for (i = 0; i < sp->sub.num_rects; i++) {
AVSubtitleRect *sub_rect = sp->sub.rects[i];
data[0] = vp->bmp->pixels[0];
data[1] = vp->bmp->pixels[2];
data[2] = vp->bmp->pixels[1];
sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
linesize[0] = vp->bmp->pitches[0];
linesize[1] = vp->bmp->pitches[2];
linesize[2] = vp->bmp->pitches[1];
for (i = 0; i < sp->sub.num_rects; i++)
blend_subrect(data, linesize, sp->subrects[i],
vp->bmp->w, vp->bmp->h);
SDL_UnlockYUVOverlay (vp->bmp);
}
is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
0, NULL, NULL, NULL);
if (!is->sub_convert_ctx) {
av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
return;
}
if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
0, sub_rect->h, &pixels, &pitch);
SDL_UnlockTexture(is->sub_texture);
}
}
sp->uploaded = 1;
}
} else
sp = NULL;
}
}
calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
SDL_DisplayYUVOverlay(vp->bmp, &rect);
if (!vp->uploaded) {
if (upload_texture(vp->bmp, vp->frame, &is->img_convert_ctx) < 0)
return;
vp->uploaded = 1;
}
if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
is->last_display_rect = rect;
SDL_RenderCopy(renderer, vp->bmp, NULL, &rect);
if (sp) {
#if USE_ONEPASS_SUBTITLE_RENDER
SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
#else
int i;
double xratio = (double)rect.w / (double)sp->width;
double yratio = (double)rect.h / (double)sp->height;
for (i = 0; i < sp->sub.num_rects; i++) {
SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
.y = rect.y + sub_rect->y * yratio,
.w = sub_rect->w * xratio,
.h = sub_rect->h * yratio};
SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
}
#endif
}
}
}
@ -992,7 +980,7 @@ static inline int compute_mod(int a, int b)
static void video_audio_display(VideoState *s)
{
int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
int ch, channels, h, h2, bgcolor, fgcolor;
int ch, channels, h, h2;
int64_t time_diff;
int rdft_bits, nb_freq;
@ -1042,13 +1030,8 @@ static void video_audio_display(VideoState *s)
i_start = s->last_i_start;
}
bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
if (s->show_mode == SHOW_MODE_WAVES) {
fill_rectangle(screen,
s->xleft, s->ytop, s->width, s->height,
bgcolor, 0);
fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
/* total height for one channel */
h = s->height / nb_display_channels;
@ -1065,25 +1048,23 @@ static void video_audio_display(VideoState *s)
} else {
ys = y1;
}
fill_rectangle(screen,
s->xleft + x, ys, 1, y,
fgcolor, 0);
fill_rectangle(s->xleft + x, ys, 1, y);
i += channels;
if (i >= SAMPLE_ARRAY_SIZE)
i -= SAMPLE_ARRAY_SIZE;
}
}
fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
for (ch = 1; ch < nb_display_channels; ch++) {
y = s->ytop + ch * h;
fill_rectangle(screen,
s->xleft, y, s->width, 1,
fgcolor, 0);
fill_rectangle(s->xleft, y, s->width, 1);
}
SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
} else {
if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
return;
nb_display_channels= FFMIN(nb_display_channels, 2);
if (rdft_bits != s->rdft_bits) {
av_rdft_end(s->rdft);
@ -1097,6 +1078,9 @@ static void video_audio_display(VideoState *s)
s->show_mode = SHOW_MODE_WAVES;
} else {
FFTSample *data[2];
SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
uint32_t *pixels;
int pitch;
for (ch = 0; ch < nb_display_channels; ch++) {
data[ch] = s->rdft_data + 2 * nb_freq * ch;
i = i_start + ch;
@ -1111,21 +1095,23 @@ static void video_audio_display(VideoState *s)
}
/* Least efficient way to do this, we should of course
* directly access it but it is more than fast enough. */
for (y = 0; y < s->height; y++) {
double w = 1 / sqrt(nb_freq);
int a = sqrt(w * hypot(data[0][2 * y + 0], data[0][2 * y + 1]));
int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
: a;
a = FFMIN(a, 255);
b = FFMIN(b, 255);
fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
fill_rectangle(screen,
s->xpos, s->height-y, 1, 1,
fgcolor, 0);
if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
pitch >>= 2;
pixels += pitch * s->height;
for (y = 0; y < s->height; y++) {
double w = 1 / sqrt(nb_freq);
int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
: a;
a = FFMIN(a, 255);
b = FFMIN(b, 255);
pixels -= pitch;
*pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
}
SDL_UnlockTexture(s->vis_texture);
}
SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
}
SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
if (!s->paused)
s->xpos++;
if (s->xpos >= s->width)
@ -1215,11 +1201,13 @@ static void stream_close(VideoState *is)
frame_queue_destory(&is->sampq);
frame_queue_destory(&is->subpq);
SDL_DestroyCond(is->continue_read_thread);
#if !CONFIG_AVFILTER
sws_freeContext(is->img_convert_ctx);
#endif
sws_freeContext(is->sub_convert_ctx);
av_free(is->filename);
if (is->vis_texture)
SDL_DestroyTexture(is->vis_texture);
if (is->sub_texture)
SDL_DestroyTexture(is->sub_texture);
av_free(is);
}
@ -1228,6 +1216,10 @@ static void do_exit(VideoState *is)
if (is) {
stream_close(is);
}
if (renderer)
SDL_DestroyRenderer(renderer);
if (window)
SDL_DestroyWindow(window);
av_lockmgr_register(NULL);
uninit_opts();
#if CONFIG_AVFILTER
@ -1254,42 +1246,48 @@ static void set_default_window_size(int width, int height, AVRational sar)
default_height = rect.h;
}
static int video_open(VideoState *is, int force_set_video_mode, Frame *vp)
static int video_open(VideoState *is, Frame *vp)
{
int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
int w,h;
if (is_full_screen) flags |= SDL_FULLSCREEN;
else flags |= SDL_RESIZABLE;
if (vp && vp->width)
set_default_window_size(vp->width, vp->height, vp->sar);
if (is_full_screen && fs_screen_width) {
w = fs_screen_width;
h = fs_screen_height;
} else if (!is_full_screen && screen_width) {
if (screen_width) {
w = screen_width;
h = screen_height;
} else {
w = default_width;
h = default_height;
}
w = FFMIN(16383, w);
if (screen && is->width == screen->w && screen->w == w
&& is->height== screen->h && screen->h == h && !force_set_video_mode)
return 0;
screen = SDL_SetVideoMode(w, h, 0, flags);
if (!screen) {
if (!window) {
int flags = SDL_WINDOW_SHOWN | SDL_WINDOW_RESIZABLE;
if (!window_title)
window_title = input_filename;
if (is_full_screen)
flags |= SDL_WINDOW_FULLSCREEN_DESKTOP;
window = SDL_CreateWindow(window_title, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, w, h, flags);
SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
if (window) {
SDL_RendererInfo info;
renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
if (renderer) {
if (!SDL_GetRendererInfo(renderer, &info))
av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", info.name);
}
}
} else {
SDL_SetWindowSize(window, w, h);
}
if (!window || !renderer) {
av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
do_exit(is);
}
if (!window_title)
window_title = input_filename;
SDL_WM_SetCaption(window_title, window_title);
is->width = screen->w;
is->height = screen->h;
is->width = w;
is->height = h;
return 0;
}
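
Note: the window and renderer are now created once and reused; later resizes go through SDL_SetWindowSize() instead of re-setting a video mode. The SDL2 bring-up in isolation (illustrative, error reporting trimmed):

#include <SDL.h>

/* Illustrative only: create a window plus an accelerated, vsynced
 * renderer, the pair the function above caches in globals. */
static int create_output(SDL_Window **win, SDL_Renderer **ren,
                         const char *title, int w, int h)
{
    *win = SDL_CreateWindow(title, SDL_WINDOWPOS_UNDEFINED,
                            SDL_WINDOWPOS_UNDEFINED, w, h,
                            SDL_WINDOW_SHOWN | SDL_WINDOW_RESIZABLE);
    if (!*win)
        return -1;
    *ren = SDL_CreateRenderer(*win, -1, SDL_RENDERER_ACCELERATED |
                                        SDL_RENDERER_PRESENTVSYNC);
    return *ren ? 0 : -1;
}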
@ -1297,12 +1295,16 @@ static int video_open(VideoState *is, int force_set_video_mode, Frame *vp)
/* display the current picture, if any */
static void video_display(VideoState *is)
{
if (!screen)
video_open(is, 0, NULL);
if (!window)
video_open(is, NULL);
SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
SDL_RenderClear(renderer);
if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
video_audio_display(is);
else if (is->video_st)
video_image_display(is);
SDL_RenderPresent(renderer);
}
static double get_clock(Clock *c)
@ -1587,6 +1589,20 @@ retry:
|| (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
|| (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
{
if (sp->uploaded) {
int i;
for (i = 0; i < sp->sub.num_rects; i++) {
AVSubtitleRect *sub_rect = sp->sub.rects[i];
uint8_t *pixels;
int pitch, j;
if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
for (j = 0; j < sub_rect->h; j++, pixels += pitch)
memset(pixels, 0, sub_rect->w << 2);
SDL_UnlockTexture(is->sub_texture);
}
}
}
frame_queue_next(&is->subpq);
} else {
break;
@ -1652,19 +1668,18 @@ display:
static void alloc_picture(VideoState *is)
{
Frame *vp;
int64_t bufferdiff;
int sdl_format;
vp = &is->pictq.queue[is->pictq.windex];
free_picture(vp);
video_open(is, vp);
video_open(is, 0, vp);
if (vp->format == AV_PIX_FMT_YUV420P)
sdl_format = SDL_PIXELFORMAT_YV12;
else
sdl_format = SDL_PIXELFORMAT_ARGB8888;
vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
SDL_YV12_OVERLAY,
screen);
bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
if (realloc_texture(&vp->bmp, sdl_format, vp->width, vp->height, SDL_BLENDMODE_NONE, 0) < 0) {
/* SDL allocates a buffer smaller than requested if the video
* overlay hardware is unable to support the requested size. */
av_log(NULL, AV_LOG_FATAL,
@ -1680,24 +1695,6 @@ static void alloc_picture(VideoState *is)
SDL_UnlockMutex(is->pictq.mutex);
}
static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
int i, width, height;
Uint8 *p, *maxp;
for (i = 0; i < 3; i++) {
width = bmp->w;
height = bmp->h;
if (i > 0) {
width >>= 1;
height >>= 1;
}
if (bmp->pitches[i] > width) {
maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
*(p+1) = *p;
}
}
}
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
{
Frame *vp;
@ -1711,17 +1708,19 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double
return -1;
vp->sar = src_frame->sample_aspect_ratio;
vp->uploaded = 0;
/* alloc or resize hardware picture buffer */
if (!vp->bmp || vp->reallocate || !vp->allocated ||
if (!vp->bmp || !vp->allocated ||
vp->width != src_frame->width ||
vp->height != src_frame->height) {
vp->height != src_frame->height ||
vp->format != src_frame->format) {
SDL_Event event;
vp->allocated = 0;
vp->reallocate = 0;
vp->allocated = 0;
vp->width = src_frame->width;
vp->height = src_frame->height;
vp->format = src_frame->format;
/* the allocation must be done in the main thread to avoid
locking problems. */
@ -1735,7 +1734,7 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double
SDL_CondWait(is->pictq.cond, is->pictq.mutex);
}
/* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, FF_ALLOC_EVENT, FF_ALLOC_EVENT) != 1) {
while (!vp->allocated && !is->abort_request) {
SDL_CondWait(is->pictq.cond, is->pictq.mutex);
}
@ -1748,58 +1747,12 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double
/* if the frame is not skipped, then display it */
if (vp->bmp) {
uint8_t *data[4];
int linesize[4];
/* get a pointer on the bitmap */
SDL_LockYUVOverlay (vp->bmp);
data[0] = vp->bmp->pixels[0];
data[1] = vp->bmp->pixels[2];
data[2] = vp->bmp->pixels[1];
linesize[0] = vp->bmp->pitches[0];
linesize[1] = vp->bmp->pitches[2];
linesize[2] = vp->bmp->pitches[1];
#if CONFIG_AVFILTER
// FIXME use direct rendering
av_image_copy(data, linesize, (const uint8_t **)src_frame->data, src_frame->linesize,
src_frame->format, vp->width, vp->height);
#else
{
AVDictionaryEntry *e = av_dict_get(sws_dict, "sws_flags", NULL, 0);
if (e) {
const AVClass *class = sws_get_class();
const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0,
AV_OPT_SEARCH_FAKE_OBJ);
int ret = av_opt_eval_flags(&class, o, e->value, &sws_flags);
if (ret < 0)
exit(1);
}
}
is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
vp->width, vp->height, src_frame->format, vp->width, vp->height,
AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
if (!is->img_convert_ctx) {
av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
exit(1);
}
sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
0, vp->height, data, linesize);
#endif
/* workaround SDL PITCH_WORKAROUND */
duplicate_right_border_pixels(vp->bmp);
/* update the bitmap content */
SDL_UnlockYUVOverlay(vp->bmp);
vp->pts = pts;
vp->duration = duration;
vp->pos = pos;
vp->serial = serial;
/* now we can update the picture count */
av_frame_move_ref(vp->frame, src_frame);
frame_queue_push(&is->pictq);
}
return 0;
@ -1820,9 +1773,6 @@ static int get_video_frame(VideoState *is, AVFrame *frame)
frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
is->viddec_width = frame->width;
is->viddec_height = frame->height;
if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
if (frame->pts != AV_NOPTS_VALUE) {
double diff = dpts - get_master_clock(is);
@ -1887,7 +1837,7 @@ fail:
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
{
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
char sws_flags_str[512] = "";
char buffersrc_args[256];
int ret;
@ -1950,10 +1900,6 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
last_filter = filt_ctx; \
} while (0)
/* SDL YUV code is not handling odd width/height for some driver
* combinations, therefore we crop the picture to an even width/height. */
INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");
if (autorotate) {
double theta = get_rotation(is->video_st);
@ -2148,7 +2094,7 @@ static int audio_thread(void *arg)
static int decoder_start(Decoder *d, int (*fn)(void *), void *arg)
{
packet_queue_start(d->queue);
d->decoder_tid = SDL_CreateThread(fn, arg);
d->decoder_tid = SDL_CreateThread(fn, "decoder", arg);
if (!d->decoder_tid) {
av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
return AVERROR(ENOMEM);
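
Note: SDL2's SDL_CreateThread() takes a mandatory thread name, hence the extra argument above. Illustrative call shape:

#include <SDL.h>

static int worker(void *arg)
{
    (void)arg;
    return 0;
}

/* Illustrative only: SDL2 threads carry a debugger-visible name. */
static SDL_Thread *spawn_worker(void)
{
    return SDL_CreateThread(worker, "worker", NULL);
}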
@ -2271,7 +2217,6 @@ static int subtitle_thread(void *arg)
Frame *sp;
int got_subtitle;
double pts;
int i;
for (;;) {
if (!(sp = frame_queue_peek_writable(&is->subpq)))
@ -2287,42 +2232,9 @@ static int subtitle_thread(void *arg)
pts = sp->sub.pts / (double)AV_TIME_BASE;
sp->pts = pts;
sp->serial = is->subdec.pkt_serial;
if (!(sp->subrects = av_mallocz_array(sp->sub.num_rects, sizeof(AVSubtitleRect*)))) {
av_log(NULL, AV_LOG_FATAL, "Cannot allocate subrects\n");
exit(1);
}
for (i = 0; i < sp->sub.num_rects; i++)
{
int in_w = sp->sub.rects[i]->w;
int in_h = sp->sub.rects[i]->h;
int subw = is->subdec.avctx->width ? is->subdec.avctx->width : is->viddec_width;
int subh = is->subdec.avctx->height ? is->subdec.avctx->height : is->viddec_height;
int out_w = is->viddec_width ? in_w * is->viddec_width / subw : in_w;
int out_h = is->viddec_height ? in_h * is->viddec_height / subh : in_h;
if (!(sp->subrects[i] = av_mallocz(sizeof(AVSubtitleRect))) ||
av_image_alloc(sp->subrects[i]->data, sp->subrects[i]->linesize, out_w, out_h, AV_PIX_FMT_YUVA420P, 16) < 0) {
av_log(NULL, AV_LOG_FATAL, "Cannot allocate subtitle data\n");
exit(1);
}
is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
in_w, in_h, AV_PIX_FMT_PAL8, out_w, out_h,
AV_PIX_FMT_YUVA420P, sws_flags, NULL, NULL, NULL);
if (!is->sub_convert_ctx) {
av_log(NULL, AV_LOG_FATAL, "Cannot initialize the sub conversion context\n");
exit(1);
}
sws_scale(is->sub_convert_ctx,
(void*)sp->sub.rects[i]->data, sp->sub.rects[i]->linesize,
0, in_h, sp->subrects[i]->data, sp->subrects[i]->linesize);
sp->subrects[i]->w = out_w;
sp->subrects[i]->h = out_h;
sp->subrects[i]->x = sp->sub.rects[i]->x * out_w / in_w;
sp->subrects[i]->y = sp->sub.rects[i]->y * out_h / in_h;
}
sp->width = is->subdec.avctx->width;
sp->height = is->subdec.avctx->height;
sp->uploaded = 0;
/* now we can update the picture count */
frame_queue_push(&is->subpq);
@ -2765,9 +2677,6 @@ static int stream_component_open(VideoState *is, int stream_index)
is->video_stream = stream_index;
is->video_st = ic->streams[stream_index];
is->viddec_width = avctx->width;
is->viddec_height = avctx->height;
decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
if ((ret = decoder_start(&is->viddec, video_thread, is)) < 0)
goto out;
@ -3185,7 +3094,7 @@ static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
is->audio_volume = SDL_MIX_MAXVOLUME;
is->muted = 0;
is->av_sync_type = av_sync_type;
is->read_tid = SDL_CreateThread(read_thread, is);
is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
if (!is->read_tid) {
av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
fail:
@ -3276,27 +3185,17 @@ static void stream_cycle_channel(VideoState *is, int codec_type)
static void toggle_full_screen(VideoState *is)
{
#if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
/* OS X needs to reallocate the SDL overlays */
int i;
for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
is->pictq.queue[i].reallocate = 1;
#endif
is_full_screen = !is_full_screen;
video_open(is, 1, NULL);
SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
}
static void toggle_audio_display(VideoState *is)
{
int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
int next = is->show_mode;
do {
next = (next + 1) % SHOW_MODE_NB;
} while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
if (is->show_mode != next) {
fill_rectangle(screen,
is->xleft, is->ytop, is->width, is->height,
bgcolor, 1);
is->force_refresh = 1;
is->show_mode = next;
}
@ -3305,7 +3204,7 @@ static void toggle_audio_display(VideoState *is)
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
double remaining_time = 0.0;
SDL_PumpEvents();
while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
SDL_ShowCursor(0);
cursor_hidden = 1;
@ -3469,9 +3368,6 @@ static void event_loop(VideoState *cur_stream)
break;
}
break;
case SDL_VIDEOEXPOSE:
cur_stream->force_refresh = 1;
break;
case SDL_MOUSEBUTTONDOWN:
if (exit_on_mousedown) {
do_exit(cur_stream);
@ -3527,16 +3423,18 @@ static void event_loop(VideoState *cur_stream)
stream_seek(cur_stream, ts, 0, 0);
}
break;
case SDL_VIDEORESIZE:
screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
SDL_HWSURFACE|(is_full_screen?SDL_FULLSCREEN:SDL_RESIZABLE)|SDL_ASYNCBLIT|SDL_HWACCEL);
if (!screen) {
av_log(NULL, AV_LOG_FATAL, "Failed to set video mode\n");
do_exit(cur_stream);
}
screen_width = cur_stream->width = screen->w;
screen_height = cur_stream->height = screen->h;
cur_stream->force_refresh = 1;
case SDL_WINDOWEVENT:
switch (event.window.event) {
case SDL_WINDOWEVENT_RESIZED:
screen_width = cur_stream->width = event.window.data1;
screen_height = cur_stream->height = event.window.data2;
if (cur_stream->vis_texture) {
SDL_DestroyTexture(cur_stream->vis_texture);
cur_stream->vis_texture = NULL;
}
case SDL_WINDOWEVENT_EXPOSED:
cur_stream->force_refresh = 1;
}
break;
case SDL_QUIT:
case FF_QUIT_EVENT:
@ -3773,8 +3671,6 @@ int main(int argc, char **argv)
{
int flags;
VideoState *is;
char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
char alsa_bufsize[] = "SDL_AUDIO_ALSA_SET_BUFFER_SIZE=1";
init_dynload();
@ -3818,31 +3714,19 @@ int main(int argc, char **argv)
/* Try to work around an occasional ALSA buffer underflow issue when the
* period size is NPOT due to ALSA resampling by forcing the buffer size. */
if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
SDL_putenv(alsa_bufsize);
SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
}
if (display_disable)
SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
#if !defined(_WIN32) && !defined(__APPLE__)
flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
flags &= ~SDL_INIT_VIDEO;
if (SDL_Init (flags)) {
av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
exit(1);
}
if (!display_disable) {
const SDL_VideoInfo *vi = SDL_GetVideoInfo();
fs_screen_width = vi->current_w;
fs_screen_height = vi->current_h;
}
SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
SDL_EnableKeyRepeat(SDL_DEFAULT_REPEAT_DELAY, SDL_DEFAULT_REPEAT_INTERVAL);
if (av_lockmgr_register(lockmgr)) {
av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
do_exit(NULL);

ffprobe.c

@ -1815,7 +1815,8 @@ static void show_packet(WriterContext *w, InputFile *ifile, AVPacket *pkt, int p
print_val("size", pkt->size, unit_byte_str);
if (pkt->pos != -1) print_fmt ("pos", "%"PRId64, pkt->pos);
else print_str_opt("pos", "N/A");
print_fmt("flags", "%c", pkt->flags & AV_PKT_FLAG_KEY ? 'K' : '_');
print_fmt("flags", "%c%c", pkt->flags & AV_PKT_FLAG_KEY ? 'K' : '_',
pkt->flags & AV_PKT_FLAG_DISCARD ? 'D' : '_');
if (pkt->side_data_elems) {
int size;
@ -1883,8 +1884,8 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
else print_str_opt("media_type", "unknown");
print_int("stream_index", stream->index);
print_int("key_frame", frame->key_frame);
print_ts ("pkt_pts", frame->pkt_pts);
print_time("pkt_pts_time", frame->pkt_pts, &stream->time_base);
print_ts ("pkt_pts", frame->pts);
print_time("pkt_pts_time", frame->pts, &stream->time_base);
print_ts ("pkt_dts", frame->pkt_dts);
print_time("pkt_dts_time", frame->pkt_dts, &stream->time_base);
print_ts ("best_effort_timestamp", av_frame_get_best_effort_timestamp(frame));
@ -2267,6 +2268,19 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
else
print_str_opt("chroma_location", av_chroma_location_name(par->chroma_location));
if (par->field_order == AV_FIELD_PROGRESSIVE)
print_str("field_order", "progressive");
else if (par->field_order == AV_FIELD_TT)
print_str("field_order", "tt");
else if (par->field_order == AV_FIELD_BB)
print_str("field_order", "bb");
else if (par->field_order == AV_FIELD_TB)
print_str("field_order", "tb");
else if (par->field_order == AV_FIELD_BT)
print_str("field_order", "bt");
else
print_str_opt("field_order", "unknown");
#if FF_API_PRIVATE_OPT
if (dec_ctx && dec_ctx->timecode_frame_start >= 0) {
char tcbuf[AV_TIMECODE_STR_SIZE];
@ -2369,6 +2383,7 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
PRINT_DISPOSITION(VISUAL_IMPAIRED, "visual_impaired");
PRINT_DISPOSITION(CLEAN_EFFECTS, "clean_effects");
PRINT_DISPOSITION(ATTACHED_PIC, "attached_pic");
PRINT_DISPOSITION(TIMED_THUMBNAILS, "timed_thumbnails");
writer_print_section_footer(w);
}
@ -2610,11 +2625,8 @@ static int open_input_file(InputFile *ifile, const char *filename)
if (err < 0)
exit(1);
ist->dec_ctx->pkt_timebase = stream->time_base;
#if FF_API_LAVF_AVCTX
ist->dec_ctx->time_base = stream->codec->time_base;
ist->dec_ctx->framerate = stream->codec->framerate;
#endif
av_codec_set_pkt_timebase(ist->dec_ctx, stream->time_base);
ist->dec_ctx->framerate = stream->avg_frame_rate;
if (avcodec_open2(ist->dec_ctx, codec, &opts) < 0) {
av_log(NULL, AV_LOG_WARNING, "Could not open codec for input stream %d\n",
@ -3279,6 +3291,7 @@ int main(int argc, char **argv)
SET_DO_SHOW(FRAME_TAGS, frame_tags);
SET_DO_SHOW(PROGRAM_TAGS, program_tags);
SET_DO_SHOW(STREAM_TAGS, stream_tags);
SET_DO_SHOW(PROGRAM_STREAM_TAGS, stream_tags);
SET_DO_SHOW(PACKET_TAGS, packet_tags);
if (do_bitexact && (do_show_program_version || do_show_library_versions)) {

libavcodec/8bps.c

@ -41,7 +41,7 @@
static const enum AVPixelFormat pixfmt_rgb24[] = {
AV_PIX_FMT_BGR24, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE };
AV_PIX_FMT_BGR24, AV_PIX_FMT_0RGB32, AV_PIX_FMT_NONE };
typedef struct EightBpsContext {
AVCodecContext *avctx;
@ -65,6 +65,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
unsigned int dlen, p, row;
const unsigned char *lp, *dp, *ep;
unsigned char count;
unsigned int px_inc;
unsigned int planes = c->planes;
unsigned char *planemap = c->planemap;
int ret;
@ -77,6 +78,8 @@ static int decode_frame(AVCodecContext *avctx, void *data,
/* Set data pointer after line lengths */
dp = encoded + planes * (height << 1);
px_inc = planes + (avctx->pix_fmt == AV_PIX_FMT_0RGB32);
for (p = 0; p < planes; p++) {
/* Lines length pointer for this plane */
lp = encoded + p * (height << 1);
@ -95,21 +98,21 @@ static int decode_frame(AVCodecContext *avctx, void *data,
if ((count = *dp++) <= 127) {
count++;
dlen -= count + 1;
if (pixptr_end - pixptr < count * planes)
if (pixptr_end - pixptr < count * px_inc)
break;
if (ep - dp < count)
return AVERROR_INVALIDDATA;
while (count--) {
*pixptr = *dp++;
pixptr += planes;
pixptr += px_inc;
}
} else {
count = 257 - count;
if (pixptr_end - pixptr < count * planes)
if (pixptr_end - pixptr < count * px_inc)
break;
while (count--) {
*pixptr = *dp;
pixptr += planes;
pixptr += px_inc;
}
dp++;
dlen -= 2;
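
Note: the fix above distinguishes the per-pixel stride from the plane count: with AV_PIX_FMT_0RGB32 output the three colour planes interleave into 4-byte pixels, so each decoded byte must advance by px_inc (4), not by planes (3). The addressing in isolation (illustrative helper, not from the patch):

#include <stdint.h>

/* Illustrative only: copy one decoded plane row into packed output,
 * stepping px_inc bytes per pixel (3 for BGR24, 4 for 0RGB32). */
static void copy_plane_row(uint8_t *dst, const uint8_t *src,
                           unsigned width, unsigned px_inc)
{
    while (width--) {
        *dst = *src++;
        dst += px_inc;
    }
}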

libavcodec/Makefile

@ -10,6 +10,7 @@ HEADERS = avcodec.h \
dv_profile.h \
dxva2.h \
jni.h \
mediacodec.h \
qsv.h \
vaapi.h \
vda.h \
@ -35,6 +36,7 @@ OBJS = allcodecs.o \
imgconvert.o \
jni.o \
mathtables.o \
mediacodec.o \
options.o \
parser.o \
profiles.o \
@ -92,7 +94,7 @@ OBJS-$(CONFIG_LSP) += lsp.o
OBJS-$(CONFIG_LZF) += lzf.o
OBJS-$(CONFIG_MDCT) += mdct_fixed.o mdct_float.o mdct_fixed_32.o
OBJS-$(CONFIG_ME_CMP) += me_cmp.o
OBJS-$(CONFIG_MEDIACODEC) += mediacodecdec.o mediacodec_wrapper.o mediacodec_sw_buffer.o
OBJS-$(CONFIG_MEDIACODEC) += mediacodecdec_common.o mediacodec_surface.o mediacodec_wrapper.o mediacodec_sw_buffer.o
OBJS-$(CONFIG_MPEG_ER) += mpeg_er.o
OBJS-$(CONFIG_MPEGAUDIO) += mpegaudio.o mpegaudiodata.o \
mpegaudiodecheader.o
@ -161,7 +163,7 @@ OBJS-$(CONFIG_ALAC_DECODER) += alac.o alac_data.o alacdsp.o
OBJS-$(CONFIG_ALAC_ENCODER) += alacenc.o alac_data.o
OBJS-$(CONFIG_ALIAS_PIX_DECODER) += aliaspixdec.o
OBJS-$(CONFIG_ALIAS_PIX_ENCODER) += aliaspixenc.o
OBJS-$(CONFIG_ALS_DECODER) += alsdec.o bgmc.o mpeg4audio.o
OBJS-$(CONFIG_ALS_DECODER) += alsdec.o bgmc.o mlz.o mpeg4audio.o
OBJS-$(CONFIG_AMRNB_DECODER) += amrnbdec.o celp_filters.o \
celp_math.o acelp_filters.o \
acelp_vectors.o \
@ -236,7 +238,8 @@ OBJS-$(CONFIG_DCA_DECODER) += dcadec.o dca.o dcadata.o dcahuff.o \
OBJS-$(CONFIG_DCA_ENCODER) += dcaenc.o dca.o dcadata.o
OBJS-$(CONFIG_DDS_DECODER) += dds.o
OBJS-$(CONFIG_DIRAC_DECODER) += diracdec.o dirac.o diracdsp.o diractab.o \
dirac_arith.o mpeg12data.o dirac_dwt.o
dirac_arith.o mpeg12data.o dirac_dwt.o \
dirac_vlc.o
OBJS-$(CONFIG_DFA_DECODER) += dfa.o
OBJS-$(CONFIG_DNXHD_DECODER) += dnxhddec.o dnxhddata.o
OBJS-$(CONFIG_DNXHD_ENCODER) += dnxhdenc.o dnxhddata.o
@ -294,7 +297,7 @@ OBJS-$(CONFIG_G723_1_DECODER) += g723_1dec.o g723_1.o \
acelp_vectors.o celp_filters.o celp_math.o
OBJS-$(CONFIG_G723_1_ENCODER) += g723_1enc.o g723_1.o \
acelp_vectors.o celp_filters.o celp_math.o
OBJS-$(CONFIG_G729_DECODER) += g729dec.o lsp.o celp_math.o acelp_filters.o acelp_pitch_delay.o acelp_vectors.o g729postfilter.o
OBJS-$(CONFIG_G729_DECODER) += g729dec.o lsp.o celp_math.o celp_filters.o acelp_filters.o acelp_pitch_delay.o acelp_vectors.o g729postfilter.o
OBJS-$(CONFIG_GIF_DECODER) += gifdec.o lzw.o
OBJS-$(CONFIG_GIF_ENCODER) += gif.o lzwenc.o
OBJS-$(CONFIG_GSM_DECODER) += gsmdec.o gsmdec_data.o msgsmdec.o
@ -306,14 +309,14 @@ OBJS-$(CONFIG_H263_DECODER) += h263dec.o h263.o ituh263dec.o \
intelh263dec.o h263data.o
OBJS-$(CONFIG_H263_ENCODER) += mpeg4videoenc.o mpeg4video.o \
h263.o ituh263enc.o flvenc.o h263data.o
OBJS-$(CONFIG_H264_DECODER) += h264.o h264_cabac.o h264_cavlc.o \
OBJS-$(CONFIG_H264_DECODER) += h264dec.o h264_cabac.o h264_cavlc.o \
h264_direct.o h264_loopfilter.o \
h264_mb.o h264_picture.o h264_ps.o \
h264_refs.o h264_sei.o \
h264_slice.o h264data.o h264_parse.o \
h2645_parse.o
OBJS-$(CONFIG_H264_CUVID_DECODER) += cuvid.o
OBJS-$(CONFIG_H264_MEDIACODEC_DECODER) += mediacodecdec_h264.o
OBJS-$(CONFIG_H264_MEDIACODEC_DECODER) += mediacodecdec.o
OBJS-$(CONFIG_H264_MMAL_DECODER) += mmaldec.o
OBJS-$(CONFIG_H264_NVENC_ENCODER) += nvenc_h264.o
OBJS-$(CONFIG_NVENC_ENCODER) += nvenc_h264.o
@ -330,6 +333,7 @@ OBJS-$(CONFIG_HEVC_DECODER) += hevc.o hevc_mvs.o hevc_ps.o hevc_sei.o
hevc_cabac.o hevc_refs.o hevcpred.o \
hevcdsp.o hevc_filter.o h2645_parse.o hevc_data.o
OBJS-$(CONFIG_HEVC_CUVID_DECODER) += cuvid.o
OBJS-$(CONFIG_HEVC_MEDIACODEC_DECODER) += mediacodecdec.o hevc_parse.o
OBJS-$(CONFIG_HEVC_NVENC_ENCODER) += nvenc_hevc.o
OBJS-$(CONFIG_NVENC_HEVC_ENCODER) += nvenc_hevc.o
OBJS-$(CONFIG_HEVC_QSV_DECODER) += qsvdec_h2645.o
@ -379,6 +383,7 @@ OBJS-$(CONFIG_MJPEG_ENCODER) += mjpegenc.o mjpegenc_common.o
OBJS-$(CONFIG_MJPEGB_DECODER) += mjpegbdec.o
OBJS-$(CONFIG_MJPEG_VAAPI_ENCODER) += vaapi_encode_mjpeg.o
OBJS-$(CONFIG_MLP_DECODER) += mlpdec.o mlpdsp.o
OBJS-$(CONFIG_MLP_ENCODER) += mlpenc.o mlp.o
OBJS-$(CONFIG_MMVIDEO_DECODER) += mmvideo.o
OBJS-$(CONFIG_MOTIONPIXELS_DECODER) += motionpixels.o
OBJS-$(CONFIG_MOVTEXT_DECODER) += movtextdec.o ass.o
@ -408,6 +413,7 @@ OBJS-$(CONFIG_MPEG2_QSV_ENCODER) += qsvenc_mpeg2.o
OBJS-$(CONFIG_MPEG2VIDEO_DECODER) += mpeg12dec.o mpeg12.o mpeg12data.o
OBJS-$(CONFIG_MPEG2VIDEO_ENCODER) += mpeg12enc.o mpeg12.o
OBJS-$(CONFIG_MPEG4_DECODER) += xvididct.o
OBJS-$(CONFIG_MPEG4_MEDIACODEC_DECODER) += mediacodecdec.o
OBJS-$(CONFIG_MPEG4_OMX_ENCODER) += omx.o
OBJS-$(CONFIG_MPL2_DECODER) += mpl2dec.o ass.o
OBJS-$(CONFIG_MSA1_DECODER) += mss3.o
@ -542,6 +548,7 @@ OBJS-$(CONFIG_TIFF_DECODER) += tiff.o lzw.o faxcompr.o tiff_data.o ti
OBJS-$(CONFIG_TIFF_ENCODER) += tiffenc.o rle.o lzwenc.o tiff_data.o
OBJS-$(CONFIG_TMV_DECODER) += tmv.o cga_data.o
OBJS-$(CONFIG_TRUEHD_DECODER) += mlpdec.o mlpdsp.o
OBJS-$(CONFIG_TRUEHD_ENCODER) += mlpenc.o
OBJS-$(CONFIG_TRUEMOTION1_DECODER) += truemotion1.o
OBJS-$(CONFIG_TRUEMOTION2_DECODER) += truemotion2.o
OBJS-$(CONFIG_TRUEMOTION2RT_DECODER) += truemotion2rt.o
@ -549,7 +556,7 @@ OBJS-$(CONFIG_TRUESPEECH_DECODER) += truespeech.o
OBJS-$(CONFIG_TSCC_DECODER) += tscc.o msrledec.o
OBJS-$(CONFIG_TSCC2_DECODER) += tscc2.o
OBJS-$(CONFIG_TTA_DECODER) += tta.o ttadata.o ttadsp.o
OBJS-$(CONFIG_TTA_ENCODER) += ttaenc.o ttadata.o
OBJS-$(CONFIG_TTA_ENCODER) += ttaenc.o ttaencdsp.o ttadata.o
OBJS-$(CONFIG_TWINVQ_DECODER) += twinvqdec.o twinvq.o
OBJS-$(CONFIG_TXD_DECODER) += txd.o
OBJS-$(CONFIG_ULTI_DECODER) += ulti.o
@ -589,9 +596,11 @@ OBJS-$(CONFIG_VP6_DECODER) += vp6.o vp56.o vp56data.o \
OBJS-$(CONFIG_VP7_DECODER) += vp8.o vp56rac.o
OBJS-$(CONFIG_VP8_DECODER) += vp8.o vp56rac.o
OBJS-$(CONFIG_VP8_CUVID_DECODER) += cuvid.o
OBJS-$(CONFIG_VP8_MEDIACODEC_DECODER) += mediacodecdec.o
OBJS-$(CONFIG_VP9_DECODER) += vp9.o vp9dsp.o vp56rac.o vp9dsp_8bpp.o \
vp9dsp_10bpp.o vp9dsp_12bpp.o
OBJS-$(CONFIG_VP9_CUVID_DECODER) += cuvid.o
OBJS-$(CONFIG_VP9_MEDIACODEC_DECODER) += mediacodecdec.o
OBJS-$(CONFIG_VPLAYER_DECODER) += textdec.o ass.o
OBJS-$(CONFIG_VQA_DECODER) += vqavideo.o
OBJS-$(CONFIG_WAVPACK_DECODER) += wavpack.o
@ -686,6 +695,10 @@ OBJS-$(CONFIG_PCM_S32LE_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_S32LE_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_S32LE_PLANAR_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_S32LE_PLANAR_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_S64BE_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_S64BE_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_S64LE_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_S64LE_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_U8_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_U8_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_U16BE_DECODER) += pcm.o
@ -746,6 +759,7 @@ OBJS-$(CONFIG_ADPCM_SBPRO_4_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_SWF_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_SWF_ENCODER) += adpcmenc.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_THP_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_THP_LE_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_VIMA_DECODER) += vima.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_XA_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_YAMAHA_DECODER) += adpcm.o adpcm_data.o
@ -838,9 +852,9 @@ OBJS-$(CONFIG_AMR_NB_AT_DECODER) += audiotoolboxdec.o
OBJS-$(CONFIG_EAC3_AT_DECODER) += audiotoolboxdec.o
OBJS-$(CONFIG_GSM_MS_AT_DECODER) += audiotoolboxdec.o
OBJS-$(CONFIG_ILBC_AT_DECODER) += audiotoolboxdec.o
OBJS-$(CONFIG_MP1_AT_DECODER) += audiotoolboxdec.o mpegaudiodecheader.o
OBJS-$(CONFIG_MP2_AT_DECODER) += audiotoolboxdec.o mpegaudiodecheader.o
OBJS-$(CONFIG_MP3_AT_DECODER) += audiotoolboxdec.o mpegaudiodecheader.o
OBJS-$(CONFIG_MP1_AT_DECODER) += audiotoolboxdec.o mpegaudiodata.o mpegaudiodecheader.o
OBJS-$(CONFIG_MP2_AT_DECODER) += audiotoolboxdec.o mpegaudiodata.o mpegaudiodecheader.o
OBJS-$(CONFIG_MP3_AT_DECODER) += audiotoolboxdec.o mpegaudiodata.o mpegaudiodecheader.o
OBJS-$(CONFIG_PCM_MULAW_AT_DECODER) += audiotoolboxdec.o
OBJS-$(CONFIG_PCM_ALAW_AT_DECODER) += audiotoolboxdec.o
OBJS-$(CONFIG_QDMC_AT_DECODER) += audiotoolboxdec.o
@ -851,7 +865,6 @@ OBJS-$(CONFIG_ILBC_AT_ENCODER) += audiotoolboxenc.o
OBJS-$(CONFIG_PCM_ALAW_AT_ENCODER) += audiotoolboxenc.o
OBJS-$(CONFIG_PCM_MULAW_AT_ENCODER) += audiotoolboxenc.o
OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o
OBJS-$(CONFIG_LIBFDK_AAC_DECODER) += libfdk-aacdec.o
OBJS-$(CONFIG_LIBFDK_AAC_ENCODER) += libfdk-aacenc.o
OBJS-$(CONFIG_LIBGSM_DECODER) += libgsmdec.o
@ -861,11 +874,12 @@ OBJS-$(CONFIG_LIBGSM_MS_ENCODER) += libgsmenc.o
OBJS-$(CONFIG_LIBILBC_DECODER) += libilbc.o
OBJS-$(CONFIG_LIBILBC_ENCODER) += libilbc.o
OBJS-$(CONFIG_LIBKVAZAAR_ENCODER) += libkvazaar.o
OBJS-$(CONFIG_LIBMP3LAME_ENCODER) += libmp3lame.o mpegaudiodecheader.o
OBJS-$(CONFIG_LIBMP3LAME_ENCODER) += libmp3lame.o mpegaudiodata.o mpegaudiodecheader.o
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_DECODER) += libopencore-amr.o
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_ENCODER) += libopencore-amr.o
OBJS-$(CONFIG_LIBOPENCORE_AMRWB_DECODER) += libopencore-amr.o
OBJS-$(CONFIG_LIBOPENH264_ENCODER) += libopenh264enc.o
OBJS-$(CONFIG_LIBOPENH264_DECODER) += libopenh264dec.o libopenh264.o
OBJS-$(CONFIG_LIBOPENH264_ENCODER) += libopenh264enc.o libopenh264.o
OBJS-$(CONFIG_LIBOPENJPEG_DECODER) += libopenjpegdec.o
OBJS-$(CONFIG_LIBOPENJPEG_ENCODER) += libopenjpegenc.o
OBJS-$(CONFIG_LIBOPUS_DECODER) += libopusdec.o libopus.o \
@ -897,7 +911,7 @@ OBJS-$(CONFIG_LIBX264_ENCODER) += libx264.o
OBJS-$(CONFIG_LIBX265_ENCODER) += libx265.o
OBJS-$(CONFIG_LIBXAVS_ENCODER) += libxavs.o
OBJS-$(CONFIG_LIBXVID_ENCODER) += libxvid.o
OBJS-$(CONFIG_LIBZVBI_TELETEXT_DECODER) += libzvbi-teletextdec.o
OBJS-$(CONFIG_LIBZVBI_TELETEXT_DECODER) += libzvbi-teletextdec.o ass.o
# parsers
OBJS-$(CONFIG_AAC_LATM_PARSER) += latm_parser.o
@ -994,7 +1008,7 @@ SKIPHEADERS-$(CONFIG_JNI) += ffjni.h
SKIPHEADERS-$(CONFIG_LIBSCHROEDINGER) += libschroedinger.h
SKIPHEADERS-$(CONFIG_LIBVPX) += libvpx.h
SKIPHEADERS-$(CONFIG_LIBWEBP_ENCODER) += libwebpenc_common.h
SKIPHEADERS-$(CONFIG_MEDIACODEC) += mediacodecdec.h mediacodec_wrapper.h mediacodec_sw_buffer.h
SKIPHEADERS-$(CONFIG_MEDIACODEC) += mediacodecdec_common.h mediacodec_surface.h mediacodec_wrapper.h mediacodec_sw_buffer.h
SKIPHEADERS-$(CONFIG_NVENC) += nvenc.h
SKIPHEADERS-$(CONFIG_QSV) += qsv.h qsv_internal.h
SKIPHEADERS-$(CONFIG_QSVDEC) += qsvdec.h

View File

@ -88,7 +88,7 @@ static void encode_window_bands_info(AACEncContext *s, SingleChannelElement *sce
float next_minrd = INFINITY;
int next_mincb = 0;
abs_pow34_v(s->scoefs, sce->coeffs, 1024);
s->abs_pow34(s->scoefs, sce->coeffs, 1024);
start = win*128;
for (cb = 0; cb < CB_TOT_ALL; cb++) {
path[0][cb].cost = 0.0f;
@ -200,7 +200,7 @@ static void set_special_band_scalefactors(AACEncContext *s, SingleChannelElement
int bands = 0;
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
for (g = 0; g < sce->ics.num_swb; g++) {
for (g = 0; g < sce->ics.num_swb; g++) {
if (sce->zeroes[w*16+g])
continue;
if (sce->band_type[w*16+g] == INTENSITY_BT || sce->band_type[w*16+g] == INTENSITY_BT2) {
@ -220,7 +220,7 @@ static void set_special_band_scalefactors(AACEncContext *s, SingleChannelElement
/* Clip the scalefactor indices */
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
for (g = 0; g < sce->ics.num_swb; g++) {
for (g = 0; g < sce->ics.num_swb; g++) {
if (sce->zeroes[w*16+g])
continue;
if (sce->band_type[w*16+g] == INTENSITY_BT || sce->band_type[w*16+g] == INTENSITY_BT2) {
@ -299,7 +299,7 @@ static void search_for_quantizers_anmr(AVCodecContext *avctx, AACEncContext *s,
}
}
idx = 1;
abs_pow34_v(s->scoefs, sce->coeffs, 1024);
s->abs_pow34(s->scoefs, sce->coeffs, 1024);
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
start = w*128;
for (g = 0; g < sce->ics.num_swb; g++) {
@ -387,7 +387,7 @@ static void search_for_quantizers_anmr(AVCodecContext *avctx, AACEncContext *s,
}
//set the same quantizers inside window groups
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w])
for (g = 0; g < sce->ics.num_swb; g++)
for (g = 0; g < sce->ics.num_swb; g++)
for (w2 = 1; w2 < sce->ics.group_len[w]; w2++)
sce->sf_idx[(w+w2)*16+g] = sce->sf_idx[w*16+g];
}
@ -396,34 +396,148 @@ static void search_for_quantizers_fast(AVCodecContext *avctx, AACEncContext *s,
SingleChannelElement *sce,
const float lambda)
{
int i, w, w2, g;
int minq = 255;
int start = 0, i, w, w2, g;
int destbits = avctx->bit_rate * 1024.0 / avctx->sample_rate / avctx->channels * (lambda / 120.f);
float dists[128] = { 0 }, uplims[128] = { 0 };
float maxvals[128];
int fflag, minscaler;
int its = 0;
int allz = 0;
float minthr = INFINITY;
memset(sce->sf_idx, 0, sizeof(sce->sf_idx));
// for values above this the decoder might end up in an endless loop
// due to always having more bits than what can be encoded.
destbits = FFMIN(destbits, 5800);
//some heuristic to determine initial quantizers will reduce search time
//determine zero bands and upper limits
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
start = 0;
for (g = 0; g < sce->ics.num_swb; g++) {
int nz = 0;
float uplim = 0.0f, energy = 0.0f;
for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
FFPsyBand *band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g];
if (band->energy <= band->threshold) {
sce->sf_idx[(w+w2)*16+g] = 218;
uplim += band->threshold;
energy += band->energy;
if (band->energy <= band->threshold || band->threshold == 0.0f) {
sce->zeroes[(w+w2)*16+g] = 1;
} else {
sce->sf_idx[(w+w2)*16+g] = av_clip(SCALE_ONE_POS - SCALE_DIV_512 + log2f(band->threshold), 80, 218);
sce->zeroes[(w+w2)*16+g] = 0;
continue;
}
minq = FFMIN(minq, sce->sf_idx[(w+w2)*16+g]);
nz = 1;
}
uplims[w*16+g] = uplim *512;
sce->band_type[w*16+g] = 0;
sce->zeroes[w*16+g] = !nz;
if (nz)
minthr = FFMIN(minthr, uplim);
allz |= nz;
start += sce->ics.swb_sizes[g];
}
}
for (i = 0; i < 128; i++) {
sce->sf_idx[i] = 140;
//av_clip(sce->sf_idx[i], minq, minq + SCALE_MAX_DIFF - 1);
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
for (g = 0; g < sce->ics.num_swb; g++) {
if (sce->zeroes[w*16+g]) {
sce->sf_idx[w*16+g] = SCALE_ONE_POS;
continue;
}
sce->sf_idx[w*16+g] = SCALE_ONE_POS + FFMIN(log2f(uplims[w*16+g]/minthr)*4,59);
}
}
//set the same quantizers inside window groups
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w])
for (g = 0; g < sce->ics.num_swb; g++)
for (w2 = 1; w2 < sce->ics.group_len[w]; w2++)
sce->sf_idx[(w+w2)*16+g] = sce->sf_idx[w*16+g];
if (!allz)
return;
s->abs_pow34(s->scoefs, sce->coeffs, 1024);
ff_quantize_band_cost_cache_init(s);
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
start = w*128;
for (g = 0; g < sce->ics.num_swb; g++) {
const float *scaled = s->scoefs + start;
maxvals[w*16+g] = find_max_val(sce->ics.group_len[w], sce->ics.swb_sizes[g], scaled);
start += sce->ics.swb_sizes[g];
}
}
//perform two-loop search
//outer loop - improve quality
do {
int tbits, qstep;
minscaler = sce->sf_idx[0];
//inner loop - quantize spectrum to fit into given number of bits
qstep = its ? 1 : 32;
do {
int prev = -1;
tbits = 0;
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
start = w*128;
for (g = 0; g < sce->ics.num_swb; g++) {
const float *coefs = sce->coeffs + start;
const float *scaled = s->scoefs + start;
int bits = 0;
int cb;
float dist = 0.0f;
if (sce->zeroes[w*16+g] || sce->sf_idx[w*16+g] >= 218) {
start += sce->ics.swb_sizes[g];
continue;
}
minscaler = FFMIN(minscaler, sce->sf_idx[w*16+g]);
cb = find_min_book(maxvals[w*16+g], sce->sf_idx[w*16+g]);
for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
int b;
dist += quantize_band_cost_cached(s, w + w2, g,
coefs + w2*128,
scaled + w2*128,
sce->ics.swb_sizes[g],
sce->sf_idx[w*16+g],
cb, 1.0f, INFINITY,
&b, NULL, 0);
bits += b;
}
dists[w*16+g] = dist - bits;
if (prev != -1) {
bits += ff_aac_scalefactor_bits[sce->sf_idx[w*16+g] - prev + SCALE_DIFF_ZERO];
}
tbits += bits;
start += sce->ics.swb_sizes[g];
prev = sce->sf_idx[w*16+g];
}
}
if (tbits > destbits) {
for (i = 0; i < 128; i++)
if (sce->sf_idx[i] < 218 - qstep)
sce->sf_idx[i] += qstep;
} else {
for (i = 0; i < 128; i++)
if (sce->sf_idx[i] > 60 - qstep)
sce->sf_idx[i] -= qstep;
}
qstep >>= 1;
if (!qstep && tbits > destbits*1.02 && sce->sf_idx[0] < 217)
qstep = 1;
} while (qstep);
fflag = 0;
minscaler = av_clip(minscaler, 60, 255 - SCALE_MAX_DIFF);
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
for (g = 0; g < sce->ics.num_swb; g++) {
int prevsc = sce->sf_idx[w*16+g];
if (dists[w*16+g] > uplims[w*16+g] && sce->sf_idx[w*16+g] > 60) {
if (find_min_book(maxvals[w*16+g], sce->sf_idx[w*16+g]-1))
sce->sf_idx[w*16+g]--;
else //Try to make sure there is some energy in every band
sce->sf_idx[w*16+g]-=2;
}
sce->sf_idx[w*16+g] = av_clip(sce->sf_idx[w*16+g], minscaler, minscaler + SCALE_MAX_DIFF);
sce->sf_idx[w*16+g] = FFMIN(sce->sf_idx[w*16+g], 219);
if (sce->sf_idx[w*16+g] != prevsc)
fflag = 1;
sce->band_type[w*16+g] = find_min_book(maxvals[w*16+g], sce->sf_idx[w*16+g]);
}
}
its++;
} while (fflag && its < 10);
}
static void search_for_pns(AACEncContext *s, AVCodecContext *avctx, SingleChannelElement *sce)
@ -467,7 +581,7 @@ static void search_for_pns(AACEncContext *s, AVCodecContext *avctx, SingleChanne
ff_init_nextband_map(sce, nextband);
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
int wstart = w*128;
for (g = 0; g < sce->ics.num_swb; g++) {
for (g = 0; g < sce->ics.num_swb; g++) {
int noise_sfi;
float dist1 = 0.0f, dist2 = 0.0f, noise_amp;
float pns_energy = 0.0f, pns_tgt_energy, energy_ratio, dist_thresh;
@ -529,19 +643,17 @@ static void search_for_pns(AACEncContext *s, AVCodecContext *avctx, SingleChanne
float band_energy, scale, pns_senergy;
const int start_c = (w+w2)*128+sce->ics.swb_offset[g];
band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g];
for (i = 0; i < sce->ics.swb_sizes[g]; i+=2) {
double rnd[2];
av_bmg_get(&s->lfg, rnd);
PNS[i+0] = (float)rnd[0];
PNS[i+1] = (float)rnd[1];
for (i = 0; i < sce->ics.swb_sizes[g]; i++) {
s->random_state = lcg_random(s->random_state);
PNS[i] = s->random_state;
}
band_energy = s->fdsp->scalarproduct_float(PNS, PNS, sce->ics.swb_sizes[g]);
scale = noise_amp/sqrtf(band_energy);
s->fdsp->vector_fmul_scalar(PNS, PNS, scale, sce->ics.swb_sizes[g]);
pns_senergy = s->fdsp->scalarproduct_float(PNS, PNS, sce->ics.swb_sizes[g]);
pns_energy += pns_senergy;
abs_pow34_v(NOR34, &sce->coeffs[start_c], sce->ics.swb_sizes[g]);
abs_pow34_v(PNS34, PNS, sce->ics.swb_sizes[g]);
s->abs_pow34(NOR34, &sce->coeffs[start_c], sce->ics.swb_sizes[g]);
s->abs_pow34(PNS34, PNS, sce->ics.swb_sizes[g]);
dist1 += quantize_band_cost(s, &sce->coeffs[start_c],
NOR34,
sce->ics.swb_sizes[g],
@ -603,7 +715,7 @@ static void mark_pns(AACEncContext *s, AVCodecContext *avctx, SingleChannelEleme
memcpy(sce->band_alt, sce->band_type, sizeof(sce->band_type));
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
for (g = 0; g < sce->ics.num_swb; g++) {
for (g = 0; g < sce->ics.num_swb; g++) {
float sfb_energy = 0.0f, threshold = 0.0f, spread = 2.0f;
float min_energy = -1.0f, max_energy = 0.0f;
const int start = sce->ics.swb_offset[g];
@ -645,8 +757,9 @@ static void search_for_ms(AACEncContext *s, ChannelElement *cpe)
{
int start = 0, i, w, w2, g, sid_sf_boost, prev_mid, prev_side;
uint8_t nextband0[128], nextband1[128];
float M[128], S[128];
float *L34 = s->scoefs, *R34 = s->scoefs + 128, *M34 = s->scoefs + 128*2, *S34 = s->scoefs + 128*3;
float *M = s->scoefs + 128*0, *S = s->scoefs + 128*1;
float *L34 = s->scoefs + 128*2, *R34 = s->scoefs + 128*3;
float *M34 = s->scoefs + 128*4, *S34 = s->scoefs + 128*5;
const float lambda = s->lambda;
const float mslambda = FFMIN(1.0f, lambda / 120.f);
SingleChannelElement *sce0 = &cpe->ch[0];
@ -662,7 +775,7 @@ static void search_for_ms(AACEncContext *s, ChannelElement *cpe)
prev_side = sce1->sf_idx[0];
for (w = 0; w < sce0->ics.num_windows; w += sce0->ics.group_len[w]) {
start = 0;
for (g = 0; g < sce0->ics.num_swb; g++) {
for (g = 0; g < sce0->ics.num_swb; g++) {
float bmax = bval2bmax(g * 17.0f / sce0->ics.num_swb) / 0.0045f;
if (!cpe->is_mask[w*16+g])
cpe->ms_mask[w*16+g] = 0;
@ -677,8 +790,8 @@ static void search_for_ms(AACEncContext *s, ChannelElement *cpe)
S[i] = M[i]
- sce1->coeffs[start+(w+w2)*128+i];
}
abs_pow34_v(M34, M, sce0->ics.swb_sizes[g]);
abs_pow34_v(S34, S, sce0->ics.swb_sizes[g]);
s->abs_pow34(M34, M, sce0->ics.swb_sizes[g]);
s->abs_pow34(S34, S, sce0->ics.swb_sizes[g]);
for (i = 0; i < sce0->ics.swb_sizes[g]; i++ ) {
Mmax = FFMAX(Mmax, M34[i]);
Smax = FFMAX(Smax, S34[i]);
@ -721,10 +834,10 @@ static void search_for_ms(AACEncContext *s, ChannelElement *cpe)
- sce1->coeffs[start+(w+w2)*128+i];
}
abs_pow34_v(L34, sce0->coeffs+start+(w+w2)*128, sce0->ics.swb_sizes[g]);
abs_pow34_v(R34, sce1->coeffs+start+(w+w2)*128, sce0->ics.swb_sizes[g]);
abs_pow34_v(M34, M, sce0->ics.swb_sizes[g]);
abs_pow34_v(S34, S, sce0->ics.swb_sizes[g]);
s->abs_pow34(L34, sce0->coeffs+start+(w+w2)*128, sce0->ics.swb_sizes[g]);
s->abs_pow34(R34, sce1->coeffs+start+(w+w2)*128, sce0->ics.swb_sizes[g]);
s->abs_pow34(M34, M, sce0->ics.swb_sizes[g]);
s->abs_pow34(S34, S, sce0->ics.swb_sizes[g]);
dist1 += quantize_band_cost(s, &sce0->coeffs[start + (w+w2)*128],
L34,
sce0->ics.swb_sizes[g],
@ -828,7 +941,7 @@ AACCoefficientsEncoder ff_aac_coders[AAC_CODER_NB] = {
},
[AAC_CODER_FAST] = {
search_for_quantizers_fast,
encode_window_bands_info,
codebook_trellis_rate,
quantize_and_encode_band,
ff_aac_encode_tns_info,
ff_aac_encode_ltp_info,

View File

@ -70,7 +70,7 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
float next_minbits = INFINITY;
int next_mincb = 0;
abs_pow34_v(s->scoefs, sce->coeffs, 1024);
s->abs_pow34(s->scoefs, sce->coeffs, 1024);
start = win*128;
for (cb = 0; cb < CB_TOT_ALL; cb++) {
path[0][cb].cost = run_bits+4;

View File

@ -87,7 +87,7 @@ static void search_for_quantizers_twoloop(AVCodecContext *avctx,
* will keep iterating until it fails to lower it or it reaches
* ulimit * rdlambda. Keeping it low increases quality on difficult
* signals, but lower it too much, and bits will be taken from weak
* signals, creating "holes". A balance is necesary.
* signals, creating "holes". A balance is necessary.
* rdmax and rdmin specify the relative deviation from rdlambda
* allowed for tonality compensation
*/
@ -291,7 +291,7 @@ static void search_for_quantizers_twoloop(AVCodecContext *avctx,
if (!allz)
return;
abs_pow34_v(s->scoefs, sce->coeffs, 1024);
s->abs_pow34(s->scoefs, sce->coeffs, 1024);
ff_quantize_band_cost_cache_init(s);
for (i = 0; i < sizeof(minsf) / sizeof(minsf[0]); ++i)

View File

@ -999,9 +999,9 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
/* Coder limitations */
s->coder = &ff_aac_coders[s->options.coder];
if (s->options.coder != AAC_CODER_TWOLOOP) {
if (s->options.coder == AAC_CODER_ANMR) {
ERROR_IF(avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL,
"Coders other than twoloop require -strict -2 and some may be removed in the future\n");
"The ANMR coder is considered experimental, add -strict -2 to enable!\n");
s->options.intensity_stereo = 0;
s->options.pns = 0;
}
@ -1031,7 +1031,13 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
goto fail;
s->psypp = ff_psy_preprocess_init(avctx);
ff_lpc_init(&s->lpc, 2*avctx->frame_size, TNS_MAX_ORDER, FF_LPC_TYPE_LEVINSON);
av_lfg_init(&s->lfg, 0x72adca55);
s->random_state = 0x1f2e3d4c;
s->abs_pow34 = abs_pow34_v;
s->quant_bands = quantize_bands;
if (ARCH_X86)
ff_aac_dsp_init_x86(s);
if (HAVE_MIPSDSP)
ff_aac_coder_init_mips(s);

View File

@ -23,7 +23,6 @@
#define AVCODEC_AACENC_H
#include "libavutil/float_dsp.h"
#include "libavutil/lfg.h"
#include "avcodec.h"
#include "put_bits.h"
@ -100,7 +99,6 @@ typedef struct AACEncContext {
FFTContext mdct1024; ///< long (1024 samples) frame transform context
FFTContext mdct128; ///< short (128 samples) frame transform context
AVFloatDSPContext *fdsp;
AVLFG lfg; ///< PRNG needed for PNS
float *planar_samples[8]; ///< saved preprocessed input
int profile; ///< copied from avctx
@ -129,11 +127,17 @@ typedef struct AACEncContext {
uint16_t quantize_band_cost_cache_generation;
AACQuantizeBandCostCacheEntry quantize_band_cost_cache[256][128]; ///< memoization area for quantize_band_cost
void (*abs_pow34)(float *out, const float *in, const int size);
void (*quant_bands)(int *out, const float *in, const float *scaled,
int size, int is_signed, int maxval, const float Q34,
const float rounding);
struct {
float *samples;
} buffer;
} AACEncContext;
void ff_aac_dsp_init_x86(AACEncContext *s);
void ff_aac_coder_init_mips(AACEncContext *c);
void ff_quantize_band_cost_cache_init(struct AACEncContext *s);

View File

@ -59,9 +59,9 @@ struct AACISError ff_aac_is_encoding_err(AACEncContext *s, ChannelElement *cpe,
float minthr = FFMIN(band0->threshold, band1->threshold);
for (i = 0; i < sce0->ics.swb_sizes[g]; i++)
IS[i] = (L[start+(w+w2)*128+i] + phase*R[start+(w+w2)*128+i])*sqrt(ener0/ener01);
abs_pow34_v(L34, &L[start+(w+w2)*128], sce0->ics.swb_sizes[g]);
abs_pow34_v(R34, &R[start+(w+w2)*128], sce0->ics.swb_sizes[g]);
abs_pow34_v(I34, IS, sce0->ics.swb_sizes[g]);
s->abs_pow34(L34, &L[start+(w+w2)*128], sce0->ics.swb_sizes[g]);
s->abs_pow34(R34, &R[start+(w+w2)*128], sce0->ics.swb_sizes[g]);
s->abs_pow34(I34, IS, sce0->ics.swb_sizes[g]);
maxval = find_max_val(1, sce0->ics.swb_sizes[g], I34);
is_band_type = find_min_book(maxval, is_sf_idx);
dist1 += quantize_band_cost(s, &L[start + (w+w2)*128], L34,

View File

@ -190,8 +190,8 @@ void ff_aac_search_for_ltp(AACEncContext *s, SingleChannelElement *sce,
FFPsyBand *band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g];
for (i = 0; i < sce->ics.swb_sizes[g]; i++)
PCD[i] = sce->coeffs[start+(w+w2)*128+i] - sce->lcoeffs[start+(w+w2)*128+i];
abs_pow34_v(C34, &sce->coeffs[start+(w+w2)*128], sce->ics.swb_sizes[g]);
abs_pow34_v(PCD34, PCD, sce->ics.swb_sizes[g]);
s->abs_pow34(C34, &sce->coeffs[start+(w+w2)*128], sce->ics.swb_sizes[g]);
s->abs_pow34(PCD34, PCD, sce->ics.swb_sizes[g]);
dist1 += quantize_band_cost(s, &sce->coeffs[start+(w+w2)*128], C34, sce->ics.swb_sizes[g],
sce->sf_idx[(w+w2)*16+g], sce->band_type[(w+w2)*16+g],
s->lambda/band->threshold, INFINITY, &bits_tmp1, NULL, 0);

View File

@ -270,7 +270,7 @@ void ff_aac_search_for_pred(AACEncContext *s, SingleChannelElement *sce)
continue;
/* Normal coefficients */
abs_pow34_v(O34, &sce->coeffs[start_coef], num_coeffs);
s->abs_pow34(O34, &sce->coeffs[start_coef], num_coeffs);
dist1 = quantize_and_encode_band_cost(s, NULL, &sce->coeffs[start_coef], NULL,
O34, num_coeffs, sce->sf_idx[sfb],
cb_n, s->lambda / band->threshold, INFINITY, &cost1, NULL, 0);
@ -279,7 +279,7 @@ void ff_aac_search_for_pred(AACEncContext *s, SingleChannelElement *sce)
/* Encoded coefficients - needed for #bits, band type and quant. error */
for (i = 0; i < num_coeffs; i++)
SENT[i] = sce->coeffs[start_coef + i] - sce->prcoeffs[start_coef + i];
abs_pow34_v(S34, SENT, num_coeffs);
s->abs_pow34(S34, SENT, num_coeffs);
if (cb_n < RESERVED_BT)
cb_p = av_clip(find_min_book(find_max_val(1, num_coeffs, S34), sce->sf_idx[sfb]), cb_min, cb_max);
else
@ -291,7 +291,7 @@ void ff_aac_search_for_pred(AACEncContext *s, SingleChannelElement *sce)
/* Reconstructed coefficients - needed for distortion measurements */
for (i = 0; i < num_coeffs; i++)
sce->prcoeffs[start_coef + i] += QERR[i] != 0.0f ? (sce->prcoeffs[start_coef + i] - QERR[i]) : 0.0f;
abs_pow34_v(P34, &sce->prcoeffs[start_coef], num_coeffs);
s->abs_pow34(P34, &sce->prcoeffs[start_coef], num_coeffs);
if (cb_n < RESERVED_BT)
cb_p = av_clip(find_min_book(find_max_val(1, num_coeffs, P34), sce->sf_idx[sfb]), cb_min, cb_max);
else

View File

@ -74,10 +74,10 @@ static av_always_inline float quantize_and_encode_band_cost_template(
return cost * lambda;
}
if (!scaled) {
abs_pow34_v(s->scoefs, in, size);
s->abs_pow34(s->scoefs, in, size);
scaled = s->scoefs;
}
quantize_bands(s->qcoefs, in, scaled, size, Q34, !BT_UNSIGNED, aac_cb_maxval[cb], ROUNDING);
s->quant_bands(s->qcoefs, in, scaled, size, !BT_UNSIGNED, aac_cb_maxval[cb], Q34, ROUNDING);
if (BT_UNSIGNED) {
off = 0;
} else {

View File

@ -63,7 +63,7 @@ static inline int quant(float coef, const float Q, const float rounding)
}
static inline void quantize_bands(int *out, const float *in, const float *scaled,
int size, float Q34, int is_signed, int maxval,
int size, int is_signed, int maxval, const float Q34,
const float rounding)
{
int i;
@ -252,6 +252,19 @@ static inline int ff_sfdelta_can_replace(const SingleChannelElement *sce,
&& sce->sf_idx[nextband[band]] <= (new_sf + SCALE_MAX_DIFF);
}
/**
* linear congruential pseudorandom number generator
*
* @param previous_val the previous state (output) of the generator
*
* @return a 32-bit pseudorandom integer
*/
static av_always_inline int lcg_random(unsigned previous_val)
{
union { unsigned u; int s; } v = { previous_val * 1664525u + 1013904223 };
return v.s;
}
#define ERROR_IF(cond, ...) \
if (cond) { \
av_log(avctx, AV_LOG_ERROR, __VA_ARGS__); \

View File

@ -14,7 +14,7 @@ OBJS-$(CONFIG_VIDEODSP) += aarch64/videodsp_init.o
# decoders/encoders
OBJS-$(CONFIG_DCA_DECODER) += aarch64/synth_filter_init.o
OBJS-$(CONFIG_RV40_DECODER) += aarch64/rv40dsp_init_aarch64.o
OBJS-$(CONFIG_VC1_DECODER) += aarch64/vc1dsp_init_aarch64.o
OBJS-$(CONFIG_VC1DSP) += aarch64/vc1dsp_init_aarch64.o
OBJS-$(CONFIG_VORBIS_DECODER) += aarch64/vorbisdsp_init.o
# ARMv8 optimizations

View File

@ -445,7 +445,7 @@ endconst
h264_chroma_mc4 avg, rv40
#endif
#if CONFIG_VC1_DECODER
#if CONFIG_VC1DSP
h264_chroma_mc8 put, vc1
h264_chroma_mc8 avg, vc1
h264_chroma_mc4 put, vc1

View File

@ -77,3 +77,23 @@ wrap(avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt,
{
testneonclobbers(avcodec_encode_video2, avctx, avpkt, frame, got_packet_ptr);
}
wrap(avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt))
{
testneonclobbers(avcodec_send_packet, avctx, avpkt);
}
wrap(avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame))
{
testneonclobbers(avcodec_receive_frame, avctx, frame);
}
wrap(avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame))
{
testneonclobbers(avcodec_send_frame, avctx, frame);
}
wrap(avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt))
{
testneonclobbers(avcodec_receive_packet, avctx, avpkt);
}
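These wrappers exercise the new decoupled send/receive codec API (avcodec_send_packet()/avcodec_receive_frame() and their encoding counterparts). As a hedged illustration only, not part of this commit, a caller-side decode loop over this API could look like the following sketch, where dec_ctx, frame and pkt are assumed to be an opened decoder context, an allocated AVFrame and a demuxed AVPacket:
/* minimal sketch of the new decode API; error handling trimmed */
int ret = avcodec_send_packet(dec_ctx, pkt);     /* NULL pkt flushes */
if (ret < 0)
    return ret;
while (ret >= 0) {
    ret = avcodec_receive_frame(dec_ctx, frame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        break;                       /* need more input, or fully drained */
    if (ret < 0)
        return ret;                  /* decoding error */
    /* ... use frame here ... */
    av_frame_unref(frame);
}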

View File

@ -24,9 +24,10 @@
* Common code between the AC-3 encoder and decoder.
*/
#include "libavutil/common.h"
#include "avcodec.h"
#include "ac3.h"
#include "get_bits.h"
/**
* Starting frequency coefficient bin for each critical band.

View File

@ -87,7 +87,7 @@ typedef int16_t SHORTFLOAT;
#define AC3_NORM(norm) (1.0f/(norm))
#define AC3_MUL(a,b) ((a) * (b))
#define AC3_RANGE(x) (dynamic_range_tab[(x)])
#define AC3_HEAVY_RANGE(x) (heavy_dynamic_range_tab[(x)])
#define AC3_HEAVY_RANGE(x) (ff_ac3_heavy_dynamic_range_tab[(x)])
#define AC3_DYNAMIC_RANGE(x) (powf(x, s->drc_scale))
#define AC3_SPX_BLEND(x) (x)* (1.0f/32)
#define AC3_DYNAMIC_RANGE1 1.0f

View File

@ -63,9 +63,11 @@ static const uint8_t quantization_tab[16] = {
5, 6, 7, 8, 9, 10, 11, 12, 14, 16
};
#if (!USE_FIXED)
/** dynamic range table. converts codes to scale factors. */
static float dynamic_range_tab[256];
static float heavy_dynamic_range_tab[256];
float ff_ac3_heavy_dynamic_range_tab[256];
#endif
/** Adjustments in dB gain */
static const float gain_levels[9] = {
@ -159,6 +161,7 @@ static av_cold void ac3_tables_init(void)
b5_mantissas[i] = symmetric_dequant(i, 15);
}
#if (!USE_FIXED)
/* generate dynamic range table
reference: Section 7.7.1 Dynamic Range Control */
for (i = 0; i < 256; i++) {
@ -170,9 +173,9 @@ static av_cold void ac3_tables_init(void)
reference: Section 7.7.2 Heavy Compression */
for (i = 0; i < 256; i++) {
int v = (i >> 4) - ((i >> 7) << 4) - 4;
heavy_dynamic_range_tab[i] = powf(2.0f, v) * ((i & 0xF) | 0x10);
ff_ac3_heavy_dynamic_range_tab[i] = powf(2.0f, v) * ((i & 0xF) | 0x10);
}
#endif
}
/**

View File

@ -260,4 +260,8 @@ static void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch);
*/
static void ff_eac3_apply_spectral_extension(AC3DecodeContext *s);
#if (!USE_FIXED)
extern float ff_ac3_heavy_dynamic_range_tab[256];
#endif
#endif /* AVCODEC_AC3DEC_H */

View File

@ -909,8 +909,8 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
case AV_CODEC_ID_ADPCM_MTAF:
for (channel = 0; channel < avctx->channels; channel+=2) {
bytestream2_skipu(&gb, 4);
c->status[channel ].step = bytestream2_get_le16u(&gb);
c->status[channel + 1].step = bytestream2_get_le16u(&gb);
c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
bytestream2_skipu(&gb, 2);
c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);

View File

@ -38,6 +38,7 @@
#define DEFAULT_MAX_PRED_ORDER 6
#define DEFAULT_MIN_PRED_ORDER 4
#define ALAC_MAX_LPC_PRECISION 9
#define ALAC_MIN_LPC_SHIFT 0
#define ALAC_MAX_LPC_SHIFT 9
#define ALAC_CHMODE_LEFT_RIGHT 0
@ -171,7 +172,8 @@ static void calc_predictor_params(AlacEncodeContext *s, int ch)
s->max_prediction_order,
ALAC_MAX_LPC_PRECISION, coefs, shift,
FF_LPC_TYPE_LEVINSON, 0,
ORDER_METHOD_EST, ALAC_MAX_LPC_SHIFT, 1);
ORDER_METHOD_EST, ALAC_MIN_LPC_SHIFT,
ALAC_MAX_LPC_SHIFT, 1);
s->lpc[ch].lpc_order = opt_order;
s->lpc[ch].lpc_quant = shift[opt_order-1];

View File

@ -67,11 +67,13 @@ void avcodec_register_all(void)
initialized = 1;
/* hardware accelerators */
REGISTER_HWACCEL(H263_CUVID, h263_cuvid);
REGISTER_HWACCEL(H263_VAAPI, h263_vaapi);
REGISTER_HWACCEL(H263_VIDEOTOOLBOX, h263_videotoolbox);
REGISTER_HWACCEL(H264_CUVID, h264_cuvid);
REGISTER_HWACCEL(H264_D3D11VA, h264_d3d11va);
REGISTER_HWACCEL(H264_DXVA2, h264_dxva2);
REGISTER_HWACCEL(H264_MEDIACODEC, h264_mediacodec);
REGISTER_HWACCEL(H264_MMAL, h264_mmal);
REGISTER_HWACCEL(H264_QSV, h264_qsv);
REGISTER_HWACCEL(H264_VAAPI, h264_vaapi);
@ -82,12 +84,16 @@ void avcodec_register_all(void)
REGISTER_HWACCEL(HEVC_CUVID, hevc_cuvid);
REGISTER_HWACCEL(HEVC_D3D11VA, hevc_d3d11va);
REGISTER_HWACCEL(HEVC_DXVA2, hevc_dxva2);
REGISTER_HWACCEL(HEVC_MEDIACODEC, hevc_mediacodec);
REGISTER_HWACCEL(HEVC_QSV, hevc_qsv);
REGISTER_HWACCEL(HEVC_VAAPI, hevc_vaapi);
REGISTER_HWACCEL(HEVC_VDPAU, hevc_vdpau);
REGISTER_HWACCEL(MJPEG_CUVID, mjpeg_cuvid);
REGISTER_HWACCEL(MPEG1_CUVID, mpeg1_cuvid);
REGISTER_HWACCEL(MPEG1_XVMC, mpeg1_xvmc);
REGISTER_HWACCEL(MPEG1_VDPAU, mpeg1_vdpau);
REGISTER_HWACCEL(MPEG1_VIDEOTOOLBOX, mpeg1_videotoolbox);
REGISTER_HWACCEL(MPEG2_CUVID, mpeg2_cuvid);
REGISTER_HWACCEL(MPEG2_XVMC, mpeg2_xvmc);
REGISTER_HWACCEL(MPEG2_D3D11VA, mpeg2_d3d11va);
REGISTER_HWACCEL(MPEG2_DXVA2, mpeg2_dxva2);
@ -96,6 +102,8 @@ void avcodec_register_all(void)
REGISTER_HWACCEL(MPEG2_VAAPI, mpeg2_vaapi);
REGISTER_HWACCEL(MPEG2_VDPAU, mpeg2_vdpau);
REGISTER_HWACCEL(MPEG2_VIDEOTOOLBOX, mpeg2_videotoolbox);
REGISTER_HWACCEL(MPEG4_CUVID, mpeg4_cuvid);
REGISTER_HWACCEL(MPEG4_MEDIACODEC, mpeg4_mediacodec);
REGISTER_HWACCEL(MPEG4_MMAL, mpeg4_mmal);
REGISTER_HWACCEL(MPEG4_VAAPI, mpeg4_vaapi);
REGISTER_HWACCEL(MPEG4_VDPAU, mpeg4_vdpau);
@ -108,9 +116,11 @@ void avcodec_register_all(void)
REGISTER_HWACCEL(VC1_MMAL, vc1_mmal);
REGISTER_HWACCEL(VC1_QSV, vc1_qsv);
REGISTER_HWACCEL(VP8_CUVID, vp8_cuvid);
REGISTER_HWACCEL(VP8_MEDIACODEC, vp8_mediacodec);
REGISTER_HWACCEL(VP9_CUVID, vp9_cuvid);
REGISTER_HWACCEL(VP9_D3D11VA, vp9_d3d11va);
REGISTER_HWACCEL(VP9_DXVA2, vp9_dxva2);
REGISTER_HWACCEL(VP9_MEDIACODEC, vp9_mediacodec);
REGISTER_HWACCEL(VP9_VAAPI, vp9_vaapi);
REGISTER_HWACCEL(WMV3_D3D11VA, wmv3_d3d11va);
REGISTER_HWACCEL(WMV3_DXVA2, wmv3_dxva2);
@ -418,7 +428,7 @@ void avcodec_register_all(void)
REGISTER_DECODER(MACE3, mace3);
REGISTER_DECODER(MACE6, mace6);
REGISTER_DECODER(METASOUND, metasound);
REGISTER_DECODER(MLP, mlp);
REGISTER_ENCDEC (MLP, mlp);
REGISTER_DECODER(MP1, mp1);
REGISTER_DECODER(MP1FLOAT, mp1float);
REGISTER_ENCDEC (MP2, mp2);
@ -447,7 +457,7 @@ void avcodec_register_all(void)
REGISTER_ENCDEC (SONIC, sonic);
REGISTER_ENCODER(SONIC_LS, sonic_ls);
REGISTER_DECODER(TAK, tak);
REGISTER_DECODER(TRUEHD, truehd);
REGISTER_ENCDEC (TRUEHD, truehd);
REGISTER_DECODER(TRUESPEECH, truespeech);
REGISTER_ENCDEC (TTA, tta);
REGISTER_DECODER(TWINVQ, twinvq);
@ -486,6 +496,8 @@ void avcodec_register_all(void)
REGISTER_ENCDEC (PCM_S32BE, pcm_s32be);
REGISTER_ENCDEC (PCM_S32LE, pcm_s32le);
REGISTER_ENCDEC (PCM_S32LE_PLANAR, pcm_s32le_planar);
REGISTER_ENCDEC (PCM_S64BE, pcm_s64be);
REGISTER_ENCDEC (PCM_S64LE, pcm_s64le);
REGISTER_ENCDEC (PCM_U8, pcm_u8);
REGISTER_ENCDEC (PCM_U16BE, pcm_u16be);
REGISTER_ENCDEC (PCM_U16LE, pcm_u16le);
@ -585,7 +597,6 @@ void avcodec_register_all(void)
REGISTER_DECODER(QDMC_AT, qdmc_at);
REGISTER_DECODER(QDM2_AT, qdm2_at);
REGISTER_DECODER(LIBCELT, libcelt);
REGISTER_ENCODER(LIBFAAC, libfaac);
REGISTER_ENCDEC (LIBFDK_AAC, libfdk_aac);
REGISTER_ENCDEC (LIBGSM, libgsm);
REGISTER_ENCDEC (LIBGSM_MS, libgsm_ms);
@ -622,7 +633,8 @@ void avcodec_register_all(void)
/* external libraries, that shouldn't be used by default if one of the
* above is available */
REGISTER_ENCODER(LIBOPENH264, libopenh264);
REGISTER_ENCDEC (LIBOPENH264, libopenh264);
REGISTER_DECODER(H263_CUVID, h263_cuvid);
REGISTER_DECODER(H264_CUVID, h264_cuvid);
REGISTER_ENCODER(H264_NVENC, h264_nvenc);
REGISTER_ENCODER(H264_OMX, h264_omx);
@ -635,15 +647,23 @@ void avcodec_register_all(void)
REGISTER_ENCODER(NVENC_HEVC, nvenc_hevc);
#endif
REGISTER_DECODER(HEVC_CUVID, hevc_cuvid);
REGISTER_DECODER(HEVC_MEDIACODEC, hevc_mediacodec);
REGISTER_ENCODER(HEVC_NVENC, hevc_nvenc);
REGISTER_ENCODER(HEVC_QSV, hevc_qsv);
REGISTER_ENCODER(HEVC_VAAPI, hevc_vaapi);
REGISTER_ENCODER(LIBKVAZAAR, libkvazaar);
REGISTER_DECODER(MJPEG_CUVID, mjpeg_cuvid);
REGISTER_ENCODER(MJPEG_VAAPI, mjpeg_vaapi);
REGISTER_DECODER(MPEG1_CUVID, mpeg1_cuvid);
REGISTER_DECODER(MPEG2_CUVID, mpeg2_cuvid);
REGISTER_ENCODER(MPEG2_QSV, mpeg2_qsv);
REGISTER_DECODER(MPEG4_CUVID, mpeg4_cuvid);
REGISTER_DECODER(MPEG4_MEDIACODEC, mpeg4_mediacodec);
REGISTER_DECODER(VC1_CUVID, vc1_cuvid);
REGISTER_DECODER(VP8_CUVID, vp8_cuvid);
REGISTER_DECODER(VP8_MEDIACODEC, vp8_mediacodec);
REGISTER_DECODER(VP9_CUVID, vp9_cuvid);
REGISTER_DECODER(VP9_MEDIACODEC, vp9_mediacodec);
/* parsers */
REGISTER_PARSER(AAC, aac);

View File

@ -35,8 +35,12 @@
#include "bgmc.h"
#include "bswapdsp.h"
#include "internal.h"
#include "mlz.h"
#include "libavutil/samplefmt.h"
#include "libavutil/crc.h"
#include "libavutil/softfloat_ieee754.h"
#include "libavutil/intfloat.h"
#include "libavutil/intreadwrite.h"
#include <stdint.h>
@ -225,6 +229,14 @@ typedef struct ALSDecContext {
int32_t **raw_samples; ///< decoded raw samples for each channel
int32_t *raw_buffer; ///< contains all decoded raw samples including carryover samples
uint8_t *crc_buffer; ///< buffer of byte order corrected samples used for CRC check
MLZ* mlz; ///< masked lz decompression structure
SoftFloat_IEEE754 *acf; ///< contains common multiplier for all channels
int *last_acf_mantissa; ///< contains the last acf mantissa data of common multiplier for all channels
int *shift_value; ///< value by which the binary point is to be shifted for all channels
int *last_shift_value; ///< contains last shift value for all channels
int **raw_mantissa; ///< decoded mantissa bits of the difference signal
unsigned char *larray; ///< buffer to store the output of masked lz decompression
int *nbits; ///< contains the number of bits to read for masked lz decompression for all samples
} ALSDecContext;
@ -441,7 +453,6 @@ static int check_specific_config(ALSDecContext *ctx)
} \
}
MISSING_ERR(sconf->floating, "Floating point decoding", AVERROR_PATCHWELCOME);
MISSING_ERR(sconf->rlslms, "Adaptive RLS-LMS prediction", AVERROR_PATCHWELCOME);
return error;
@ -1356,6 +1367,238 @@ static int revert_channel_correlation(ALSDecContext *ctx, ALSBlockData *bd,
}
/** multiply two softfloats and handle rounding
*/
static SoftFloat_IEEE754 multiply(SoftFloat_IEEE754 a, SoftFloat_IEEE754 b) {
uint64_t mantissa_temp;
uint64_t mask_64;
int cutoff_bit_count;
unsigned char last_2_bits;
unsigned int mantissa;
int32_t sign;
uint32_t return_val = 0;
int bit_count = 48;
sign = a.sign ^ b.sign;
// Multiply mantissa bits in a 64-bit register
mantissa_temp = (uint64_t)a.mant * (uint64_t)b.mant;
mask_64 = (uint64_t)0x1 << 47;
// Count the valid bit count
while (!(mantissa_temp & mask_64) && mask_64) {
bit_count--;
mask_64 >>= 1;
}
// Round off
cutoff_bit_count = bit_count - 24;
if (cutoff_bit_count > 0) {
last_2_bits = (unsigned char)(((unsigned int)mantissa_temp >> (cutoff_bit_count - 1)) & 0x3 );
if ((last_2_bits == 0x3) || ((last_2_bits == 0x1) && ((unsigned int)mantissa_temp & ((0x1UL << (cutoff_bit_count - 1)) - 1)))) {
// Need to round up
mantissa_temp += (uint64_t)0x1 << cutoff_bit_count;
}
}
mantissa = (unsigned int)(mantissa_temp >> cutoff_bit_count);
// Need one more shift?
if (mantissa & 0x01000000ul) {
bit_count++;
mantissa >>= 1;
}
if (!sign) {
return_val = 0x80000000U;
}
return_val |= (a.exp + b.exp + bit_count - 47) << 23;
return_val |= mantissa;
return av_bits2sf_ieee754(return_val);
}
/** Read and decode the floating point sample data
*/
static int read_diff_float_data(ALSDecContext *ctx, unsigned int ra_frame) {
AVCodecContext *avctx = ctx->avctx;
GetBitContext *gb = &ctx->gb;
SoftFloat_IEEE754 *acf = ctx->acf;
int *shift_value = ctx->shift_value;
int *last_shift_value = ctx->last_shift_value;
int *last_acf_mantissa = ctx->last_acf_mantissa;
int **raw_mantissa = ctx->raw_mantissa;
int *nbits = ctx->nbits;
unsigned char *larray = ctx->larray;
int frame_length = ctx->cur_frame_length;
SoftFloat_IEEE754 scale = av_int2sf_ieee754(0x1u, 23);
unsigned int partA_flag;
unsigned int highest_byte;
unsigned int shift_amp;
uint32_t tmp_32;
int use_acf;
int nchars;
int i;
int c;
long k;
long nbits_aligned;
unsigned long acc;
unsigned long j;
uint32_t sign;
uint32_t e;
uint32_t mantissa;
skip_bits_long(gb, 32); //num_bytes_diff_float
use_acf = get_bits1(gb);
if (ra_frame) {
memset(last_acf_mantissa, 0, avctx->channels * sizeof(*last_acf_mantissa));
memset(last_shift_value, 0, avctx->channels * sizeof(*last_shift_value) );
ff_mlz_flush_dict(ctx->mlz);
}
for (c = 0; c < avctx->channels; ++c) {
if (use_acf) {
//acf_flag
if (get_bits1(gb)) {
tmp_32 = get_bits(gb, 23);
last_acf_mantissa[c] = tmp_32;
} else {
tmp_32 = last_acf_mantissa[c];
}
acf[c] = av_bits2sf_ieee754(tmp_32);
} else {
acf[c] = FLOAT_1;
}
highest_byte = get_bits(gb, 2);
partA_flag = get_bits1(gb);
shift_amp = get_bits1(gb);
if (shift_amp) {
shift_value[c] = get_bits(gb, 8);
last_shift_value[c] = shift_value[c];
} else {
shift_value[c] = last_shift_value[c];
}
if (partA_flag) {
if (!get_bits1(gb)) { //uncompressed
for (i = 0; i < frame_length; ++i) {
if (ctx->raw_samples[c][i] == 0) {
ctx->raw_mantissa[c][i] = get_bits_long(gb, 32);
}
}
} else { //compressed
nchars = 0;
for (i = 0; i < frame_length; ++i) {
if (ctx->raw_samples[c][i] == 0) {
nchars += 4;
}
}
tmp_32 = ff_mlz_decompression(ctx->mlz, gb, nchars, larray);
if(tmp_32 != nchars) {
av_log(ctx->avctx, AV_LOG_ERROR, "Error in MLZ decompression (%d, %d).\n", tmp_32, nchars);
return AVERROR_INVALIDDATA;
}
for (i = 0; i < frame_length; ++i) {
ctx->raw_mantissa[c][i] = AV_RB32(larray);
}
}
}
//decode part B
if (highest_byte) {
for (i = 0; i < frame_length; ++i) {
if (ctx->raw_samples[c][i] != 0) {
//The following logic is taken from Table 14.45 and 14.46 of the ISO spec
if (av_cmp_sf_ieee754(acf[c], FLOAT_1)) {
nbits[i] = 23 - av_log2(abs(ctx->raw_samples[c][i]));
} else {
nbits[i] = 23;
}
nbits[i] = FFMIN(nbits[i], highest_byte*8);
}
}
if (!get_bits1(gb)) { //uncompressed
for (i = 0; i < frame_length; ++i) {
if (ctx->raw_samples[c][i] != 0) {
raw_mantissa[c][i] = get_bitsz(gb, nbits[i]);
}
}
} else { //compressed
nchars = 0;
for (i = 0; i < frame_length; ++i) {
if (ctx->raw_samples[c][i]) {
nchars += (int) nbits[i] / 8;
if (nbits[i] & 7) {
++nchars;
}
}
}
tmp_32 = ff_mlz_decompression(ctx->mlz, gb, nchars, larray);
if(tmp_32 != nchars) {
av_log(ctx->avctx, AV_LOG_ERROR, "Error in MLZ decompression (%d, %d).\n", tmp_32, nchars);
return AVERROR_INVALIDDATA;
}
j = 0;
for (i = 0; i < frame_length; ++i) {
if (ctx->raw_samples[c][i]) {
if (nbits[i] & 7) {
nbits_aligned = 8 * ((unsigned int)(nbits[i] / 8) + 1);
} else {
nbits_aligned = nbits[i];
}
acc = 0;
for (k = 0; k < nbits_aligned/8; ++k) {
acc = (acc << 8) + larray[j++];
}
acc >>= (nbits_aligned - nbits[i]);
raw_mantissa[c][i] = acc;
}
}
}
}
for (i = 0; i < frame_length; ++i) {
SoftFloat_IEEE754 pcm_sf = av_int2sf_ieee754(ctx->raw_samples[c][i], 0);
pcm_sf = av_div_sf_ieee754(pcm_sf, scale);
if (ctx->raw_samples[c][i] != 0) {
if (!av_cmp_sf_ieee754(acf[c], FLOAT_1)) {
pcm_sf = multiply(acf[c], pcm_sf);
}
sign = pcm_sf.sign;
e = pcm_sf.exp;
mantissa = (pcm_sf.mant | 0x800000) + raw_mantissa[c][i];
while(mantissa >= 0x1000000) {
e++;
mantissa >>= 1;
}
if (mantissa) e += (shift_value[c] - 127);
mantissa &= 0x007fffffUL;
tmp_32 = (sign << 31) | ((e + EXP_BIAS) << 23) | (mantissa);
ctx->raw_samples[c][i] = tmp_32;
} else {
ctx->raw_samples[c][i] = raw_mantissa[c][i] & 0x007fffffUL;
}
}
align_get_bits(gb);
}
return 0;
}
/** Read the frame data.
*/
static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame)
@ -1497,7 +1740,9 @@ static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame)
sizeof(*ctx->raw_samples[c]) * sconf->max_order);
}
// TODO: read_diff_float_data
if (sconf->floating) {
read_diff_float_data(ctx, ra_frame);
}
if (get_bits_left(gb) < 0) {
av_log(ctx->avctx, AV_LOG_ERROR, "Overread %d\n", -get_bits_left(gb));
@ -1642,6 +1887,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
static av_cold int decode_end(AVCodecContext *avctx)
{
ALSDecContext *ctx = avctx->priv_data;
int i;
av_freep(&ctx->sconf.chan_pos);
@ -1667,6 +1913,22 @@ static av_cold int decode_end(AVCodecContext *avctx)
av_freep(&ctx->chan_data_buffer);
av_freep(&ctx->reverted_channels);
av_freep(&ctx->crc_buffer);
if (ctx->mlz) {
av_freep(&ctx->mlz->dict);
av_freep(&ctx->mlz);
}
av_freep(&ctx->acf);
av_freep(&ctx->last_acf_mantissa);
av_freep(&ctx->shift_value);
av_freep(&ctx->last_shift_value);
if (ctx->raw_mantissa) {
for (i = 0; i < avctx->channels; i++) {
av_freep(&ctx->raw_mantissa[i]);
}
av_freep(&ctx->raw_mantissa);
}
av_freep(&ctx->larray);
av_freep(&ctx->nbits);
return 0;
}
@ -1803,6 +2065,32 @@ static av_cold int decode_init(AVCodecContext *avctx)
ctx->raw_buffer = av_mallocz_array(avctx->channels * channel_size, sizeof(*ctx->raw_buffer));
ctx->raw_samples = av_malloc_array(avctx->channels, sizeof(*ctx->raw_samples));
if (sconf->floating) {
ctx->acf = av_malloc_array(avctx->channels, sizeof(*ctx->acf));
ctx->shift_value = av_malloc_array(avctx->channels, sizeof(*ctx->shift_value));
ctx->last_shift_value = av_malloc_array(avctx->channels, sizeof(*ctx->last_shift_value));
ctx->last_acf_mantissa = av_malloc_array(avctx->channels, sizeof(*ctx->last_acf_mantissa));
ctx->raw_mantissa = av_mallocz_array(avctx->channels, sizeof(*ctx->raw_mantissa));
ctx->larray = av_malloc_array(ctx->cur_frame_length * 4, sizeof(*ctx->larray));
ctx->nbits = av_malloc_array(ctx->cur_frame_length, sizeof(*ctx->nbits));
ctx->mlz = av_mallocz(sizeof(*ctx->mlz));
if (!ctx->mlz || !ctx->acf || !ctx->shift_value || !ctx->last_shift_value
|| !ctx->last_acf_mantissa || !ctx->raw_mantissa) {
av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");
ret = AVERROR(ENOMEM);
goto fail;
}
ff_mlz_init_dict(avctx, ctx->mlz);
ff_mlz_flush_dict(ctx->mlz);
for (c = 0; c < avctx->channels; ++c) {
ctx->raw_mantissa[c] = av_mallocz_array(ctx->cur_frame_length, sizeof(**ctx->raw_mantissa));
}
}
// allocate previous raw sample buffer
if (!ctx->prev_raw_samples || !ctx->raw_buffer|| !ctx->raw_samples) {
av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");

View File

@ -455,7 +455,7 @@ endconst
h264_chroma_mc4 avg, rv40
#endif
#if CONFIG_VC1_DECODER
#if CONFIG_VC1DSP
h264_chroma_mc8 put, vc1
h264_chroma_mc8 avg, vc1
h264_chroma_mc4 put, vc1

View File

@ -77,3 +77,23 @@ wrap(avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt,
{
testneonclobbers(avcodec_encode_video2, avctx, avpkt, frame, got_packet_ptr);
}
wrap(avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt))
{
testneonclobbers(avcodec_send_packet, avctx, avpkt);
}
wrap(avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame))
{
testneonclobbers(avcodec_receive_frame, avctx, frame);
}
wrap(avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame))
{
testneonclobbers(avcodec_send_frame, avctx, frame);
}
wrap(avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt))
{
testneonclobbers(avcodec_receive_packet, avctx, avpkt);
}

View File

@ -229,7 +229,7 @@ static inline const char *skip_space(const char *buf)
return buf;
}
static int *get_default_field_orders(const ASSSection *section)
static int *get_default_field_orders(const ASSSection *section, int *number)
{
int i;
int *order = av_malloc_array(FF_ARRAY_ELEMS(section->fields), sizeof(*order));
@ -238,8 +238,9 @@ static int *get_default_field_orders(const ASSSection *section)
return NULL;
for (i = 0; section->fields[i].name; i++)
order[i] = i;
*number = i;
while (i < FF_ARRAY_ELEMS(section->fields))
order[i] = -1;
order[i++] = -1;
return order;
}
@ -255,30 +256,47 @@ static const char *ass_split_section(ASSSplitContext *ctx, const char *buf)
ctx->current_section = -1;
break;
}
if (buf[0] == ';' || (buf[0] == '!' && buf[1] == ':')) {
/* skip comments */
} else if (section->format_header && !order) {
len = strlen(section->format_header);
if (strncmp(buf, section->format_header, len) || buf[len] != ':')
goto next_line;
buf += len + 1;
while (!is_eol(*buf)) {
buf = skip_space(buf);
len = strcspn(buf, ", \r\n");
if (!(tmp = av_realloc_array(order, (*number + 1), sizeof(*order))))
return NULL;
order = tmp;
order[*number] = -1;
for (i=0; section->fields[i].name; i++)
if (!strncmp(buf, section->fields[i].name, len)) {
order[*number] = i;
break;
}
(*number)++;
buf = skip_space(buf + len + (buf[len] == ','));
if (buf[0] == ';' || (buf[0] == '!' && buf[1] == ':'))
goto next_line; // skip comments
len = strcspn(buf, ":\r\n");
if (buf[len] == ':' &&
(!section->fields_header || strncmp(buf, section->fields_header, len))) {
for (i = 0; i < FF_ARRAY_ELEMS(ass_sections); i++) {
if (ass_sections[i].fields_header &&
!strncmp(buf, ass_sections[i].fields_header, len)) {
ctx->current_section = i;
section = &ass_sections[ctx->current_section];
number = &ctx->field_number[ctx->current_section];
order = ctx->field_order[ctx->current_section];
break;
}
}
ctx->field_order[ctx->current_section] = order;
} else if (section->fields_header) {
}
if (section->format_header && !order) {
len = strlen(section->format_header);
if (buf[len] == ':' && !strncmp(buf, section->format_header, len)) {
buf += len + 1;
while (!is_eol(*buf)) {
buf = skip_space(buf);
len = strcspn(buf, ", \r\n");
if (!(tmp = av_realloc_array(order, (*number + 1), sizeof(*order))))
return NULL;
order = tmp;
order[*number] = -1;
for (i=0; section->fields[i].name; i++)
if (!strncmp(buf, section->fields[i].name, len)) {
order[*number] = i;
break;
}
(*number)++;
buf = skip_space(buf + len + (buf[len] == ','));
}
ctx->field_order[ctx->current_section] = order;
goto next_line;
}
}
if (section->fields_header) {
len = strlen(section->fields_header);
if (!strncmp(buf, section->fields_header, len) && buf[len] == ':') {
uint8_t *ptr, *struct_ptr = realloc_section_array(ctx);
@ -286,7 +304,7 @@ static const char *ass_split_section(ASSSplitContext *ctx, const char *buf)
/* No format header line found so far, assume default */
if (!order) {
order = get_default_field_orders(section);
order = get_default_field_orders(section, number);
if (!order)
return NULL;
ctx->field_order[ctx->current_section] = order;

View File

@ -554,7 +554,12 @@ static int ffat_decode(AVCodecContext *avctx, void *data,
ffat_copy_samples(avctx, frame);
*got_frame_ptr = 1;
if (at->last_pts != AV_NOPTS_VALUE) {
frame->pts = at->last_pts;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
frame->pkt_pts = at->last_pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
at->last_pts = avpkt->pts;
}
} else if (ret && ret != 1) {

View File

@ -43,7 +43,9 @@
#include "version.h"
/**
* @defgroup libavc Encoding/Decoding Library
* @defgroup libavc libavcodec
* Encoding/Decoding Library
*
* @{
*
* @defgroup lavc_decoding Decoding
@ -443,9 +445,9 @@ enum AVCodecID {
AV_CODEC_ID_PCM_S24LE_PLANAR,
AV_CODEC_ID_PCM_S32LE_PLANAR,
AV_CODEC_ID_PCM_S16BE_PLANAR,
/* new PCM "codecs" should be added right below this line starting with
* an explicit value of, for example, 0x10800
*/
AV_CODEC_ID_PCM_S64LE = 0x10800,
AV_CODEC_ID_PCM_S64BE,
/* various ADPCM codecs */
AV_CODEC_ID_ADPCM_IMA_QT = 0x11000,
@ -629,6 +631,7 @@ enum AVCodecID {
AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs.
AV_CODEC_ID_TTF = 0x18000,
AV_CODEC_ID_SCTE_35, ///< Contains timestamp estimated through PCR of program stream.
AV_CODEC_ID_BINTEXT = 0x18800,
AV_CODEC_ID_XBIN,
AV_CODEC_ID_IDF,
@ -1033,6 +1036,16 @@ typedef struct RcOverride{
* Audio encoder supports receiving a different number of samples in each call.
*/
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE (1 << 16)
/**
* Decoder is not a preferred choice for probing.
* This indicates that the decoder is not a good choice for probing.
* It could, for example, be an expensive-to-spin-up hardware decoder,
* or it could simply not provide a lot of useful information about
* the stream.
* A decoder marked with this flag should only be used as a last-resort
* choice for probing.
*/
#define AV_CODEC_CAP_AVOID_PROBING (1 << 17)
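A probing caller honouring this capability might scan the registered decoders and keep one marked AV_CODEC_CAP_AVOID_PROBING only as a fallback. A hedged sketch follows; find_probe_decoder is a hypothetical helper, not a libavcodec function:
/* prefer decoders without AV_CODEC_CAP_AVOID_PROBING when probing */
static AVCodec *find_probe_decoder(enum AVCodecID id)
{
    AVCodec *c = NULL, *fallback = NULL;
    while ((c = av_codec_next(c))) {
        if (!av_codec_is_decoder(c) || c->id != id)
            continue;
        if (c->capabilities & AV_CODEC_CAP_AVOID_PROBING) {
            if (!fallback)
                fallback = c;        /* remember as last resort */
        } else {
            return c;                /* preferred choice */
        }
    }
    return fallback;
}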
/**
* Codec is intra only.
*/
@ -1348,6 +1361,14 @@ typedef struct AVCPBProperties {
*/
enum AVPacketSideDataType {
AV_PKT_DATA_PALETTE,
/**
* AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format
* that the extradata buffer was changed and the receiving side should
* act upon it appropriately. The new extradata is embedded in the side
* data buffer and should be immediately used for processing the current
* frame or packet.
*/
AV_PKT_DATA_NEW_EXTRADATA,
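Side data of this type can be picked up from a packet with av_packet_get_side_data(); a minimal hedged sketch, where pkt is an assumed demuxed AVPacket:
int size = 0;
uint8_t *new_extradata =
    av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, &size);
if (new_extradata && size > 0) {
    /* the new extradata applies from this packet onwards */
}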
/**
@ -1611,6 +1632,12 @@ typedef struct AVPacket {
} AVPacket;
#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe
#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted
/**
* Flag used to mark packets that are required to maintain valid decoder
* state but are not required for output; they should be dropped after
* decoding.
**/
#define AV_PKT_FLAG_DISCARD 0x0004
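A hedged caller-side sketch of the intended use: such packets are still fed to the decoder so its state stays valid, but the frames they produce are dropped. decode_one is a hypothetical helper wrapping the send/receive calls:
int drop = pkt->flags & AV_PKT_FLAG_DISCARD;
ret = decode_one(dec_ctx, frame, pkt);
if (ret >= 0 && drop)
    av_frame_unref(frame);   /* decoded only to maintain decoder state */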
enum AVSideDataParamChangeFlags {
AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001,
@ -2083,22 +2110,23 @@ typedef struct AVCodecContext {
* - decoding: unused
*/
int ildct_cmp;
#define FF_CMP_SAD 0
#define FF_CMP_SSE 1
#define FF_CMP_SATD 2
#define FF_CMP_DCT 3
#define FF_CMP_PSNR 4
#define FF_CMP_BIT 5
#define FF_CMP_RD 6
#define FF_CMP_ZERO 7
#define FF_CMP_VSAD 8
#define FF_CMP_VSSE 9
#define FF_CMP_NSSE 10
#define FF_CMP_W53 11
#define FF_CMP_W97 12
#define FF_CMP_DCTMAX 13
#define FF_CMP_DCT264 14
#define FF_CMP_CHROMA 256
#define FF_CMP_SAD 0
#define FF_CMP_SSE 1
#define FF_CMP_SATD 2
#define FF_CMP_DCT 3
#define FF_CMP_PSNR 4
#define FF_CMP_BIT 5
#define FF_CMP_RD 6
#define FF_CMP_ZERO 7
#define FF_CMP_VSAD 8
#define FF_CMP_VSSE 9
#define FF_CMP_NSSE 10
#define FF_CMP_W53 11
#define FF_CMP_W97 12
#define FF_CMP_DCTMAX 13
#define FF_CMP_DCT264 14
#define FF_CMP_MEDIAN_SAD 15
#define FF_CMP_CHROMA 256
/**
* ME diamond size & shape
@ -3165,6 +3193,13 @@ typedef struct AVCodecContext {
#define FF_PROFILE_MPEG2_AAC_LOW 128
#define FF_PROFILE_MPEG2_AAC_HE 131
#define FF_PROFILE_DNXHD 0
#define FF_PROFILE_DNXHR_LB 1
#define FF_PROFILE_DNXHR_SQ 2
#define FF_PROFILE_DNXHR_HQ 3
#define FF_PROFILE_DNXHR_HQX 4
#define FF_PROFILE_DNXHR_444 5
#define FF_PROFILE_DTS 20
#define FF_PROFILE_DTS_ES 30
#define FF_PROFILE_DTS_96_24 40
@ -3189,8 +3224,10 @@ typedef struct AVCodecContext {
#define FF_PROFILE_H264_HIGH 100
#define FF_PROFILE_H264_HIGH_10 110
#define FF_PROFILE_H264_HIGH_10_INTRA (110|FF_PROFILE_H264_INTRA)
#define FF_PROFILE_H264_MULTIVIEW_HIGH 118
#define FF_PROFILE_H264_HIGH_422 122
#define FF_PROFILE_H264_HIGH_422_INTRA (122|FF_PROFILE_H264_INTRA)
#define FF_PROFILE_H264_STEREO_HIGH 128
#define FF_PROFILE_H264_HIGH_444 144
#define FF_PROFILE_H264_HIGH_444_PREDICTIVE 244
#define FF_PROFILE_H264_HIGH_444_INTRA (244|FF_PROFILE_H264_INTRA)
@ -3482,15 +3519,25 @@ typedef struct AVCodecContext {
int nb_coded_side_data;
/**
* Encoding only.
* A reference to the AVHWFramesContext describing the input (for encoding)
* or output (decoding) frames. The reference is set by the caller and
* afterwards owned (and freed) by libavcodec.
*
* For hardware encoders configured to use a hwaccel pixel format, this
* field should be set by the caller to a reference to the AVHWFramesContext
* describing input frames. AVHWFramesContext.format must be equal to
* AVCodecContext.pix_fmt.
* - decoding: This field should be set by the caller from the get_format()
* callback. The previous reference (if any) will always be
* unreffed by libavcodec before the get_format() call.
*
* This field should be set before avcodec_open2() is called and is
* afterwards owned and managed by libavcodec.
* If the default get_buffer2() is used with a hwaccel pixel
* format, then this AVHWFramesContext will be used for
* allocating the frame buffers.
*
* - encoding: For hardware encoders configured to use a hwaccel pixel
* format, this field should be set by the caller to a reference
* to the AVHWFramesContext describing input frames.
* AVHWFramesContext.format must be equal to
* AVCodecContext.pix_fmt.
*
* This field should be set before avcodec_open2() is called.
*/
AVBufferRef *hw_frames_ctx;
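A hedged sketch of the decoding side described above: a get_format() callback that installs a frames context for a hwaccel pixel format. This is illustrative only; device_ref is an assumed, already created AVHWDeviceContext reference, VAAPI/NV12 are example formats, and some hwaccels additionally require fields such as initial_pool_size:
static enum AVPixelFormat get_format(AVCodecContext *avctx,
                                     const enum AVPixelFormat *fmts)
{
    const enum AVPixelFormat *p;
    for (p = fmts; *p != AV_PIX_FMT_NONE; p++) {
        AVBufferRef *frames_ref;
        AVHWFramesContext *fc;
        if (*p != AV_PIX_FMT_VAAPI)
            continue;
        frames_ref = av_hwframe_ctx_alloc(device_ref);
        if (!frames_ref)
            break;
        fc            = (AVHWFramesContext *)frames_ref->data;
        fc->format    = AV_PIX_FMT_VAAPI;
        fc->sw_format = AV_PIX_FMT_NV12;
        fc->width     = avctx->coded_width;
        fc->height    = avctx->coded_height;
        if (av_hwframe_ctx_init(frames_ref) < 0) {
            av_buffer_unref(&frames_ref);
            break;
        }
        avctx->hw_frames_ctx = frames_ref;   /* now owned by libavcodec */
        return AV_PIX_FMT_VAAPI;
    }
    return fmts[0];   /* fall back to the first offered format */
}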
@ -3505,6 +3552,17 @@ typedef struct AVCodecContext {
#define FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS 1
#endif
/**
* Audio only. The amount of padding (in samples) appended by the encoder to
* the end of the audio. I.e. this number of decoded samples must be
* discarded by the caller from the end of the stream to get the original
* audio without any trailing padding.
*
* - decoding: unused
* - encoding: unused
*/
int trailing_padding;
} AVCodecContext;
AVRational av_codec_get_pkt_timebase (const AVCodecContext *avctx);
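Combined with the existing initial_padding field, the trailing_padding field documented just above lets a caller work out how many decoded samples carry real audio, where the values have been populated (e.g. from container metadata). A hedged arithmetic sketch; total_decoded is an assumed running sample count kept by the caller:
/* samples to keep after decoding the whole stream (sketch) */
int64_t valid_samples = total_decoded
                      - avctx->initial_padding
                      - avctx->trailing_padding;
if (valid_samples < 0)
    valid_samples = 0;   /* stream shorter than the combined padding */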
@ -5115,7 +5173,10 @@ AVCodecParserContext *av_parser_init(int codec_id);
* @param poutbuf set to pointer to parsed buffer or NULL if not yet finished.
* @param poutbuf_size set to size of parsed buffer or zero if not yet finished.
* @param buf input buffer.
* @param buf_size input length, to signal EOF, this should be 0 (so that the last frame can be output).
* @param buf_size buffer size in bytes without the padding. I.e. the full buffer
*                 size is assumed to be buf_size + AV_INPUT_BUFFER_PADDING_SIZE.
*                 To signal EOF, this should be 0 (so that the last frame
*                 can be output).
* @param pts input presentation timestamp.
* @param dts input decoding timestamp.
* @param pos input byte position in stream.
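/* A sketch of the parse loop implied by the buf_size semantics above;
 * fill_input() and handle_frame() are hypothetical application callbacks,
 * and a single flush call at EOF is assumed to be enough. */
#include <string.h>
#include <libavcodec/avcodec.h>

extern int  fill_input(uint8_t *buf, int max);   /* returns 0 at end of stream */
extern void handle_frame(const uint8_t *data, int size);

static void parse_stream(AVCodecParserContext *parser, AVCodecContext *avctx)
{
    uint8_t buf[4096 + AV_INPUT_BUFFER_PADDING_SIZE];
    int eof = 0;

    while (!eof) {
        int size = fill_input(buf, 4096);
        const uint8_t *data = buf;

        eof = (size == 0);                       /* size 0 flushes the parser */
        memset(buf + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);

        do {
            uint8_t *out;
            int out_size;
            int used = av_parser_parse2(parser, avctx, &out, &out_size,
                                        data, size, AV_NOPTS_VALUE,
                                        AV_NOPTS_VALUE, 0);
            data += used;
            size -= used;
            if (out_size)
                handle_frame(out, out_size);     /* one complete frame */
        } while (size > 0);
    }
}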
@ -5505,15 +5566,8 @@ enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt
enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);
attribute_deprecated
#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI
enum AVPixelFormat avcodec_find_best_pix_fmt2(const enum AVPixelFormat *pix_fmt_list,
enum AVPixelFormat src_pix_fmt,
int has_alpha, int *loss_ptr);
#else
enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);
#endif
enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt);
@ -5881,7 +5935,8 @@ int av_bsf_init(AVBSFContext *ctx);
* av_bsf_receive_packet() repeatedly until it returns AVERROR(EAGAIN) or
* AVERROR_EOF.
*
* @param pkt the packet to filter. The bitstream filter will take ownership of
* @param pkt the packet to filter. pkt must contain some payload (i.e. data or
* side data must be present in pkt). The bitstream filter will take ownership of
* the packet and reset the contents of pkt. pkt is not touched if an error occurs.
* This parameter may be NULL, which signals the end of the stream (i.e. no more
* packets will be sent). That will cause the filter to output any packets it
@ -5931,6 +5986,91 @@ void av_bsf_free(AVBSFContext **ctx);
*/
const AVClass *av_bsf_get_class(void);
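/* The send/receive contract documented above, exercised end to end (a
 * sketch; read_packet() and write_packet() are hypothetical application
 * functions returning 0 on success). */
#include <libavcodec/avcodec.h>

extern int read_packet(AVPacket *pkt);
extern int write_packet(AVPacket *pkt);

static int filter_all(AVBSFContext *bsf, AVPacket *pkt)
{
    int ret;

    while ((ret = read_packet(pkt)) == 0) {
        ret = av_bsf_send_packet(bsf, pkt);          /* bsf takes ownership */
        if (ret < 0)
            return ret;
        while ((ret = av_bsf_receive_packet(bsf, pkt)) == 0) {
            write_packet(pkt);
            av_packet_unref(pkt);
        }
        if (ret != AVERROR(EAGAIN))
            return ret;
    }

    ret = av_bsf_send_packet(bsf, NULL);             /* signal EOF */
    if (ret < 0)
        return ret;
    while ((ret = av_bsf_receive_packet(bsf, pkt)) == 0) {
        write_packet(pkt);
        av_packet_unref(pkt);
    }
    return ret == AVERROR_EOF ? 0 : ret;
}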
/**
* Structure for a chain/list of bitstream filters.
* An empty list can be allocated by av_bsf_list_alloc().
*/
typedef struct AVBSFList AVBSFList;
/**
* Allocate an empty list of bitstream filters.
* The list must later be freed by av_bsf_list_free()
* or finalized by av_bsf_list_finalize().
*
* @return Pointer to @ref AVBSFList on success, NULL in case of failure
*/
AVBSFList *av_bsf_list_alloc(void);
/**
* Free a list of bitstream filters.
*
* @param lst Pointer to pointer returned by av_bsf_list_alloc()
*/
void av_bsf_list_free(AVBSFList **lst);
/**
* Append a bitstream filter to the list of bitstream filters.
*
* @param lst List to append to
* @param bsf Filter context to be appended
*
* @return >=0 on success, negative AVERROR in case of failure
*/
int av_bsf_list_append(AVBSFList *lst, AVBSFContext *bsf);
/**
* Construct a new bitstream filter context given its name and options
* and append it to the list of bitstream filters.
*
* @param lst List to append to
* @param bsf_name Name of the bitstream filter
* @param options Options for the bitstream filter, can be set to NULL
*
* @return >=0 on success, negative AVERROR in case of failure
*/
int av_bsf_list_append2(AVBSFList *lst, const char *bsf_name, AVDictionary **options);
/**
* Finalize list of bitstream filters.
*
* This function will transform the @ref AVBSFList into a single @ref AVBSFContext,
* so the whole chain of bitstream filters can be treated as a single filter
* freshly allocated by av_bsf_alloc().
* If the call is successful, the @ref AVBSFList structure is freed and lst
* will be set to NULL. In case of failure, the caller is responsible for
* freeing the structure with av_bsf_list_free().
*
* @param lst Filter list structure to be transformed
* @param[out] bsf Pointer to be set to newly created @ref AVBSFContext structure
* representing the chain of bitstream filters
*
* @return >=0 on success, negative AVERROR in case of failure
*/
int av_bsf_list_finalize(AVBSFList **lst, AVBSFContext **bsf);
/**
* Parse a string describing a list of bitstream filters and create a single
* @ref AVBSFContext representing the whole chain of bitstream filters.
* The resulting @ref AVBSFContext can be treated as any other @ref AVBSFContext
* freshly allocated by av_bsf_alloc().
*
* @param str String describing chain of bitstream filters in format
* `bsf1[=opt1=val1:opt2=val2][,bsf2]`
* @param[out] bsf Pointer to be set to newly created @ref AVBSFContext structure
* representing the chain of bitstream filters
*
* @return >=0 on success, negative AVERROR in case of failure
*/
int av_bsf_list_parse_str(const char *str, AVBSFContext **bsf);
/**
* Get null/pass-through bitstream filter.
*
* @param[out] bsf Pointer to be set to new instance of pass-through bitstream filter
*
* @return 0 on success, negative AVERROR in case of failure
*/
int av_bsf_get_null_filter(AVBSFContext **bsf);
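/* Putting the list API together (a sketch): build a two-filter chain
 * programmatically. The "dump_extra" name and its "freq" option are
 * illustrative assumptions, not guaranteed by this header; the equivalent
 * string form would be
 * av_bsf_list_parse_str("h264_mp4toannexb,dump_extra=freq=keyframe", &ctx). */
#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

static int build_chain(AVBSFContext **out)
{
    AVBSFList *lst = av_bsf_list_alloc();
    AVDictionary *opts = NULL;
    int ret;

    if (!lst)
        return AVERROR(ENOMEM);

    ret = av_bsf_list_append2(lst, "h264_mp4toannexb", NULL);
    if (ret < 0)
        goto fail;

    av_dict_set(&opts, "freq", "keyframe", 0);   /* assumed option */
    ret = av_bsf_list_append2(lst, "dump_extra", &opts);
    av_dict_free(&opts);
    if (ret < 0)
        goto fail;

    ret = av_bsf_list_finalize(&lst, out);       /* frees lst on success */
fail:
    av_bsf_list_free(&lst);                      /* no-op once lst is NULL */
    return ret;
}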
/* memory */
/**
@ -585,7 +585,8 @@ int av_packet_ref(AVPacket *dst, const AVPacket *src)
ret = packet_alloc(&dst->buf, src->size);
if (ret < 0)
goto fail;
memcpy(dst->buf->data, src->data, src->size);
if (src->size)
memcpy(dst->buf->data, src->data, src->size);
dst->data = dst->buf->data;
} else {
@ -28,7 +28,7 @@
typedef struct {
AVCodecContext *mjpeg_avctx;
int is_mjpeg;
int interlace; //FIXME use frame.interlaced_frame
int interlace;
int tff;
} AVRnContext;
@ -23,17 +23,17 @@
#include "libavutil/attributes.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#define BITSTREAM_READER_LE
#include "avcodec.h"
#include "binkdata.h"
#include "binkdsp.h"
#include "blockdsp.h"
#include "get_bits.h"
#include "hpeldsp.h"
#include "internal.h"
#include "mathops.h"
#define BITSTREAM_READER_LE
#include "get_bits.h"
#define BINK_FLAG_ALPHA 0x00100000
#define BINK_FLAG_GRAY 0x00020000
@ -29,15 +29,16 @@
*/
#include "libavutil/channel_layout.h"
#include "avcodec.h"
#define BITSTREAM_READER_LE
#include "get_bits.h"
#include "dct.h"
#include "rdft.h"
#include "internal.h"
#include "wma_freqs.h"
#include "libavutil/intfloat.h"
#define BITSTREAM_READER_LE
#include "avcodec.h"
#include "dct.h"
#include "get_bits.h"
#include "internal.h"
#include "rdft.h"
#include "wma_freqs.h"
static float quant_table[96];
#define MAX_CHANNELS 2
@ -21,6 +21,9 @@
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "avcodec.h"
#include "bsf.h"
@ -172,11 +175,13 @@ int av_bsf_init(AVBSFContext *ctx)
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
{
if (!pkt || !pkt->data) {
if (!pkt) {
ctx->internal->eof = 1;
return 0;
}
av_assert0(pkt->data || pkt->side_data);
if (ctx->internal->eof) {
av_log(ctx, AV_LOG_ERROR, "A non-NULL packet sent after an EOF.\n");
return AVERROR(EINVAL);
@ -217,3 +222,322 @@ int ff_bsf_get_packet(AVBSFContext *ctx, AVPacket **pkt)
return 0;
}
int ff_bsf_get_packet_ref(AVBSFContext *ctx, AVPacket *pkt)
{
AVBSFInternal *in = ctx->internal;
if (in->eof)
return AVERROR_EOF;
if (!ctx->internal->buffer_pkt->data &&
!ctx->internal->buffer_pkt->side_data_elems)
return AVERROR(EAGAIN);
av_packet_move_ref(pkt, ctx->internal->buffer_pkt);
return 0;
}
typedef struct BSFListContext {
const AVClass *class;
AVBSFContext **bsfs;
int nb_bsfs;
unsigned idx; // index of currently processed BSF
unsigned flushed_idx; // index of BSF being flushed
char *item_name;
} BSFListContext;
static int bsf_list_init(AVBSFContext *bsf)
{
BSFListContext *lst = bsf->priv_data;
int ret, i;
const AVCodecParameters *cod_par = bsf->par_in;
AVRational tb = bsf->time_base_in;
for (i = 0; i < lst->nb_bsfs; ++i) {
ret = avcodec_parameters_copy(lst->bsfs[i]->par_in, cod_par);
if (ret < 0)
goto fail;
lst->bsfs[i]->time_base_in = tb;
ret = av_bsf_init(lst->bsfs[i]);
if (ret < 0)
goto fail;
cod_par = lst->bsfs[i]->par_out;
tb = lst->bsfs[i]->time_base_out;
}
bsf->time_base_out = tb;
ret = avcodec_parameters_copy(bsf->par_out, cod_par);
fail:
return ret;
}
static int bsf_list_filter(AVBSFContext *bsf, AVPacket *out)
{
BSFListContext *lst = bsf->priv_data;
int ret;
if (!lst->nb_bsfs)
return ff_bsf_get_packet_ref(bsf, out);
while (1) {
if (lst->idx > lst->flushed_idx) {
ret = av_bsf_receive_packet(lst->bsfs[lst->idx-1], out);
if (ret == AVERROR(EAGAIN)) {
/* no more packets from idx-1, try with previous */
ret = 0;
lst->idx--;
continue;
} else if (ret == AVERROR_EOF) {
/* filter idx-1 is done, continue with idx...nb_bsfs */
lst->flushed_idx = lst->idx;
continue;
} else if (ret < 0) {
/* filtering error */
break;
}
} else {
ret = ff_bsf_get_packet_ref(bsf, out);
if (ret == AVERROR_EOF) {
lst->idx = lst->flushed_idx;
} else if (ret < 0)
break;
}
if (lst->idx < lst->nb_bsfs) {
AVPacket *pkt;
if (ret == AVERROR_EOF && lst->idx == lst->flushed_idx) {
/* ff_bsf_get_packet_ref returned EOF and idx is the first
* filter of the not yet flushed filter chain */
pkt = NULL;
} else {
pkt = out;
}
ret = av_bsf_send_packet(lst->bsfs[lst->idx], pkt);
if (ret < 0)
break;
lst->idx++;
} else {
/* The end of filter chain, break to return result */
break;
}
}
if (ret < 0)
av_packet_unref(out);
return ret;
}
static void bsf_list_close(AVBSFContext *bsf)
{
BSFListContext *lst = bsf->priv_data;
int i;
for (i = 0; i < lst->nb_bsfs; ++i)
av_bsf_free(&lst->bsfs[i]);
av_freep(&lst->bsfs);
av_freep(&lst->item_name);
}
static const char *bsf_list_item_name(void *ctx)
{
static const char *null_filter_name = "null";
AVBSFContext *bsf_ctx = ctx;
BSFListContext *lst = bsf_ctx->priv_data;
if (!lst->nb_bsfs)
return null_filter_name;
if (!lst->item_name) {
int i;
AVBPrint bp;
av_bprint_init(&bp, 16, 128);
av_bprintf(&bp, "bsf_list(");
for (i = 0; i < lst->nb_bsfs; i++)
av_bprintf(&bp, i ? ",%s" : "%s", lst->bsfs[i]->filter->name);
av_bprintf(&bp, ")");
av_bprint_finalize(&bp, &lst->item_name);
}
return lst->item_name;
}
static const AVClass bsf_list_class = {
.class_name = "bsf_list",
.item_name = bsf_list_item_name,
.version = LIBAVUTIL_VERSION_INT,
};
const AVBitStreamFilter ff_list_bsf = {
.name = "bsf_list",
.priv_data_size = sizeof(BSFListContext),
.priv_class = &bsf_list_class,
.init = bsf_list_init,
.filter = bsf_list_filter,
.close = bsf_list_close,
};
struct AVBSFList {
AVBSFContext **bsfs;
int nb_bsfs;
};
AVBSFList *av_bsf_list_alloc(void)
{
return av_mallocz(sizeof(AVBSFList));
}
void av_bsf_list_free(AVBSFList **lst)
{
int i;
if (!*lst)
return;
for (i = 0; i < (*lst)->nb_bsfs; ++i)
av_bsf_free(&(*lst)->bsfs[i]);
av_free((*lst)->bsfs);
av_freep(lst);
}
int av_bsf_list_append(AVBSFList *lst, AVBSFContext *bsf)
{
return av_dynarray_add_nofree(&lst->bsfs, &lst->nb_bsfs, bsf);
}
int av_bsf_list_append2(AVBSFList *lst, const char *bsf_name, AVDictionary **options)
{
int ret;
const AVBitStreamFilter *filter;
AVBSFContext *bsf;
filter = av_bsf_get_by_name(bsf_name);
if (!filter)
return AVERROR_BSF_NOT_FOUND;
ret = av_bsf_alloc(filter, &bsf);
if (ret < 0)
return ret;
if (options) {
ret = av_opt_set_dict2(bsf, options, AV_OPT_SEARCH_CHILDREN);
if (ret < 0)
goto end;
}
ret = av_bsf_list_append(lst, bsf);
end:
if (ret < 0)
av_bsf_free(&bsf);
return ret;
}
int av_bsf_list_finalize(AVBSFList **lst, AVBSFContext **bsf)
{
int ret = 0;
BSFListContext *ctx;
if ((*lst)->nb_bsfs == 1) {
*bsf = (*lst)->bsfs[0];
av_freep(&(*lst)->bsfs);
(*lst)->nb_bsfs = 0;
goto end;
}
ret = av_bsf_alloc(&ff_list_bsf, bsf);
if (ret < 0)
return ret;
ctx = (*bsf)->priv_data;
ctx->bsfs = (*lst)->bsfs;
ctx->nb_bsfs = (*lst)->nb_bsfs;
end:
av_freep(lst);
return ret;
}
static int bsf_parse_single(const char *str, AVBSFList *bsf_lst)
{
char *bsf_name, *bsf_options_str, *buf;
AVDictionary *bsf_options = NULL;
int ret = 0;
if (!(buf = av_strdup(str)))
return AVERROR(ENOMEM);
bsf_name = av_strtok(buf, "=", &bsf_options_str);
if (!bsf_name) {
ret = AVERROR(EINVAL);
goto end;
}
if (bsf_options_str) {
ret = av_dict_parse_string(&bsf_options, bsf_options_str, "=", ":", 0);
if (ret < 0)
goto end;
}
ret = av_bsf_list_append2(bsf_lst, bsf_name, &bsf_options);
av_dict_free(&bsf_options);
end:
av_free(buf);
return ret;
}
int av_bsf_list_parse_str(const char *str, AVBSFContext **bsf_lst)
{
AVBSFList *lst;
char *bsf_str, *buf, *dup, *saveptr;
int ret;
if (!str)
return av_bsf_get_null_filter(bsf_lst);
lst = av_bsf_list_alloc();
if (!lst)
return AVERROR(ENOMEM);
if (!(dup = buf = av_strdup(str)))
return AVERROR(ENOMEM);
while (1) {
bsf_str = av_strtok(buf, ",", &saveptr);
if (!bsf_str)
break;
ret = bsf_parse_single(bsf_str, lst);
if (ret < 0)
goto end;
buf = NULL;
}
ret = av_bsf_list_finalize(&lst, bsf_lst);
end:
if (ret < 0)
av_bsf_list_free(&lst);
av_free(dup);
return ret;
}
int av_bsf_get_null_filter(AVBSFContext **bsf)
{
return av_bsf_alloc(&ff_list_bsf, bsf);
}
@ -28,6 +28,17 @@
*/
int ff_bsf_get_packet(AVBSFContext *ctx, AVPacket **pkt);
/**
* Called by bitstream filters to get a packet for filtering.
* The reference to the packet is moved to the provided packet structure.
*
* @param ctx pointer to AVBSFContext of filter
* @param pkt pointer to packet to move reference to
*
* @return >=0 on success, negative AVERROR in case of failure
*/
int ff_bsf_get_packet_ref(AVBSFContext *ctx, AVPacket *pkt);
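/* A sketch of the minimal filter() callback built on this helper,
 * equivalent to what the empty bsf_list does: take a packet reference and
 * pass it through unchanged (illustration only). */
static int passthrough_filter(AVBSFContext *ctx, AVPacket *out)
{
    int ret = ff_bsf_get_packet_ref(ctx, out);
    if (ret < 0)
        return ret;   /* AVERROR(EAGAIN), AVERROR_EOF or another error */
    /* A real filter would inspect or rewrite 'out' in place here. */
    return 0;
}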
const AVClass *ff_bsf_child_class_next(const AVClass *prev);
#endif /* AVCODEC_BSF_H */
@ -182,7 +182,7 @@ int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size){
#if CABAC_BITS == 16
c->low = (*c->bytestream++)<<18;
c->low+= (*c->bytestream++)<<10;
// Keep our fetches on a 2-byte boundry as this should avoid ever having to
// Keep our fetches on a 2-byte boundary as this should avoid ever having to
// do unaligned loads if the compiler (or asm) optimises the double byte
// load into a single instruction
if(((uintptr_t)c->bytestream & 1) == 0) {
@ -1104,6 +1104,7 @@ static int decode_pic(AVSContext *h)
}
} while (ff_cavs_next_mb(h));
}
emms_c();
if (h->cur.f->pict_type != AV_PICTURE_TYPE_B) {
av_frame_unref(h->DPB[1].f);
FFSWAP(AVSFrame, h->cur, h->DPB[1]);
@ -1217,6 +1218,8 @@ static int cavs_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
h->got_keyframe = 1;
}
case PIC_PB_START_CODE:
if (*got_frame)
av_frame_unref(data);
*got_frame = 0;
if (!h->got_keyframe)
break;
@ -262,6 +262,7 @@ static av_cold int init_decoder(AVCodecContext *avctx)
/* taking by default roll up to 2 */
ctx->mode = CCMODE_ROLLUP;
ctx->rollup = 2;
ctx->cursor_row = 10;
ret = ff_ass_subtitle_header(avctx, "Monospace",
ASS_DEFAULT_FONT_SIZE,
ASS_DEFAULT_COLOR,
@ -296,7 +297,7 @@ static void flush_decoder(AVCodecContext *avctx)
ctx->prev_cmd[1] = 0;
ctx->mode = CCMODE_ROLLUP;
ctx->rollup = 2;
ctx->cursor_row = 0;
ctx->cursor_row = 10;
ctx->cursor_column = 0;
ctx->cursor_font = 0;
ctx->cursor_color = 0;
@ -313,7 +314,7 @@ static void flush_decoder(AVCodecContext *avctx)
/**
* @param ctx closed caption context just to print log
*/
static int write_char(CCaptionSubContext *ctx, struct Screen *screen, char ch)
static void write_char(CCaptionSubContext *ctx, struct Screen *screen, char ch)
{
uint8_t col = ctx->cursor_column;
char *row = screen->characters[ctx->cursor_row];
@ -326,16 +327,16 @@ static int write_char(CCaptionSubContext *ctx, struct Screen *screen, char ch)
charset[col] = ctx->cursor_charset;
ctx->cursor_charset = CCSET_BASIC_AMERICAN;
if (ch) ctx->cursor_column++;
return 0;
return;
}
/* We have extra space at end only for null character */
else if (col == SCREEN_COLUMNS && ch == 0) {
row[col] = ch;
return 0;
return;
}
else {
av_log(ctx, AV_LOG_WARNING, "Data Ignored since exceeding screen width\n");
return AVERROR_INVALIDDATA;
return;
}
}
@ -433,11 +434,24 @@ static void roll_up(CCaptionSubContext *ctx)
static int capture_screen(CCaptionSubContext *ctx)
{
int i;
int i, j, tab = 0;
struct Screen *screen = ctx->screen + ctx->active_screen;
enum cc_font prev_font = CCFONT_REGULAR;
av_bprint_clear(&ctx->buffer);
for (i = 0; screen->row_used && i < SCREEN_ROWS; i++)
{
if (CHECK_FLAG(screen->row_used, i)) {
const char *row = screen->characters[i];
const char *charset = screen->charsets[i];
j = 0;
while (row[j] == ' ' && charset[j] == CCSET_BASIC_AMERICAN)
j++;
if (!tab || j < tab)
tab = j;
}
}
for (i = 0; screen->row_used && i < SCREEN_ROWS; i++)
{
if (CHECK_FLAG(screen->row_used, i)) {
@ -445,12 +459,17 @@ static int capture_screen(CCaptionSubContext *ctx)
const char *font = screen->fonts[i];
const char *charset = screen->charsets[i];
const char *override;
int j = 0;
int x, y, seen_char = 0;
j = 0;
/* skip leading space */
while (row[j] == ' ' && charset[j] == CCSET_BASIC_AMERICAN)
while (row[j] == ' ' && charset[j] == CCSET_BASIC_AMERICAN && j < tab)
j++;
x = ASS_DEFAULT_PLAYRESX * (0.1 + 0.0250 * j);
y = ASS_DEFAULT_PLAYRESY * (0.1 + 0.0533 * i);
av_bprintf(&ctx->buffer, "{\\an7}{\\pos(%d,%d)}", x, y);
for (; j < SCREEN_COLUMNS; j++) {
const char *e_tag = "", *s_tag = "";
@ -485,9 +504,14 @@ static int capture_screen(CCaptionSubContext *ctx)
override = charset_overrides[(int)charset[j]][(int)row[j]];
if (override) {
av_bprintf(&ctx->buffer, "%s%s%s", e_tag, s_tag, override);
seen_char = 1;
} else if (row[j] == ' ' && !seen_char) {
av_bprintf(&ctx->buffer, "%s%s\\h", e_tag, s_tag);
} else {
av_bprintf(&ctx->buffer, "%s%s%c", e_tag, s_tag, row[j]);
seen_char = 1;
}
}
av_bprintf(&ctx->buffer, "\\N");
}
@ -711,6 +735,12 @@ static void process_cc608(CCaptionSubContext *ctx, int64_t pts, uint8_t hi, uint
/* Standard characters (always in pairs) */
handle_char(ctx, hi, lo, pts);
ctx->prev_cmd[0] = ctx->prev_cmd[1] = 0;
} else if (hi == 0x17 && lo >= 0x21 && lo <= 0x23) {
int i;
/* Tab offsets (spacing) */
for (i = 0; i < lo - 0x20; i++) {
handle_char(ctx, ' ', 0, pts);
}
} else {
/* Ignoring all other non data code */
ff_dlog(ctx, "Unknown command 0x%hhx 0x%hhx\n", hi, lo);
@ -513,7 +513,7 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
}
if (highpass_height > highpass_a_height || highpass_width > highpass_a_width || a_expected < expected) {
av_log(avctx, AV_LOG_ERROR, "Too many highpass coefficents\n");
av_log(avctx, AV_LOG_ERROR, "Too many highpass coefficients\n");
ret = AVERROR(EINVAL);
goto end;
}
@ -665,6 +665,7 @@ static const AVCodecDescriptor codec_descriptors[] = {
.name = "dnxhd",
.long_name = NULL_IF_CONFIG_SMALL("VC3/DNxHD"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
.profiles = NULL_IF_CONFIG_SMALL(ff_dnxhd_profiles),
},
{
.id = AV_CODEC_ID_THP,
@ -1303,6 +1304,41 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("innoHeim/Rsupport Screen Capture Codec"),
.props = AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_MAGICYUV,
.type = AVMEDIA_TYPE_VIDEO,
.name = "magicyuv",
.long_name = NULL_IF_CONFIG_SMALL("MagicYUV video"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_TRUEMOTION2RT,
.type = AVMEDIA_TYPE_VIDEO,
.name = "truemotion2rt",
.long_name = NULL_IF_CONFIG_SMALL("Duck TrueMotion 2.0 Real Time"),
.props = AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_CFHD,
.type = AVMEDIA_TYPE_VIDEO,
.name = "cfhd",
.long_name = NULL_IF_CONFIG_SMALL("Cineform HD"),
.props = AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_SHEERVIDEO,
.type = AVMEDIA_TYPE_VIDEO,
.name = "sheervideo",
.long_name = NULL_IF_CONFIG_SMALL("BitJazz SheerVideo"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_YLC,
.type = AVMEDIA_TYPE_VIDEO,
.name = "ylc",
.long_name = NULL_IF_CONFIG_SMALL("YUY2 Lossless Codec"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
/* image codecs */
{
@ -1528,41 +1564,6 @@ static const AVCodecDescriptor codec_descriptors[] = {
.props = AV_CODEC_PROP_LOSSLESS,
.mime_types= MT("image/png"),
},
{
.id = AV_CODEC_ID_CFHD,
.type = AVMEDIA_TYPE_VIDEO,
.name = "cfhd",
.long_name = NULL_IF_CONFIG_SMALL("Cineform HD"),
.props = AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_TRUEMOTION2RT,
.type = AVMEDIA_TYPE_VIDEO,
.name = "truemotion2rt",
.long_name = NULL_IF_CONFIG_SMALL("Duck TrueMotion 2.0 Real Time"),
.props = AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_MAGICYUV,
.type = AVMEDIA_TYPE_VIDEO,
.name = "magicyuv",
.long_name = NULL_IF_CONFIG_SMALL("MagicYUV Lossless Video"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_SHEERVIDEO,
.type = AVMEDIA_TYPE_VIDEO,
.name = "sheervideo",
.long_name = NULL_IF_CONFIG_SMALL("BitJazz SheerVideo"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_YLC,
.type = AVMEDIA_TYPE_VIDEO,
.name = "ylc",
.long_name = NULL_IF_CONFIG_SMALL("YUY2 Lossless Codec"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
/* various PCM "codecs" */
{
@ -1635,6 +1636,20 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 32-bit big-endian"),
.props = AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_S64LE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_s64le",
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 64-bit little-endian"),
.props = AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_S64BE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_s64be",
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 64-bit big-endian"),
.props = AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_U32LE,
.type = AVMEDIA_TYPE_AUDIO,
@ -2949,6 +2964,12 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("binary data"),
.mime_types= MT("application/octet-stream"),
},
{
.id = AV_CODEC_ID_SCTE_35,
.type = AVMEDIA_TYPE_DATA,
.name = "scte_35",
.long_name = NULL_IF_CONFIG_SMALL("SCTE 35 Message Queue"),
},
/* deprecated codec ids */
};
@ -83,7 +83,7 @@
#include <libcrystalhd/libcrystalhd_if.h>
#include "avcodec.h"
#include "h264.h"
#include "h264dec.h"
#include "internal.h"
#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
@ -131,7 +131,7 @@ typedef struct {
uint8_t *orig_extradata;
uint32_t orig_extradata_size;
AVBitStreamFilterContext *bsfc;
AVBSFContext *bsfc;
AVCodecParserContext *parser;
uint8_t is_70012;
@ -150,7 +150,6 @@ typedef struct {
/* Options */
uint32_t sWidth;
uint8_t bframe_bug;
} CHDContext;
static const AVOption options[] = {
@ -359,7 +358,7 @@ static av_cold int uninit(AVCodecContext *avctx)
av_parser_close(priv->parser);
if (priv->bsfc) {
av_bitstream_filter_close(priv->bsfc);
av_bsf_free(&priv->bsfc);
}
av_freep(&priv->sps_pps_buf);
@ -379,9 +378,59 @@ static av_cold int uninit(AVCodecContext *avctx)
}
static av_cold int init_bsf(AVCodecContext *avctx, const char *bsf_name)
{
CHDContext *priv = avctx->priv_data;
const AVBitStreamFilter *bsf;
int avret;
void *extradata = NULL;
size_t size = 0;
bsf = av_bsf_get_by_name(bsf_name);
if (!bsf) {
av_log(avctx, AV_LOG_ERROR,
"Cannot open the %s BSF!\n", bsf_name);
return AVERROR_BSF_NOT_FOUND;
}
avret = av_bsf_alloc(bsf, &priv->bsfc);
if (avret != 0) {
return avret;
}
avret = avcodec_parameters_from_context(priv->bsfc->par_in, avctx);
if (avret != 0) {
return avret;
}
avret = av_bsf_init(priv->bsfc);
if (avret != 0) {
return avret;
}
/* Back up the extradata so it can be restored at close time. */
priv->orig_extradata = avctx->extradata;
priv->orig_extradata_size = avctx->extradata_size;
size = priv->bsfc->par_out->extradata_size;
extradata = av_malloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
if (!extradata) {
av_log(avctx, AV_LOG_ERROR,
"Failed to allocate copy of extradata\n");
return AVERROR(ENOMEM);
}
memcpy(extradata, priv->bsfc->par_out->extradata, size);
avctx->extradata = extradata;
avctx->extradata_size = size;
return 0;
}
static av_cold int init(AVCodecContext *avctx)
{
CHDContext* priv;
int avret;
BC_STATUS ret;
BC_INFO_CRYSTAL version;
BC_INPUT_FORMAT format = {
@ -417,31 +466,23 @@ static av_cold int init(AVCodecContext *avctx)
subtype = id2subtype(priv, avctx->codec->id);
switch (subtype) {
case BC_MSUBTYPE_AVC1:
{
uint8_t *dummy_p;
int dummy_int;
/* Back up the extradata so it can be restored at close time. */
priv->orig_extradata = av_malloc(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
if (!priv->orig_extradata) {
av_log(avctx, AV_LOG_ERROR,
"Failed to allocate copy of extradata\n");
return AVERROR(ENOMEM);
}
priv->orig_extradata_size = avctx->extradata_size;
memcpy(priv->orig_extradata, avctx->extradata, avctx->extradata_size);
priv->bsfc = av_bitstream_filter_init("h264_mp4toannexb");
if (!priv->bsfc) {
av_log(avctx, AV_LOG_ERROR,
"Cannot open the h264_mp4toannexb BSF!\n");
return AVERROR_BSF_NOT_FOUND;
}
av_bitstream_filter_filter(priv->bsfc, avctx, NULL, &dummy_p,
&dummy_int, NULL, 0, 0);
avret = init_bsf(avctx, "h264_mp4toannexb");
if (avret != 0) {
return avret;
}
subtype = BC_MSUBTYPE_H264;
// Fall-through
format.startCodeSz = 4;
format.pMetaData = avctx->extradata;
format.metaDataSz = avctx->extradata_size;
break;
case BC_MSUBTYPE_DIVX:
avret = init_bsf(avctx, "mpeg4_unpack_bframes");
if (avret != 0) {
return avret;
}
format.pMetaData = avctx->extradata;
format.metaDataSz = avctx->extradata_size;
break;
case BC_MSUBTYPE_H264:
format.startCodeSz = 4;
// Fall-through
@ -450,7 +491,6 @@ static av_cold int init(AVCodecContext *avctx)
case BC_MSUBTYPE_WMV3:
case BC_MSUBTYPE_WMVA:
case BC_MSUBTYPE_MPEG2VIDEO:
case BC_MSUBTYPE_DIVX:
case BC_MSUBTYPE_DIVX311:
format.pMetaData = avctx->extradata;
format.metaDataSz = avctx->extradata_size;
@ -691,7 +731,17 @@ static inline CopyRet copy_frame(AVCodecContext *avctx,
if (interlaced)
priv->pic->top_field_first = !bottom_first;
priv->pic->pkt_pts = pkt_pts;
if (pkt_pts != AV_NOPTS_VALUE) {
priv->pic->pts = pkt_pts;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
priv->pic->pkt_pts = pkt_pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
}
av_frame_set_pkt_pos(priv->pic, -1);
av_frame_set_pkt_duration(priv->pic, 0);
av_frame_set_pkt_size(priv->pic, -1);
if (!priv->need_second_field) {
*got_frame = 1;
@ -813,15 +863,6 @@ static inline CopyRet receive_frame(AVCodecContext *avctx,
priv->last_picture = output.PicInfo.picture_number - 1;
}
if (avctx->codec->id == AV_CODEC_ID_MPEG4 &&
output.PicInfo.timeStamp == 0 && priv->bframe_bug) {
av_log(avctx, AV_LOG_VERBOSE,
"CrystalHD: Not returning packed frame twice.\n");
priv->last_picture++;
DtsReleaseOutputBuffs(dev, NULL, FALSE);
return RET_COPY_AGAIN;
}
print_frame_info(priv, &output);
if (priv->last_picture + 1 < output.PicInfo.picture_number) {
@ -882,33 +923,44 @@ static int decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *a
av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: decode_frame\n");
if (avpkt->size == 7 && !priv->bframe_bug) {
/*
* The use of a drop frame triggers the bug
*/
av_log(avctx, AV_LOG_INFO,
"CrystalHD: Enabling work-around for packed b-frame bug\n");
priv->bframe_bug = 1;
} else if (avpkt->size == 8 && priv->bframe_bug) {
/*
* Delay frames don't trigger the bug
*/
av_log(avctx, AV_LOG_INFO,
"CrystalHD: Disabling work-around for packed b-frame bug\n");
priv->bframe_bug = 0;
}
if (len) {
int32_t tx_free = (int32_t)DtsTxFreeSize(dev);
if (priv->bsfc) {
int ret = 0;
AVPacket filter_packet = { 0 };
AVPacket filtered_packet = { 0 };
ret = av_packet_ref(&filter_packet, avpkt);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "CrystalHD: mpv4toannexb filter "
"failed to ref input packet\n");
return ret;
}
ret = av_bsf_send_packet(priv->bsfc, &filter_packet);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "CrystalHD: mpv4toannexb filter "
"failed to send input packet\n");
return ret;
}
ret = av_bsf_receive_packet(priv->bsfc, &filtered_packet);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "CrystalHD: mpv4toannexb filter "
"failed to receive output packet\n");
return ret;
}
in_data = filtered_packet.data;
len = filtered_packet.size;
av_packet_unref(&filter_packet);
}
if (priv->parser) {
int ret = 0;
if (priv->bsfc) {
ret = av_bitstream_filter_filter(priv->bsfc, avctx, NULL,
&in_data, &len,
avpkt->data, len, 0);
}
free_data = ret > 0;
if (ret >= 0) {
@ -918,8 +970,8 @@ static int decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *a
H264Context *h = priv->parser->priv_data;
index = av_parser_parse2(priv->parser, avctx, &pout, &psize,
in_data, len, avctx->internal->pkt->pts,
avctx->internal->pkt->dts, 0);
in_data, len, avpkt->pts,
avpkt->dts, 0);
if (index < 0) {
av_log(avctx, AV_LOG_WARNING,
"CrystalHD: Failed to parse h.264 packet to "
@ -953,7 +1005,8 @@ static int decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *a
* avoiding mangling so we need to build a mapping to values
* we know will not be mangled.
*/
uint64_t pts = opaque_list_push(priv, avctx->internal->pkt->pts, pic_type);
int64_t safe_pts = avpkt->pts == AV_NOPTS_VALUE ? 0 : avpkt->pts;
uint64_t pts = opaque_list_push(priv, safe_pts, pic_type);
if (!pts) {
if (free_data) {
av_freep(&in_data);

View File

@ -25,19 +25,24 @@
#include "libavutil/hwcontext_cuda.h"
#include "libavutil/fifo.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "internal.h"
#include <nvcuvid.h>
#include "compat/cuda/nvcuvid.h"
#define MAX_FRAME_COUNT 20
#define MAX_FRAME_COUNT 25
typedef struct CuvidContext
{
AVClass *avclass;
CUvideodecoder cudecoder;
CUvideoparser cuparser;
char *cu_gpu;
AVBufferRef *hwdevice;
AVBufferRef *hwframe;
@ -45,12 +50,26 @@ typedef struct CuvidContext
AVFifoBuffer *frame_queue;
int deint_mode;
int64_t prev_pts;
int internal_error;
int decoder_flushing;
cudaVideoCodec codec_type;
cudaVideoChromaFormat chroma_format;
CUVIDPARSERPARAMS cuparseinfo;
CUVIDEOFORMATEX cuparse_ext;
} CuvidContext;
typedef struct CuvidParsedFrame
{
CUVIDPARSERDISPINFO dispinfo;
int second_field;
int is_deinterlacing;
} CuvidParsedFrame;
static int check_cu(AVCodecContext *avctx, CUresult err, const char *func)
{
const char *err_name;
@ -81,7 +100,7 @@ static int CUDAAPI cuvid_handle_video_sequence(void *opaque, CUVIDEOFORMAT* form
AVHWFramesContext *hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
CUVIDDECODECREATEINFO cuinfo;
av_log(avctx, AV_LOG_TRACE, "pfnSequenceCallback\n");
av_log(avctx, AV_LOG_TRACE, "pfnSequenceCallback, progressive_sequence=%d\n", format->progressive_sequence);
ctx->internal_error = 0;
@ -92,7 +111,7 @@ static int CUDAAPI cuvid_handle_video_sequence(void *opaque, CUVIDEOFORMAT* form
(AVRational){ format->display_aspect_ratio.x, format->display_aspect_ratio.y },
(AVRational){ avctx->width, avctx->height }));
if (!format->progressive_sequence)
if (!format->progressive_sequence && ctx->deint_mode == cudaVideoDeinterlaceMode_Weave)
avctx->flags |= AV_CODEC_FLAG_INTERLACED_DCT;
else
avctx->flags &= ~AV_CODEC_FLAG_INTERLACED_DCT;
@ -122,13 +141,25 @@ static int CUDAAPI cuvid_handle_video_sequence(void *opaque, CUVIDEOFORMAT* form
return 1;
if (ctx->cudecoder) {
av_log(avctx, AV_LOG_ERROR, "re-initializing decoder is not supported\n");
av_log(avctx, AV_LOG_TRACE, "Re-initializing decoder\n");
ctx->internal_error = CHECK_CU(cuvidDestroyDecoder(ctx->cudecoder));
if (ctx->internal_error < 0)
return 0;
ctx->cudecoder = NULL;
}
if (hwframe_ctx->pool && (
hwframe_ctx->width < avctx->width ||
hwframe_ctx->height < avctx->height ||
hwframe_ctx->format != AV_PIX_FMT_CUDA ||
hwframe_ctx->sw_format != AV_PIX_FMT_NV12)) {
av_log(avctx, AV_LOG_ERROR, "AVHWFramesContext is already initialized with incompatible parameters\n");
ctx->internal_error = AVERROR(EINVAL);
return 0;
}
if (hwframe_ctx->pool) {
av_log(avctx, AV_LOG_ERROR, "AVHWFramesContext is already initialized\n");
if (format->chroma_format != cudaVideoChromaFormat_420) {
av_log(avctx, AV_LOG_ERROR, "Chroma formats other than 420 are not supported\n");
ctx->internal_error = AVERROR(EINVAL);
return 0;
}
@ -157,21 +188,31 @@ static int CUDAAPI cuvid_handle_video_sequence(void *opaque, CUVIDEOFORMAT* form
cuinfo.ulNumDecodeSurfaces = MAX_FRAME_COUNT;
cuinfo.ulNumOutputSurfaces = 1;
cuinfo.ulCreationFlags = cudaVideoCreate_PreferCUVID;
cuinfo.bitDepthMinus8 = format->bit_depth_luma_minus8;
cuinfo.DeinterlaceMode = cudaVideoDeinterlaceMode_Weave;
if (format->progressive_sequence) {
ctx->deint_mode = cuinfo.DeinterlaceMode = cudaVideoDeinterlaceMode_Weave;
} else {
cuinfo.DeinterlaceMode = ctx->deint_mode;
}
if (ctx->deint_mode != cudaVideoDeinterlaceMode_Weave)
avctx->framerate = av_mul_q(avctx->framerate, (AVRational){2, 1});
ctx->internal_error = CHECK_CU(cuvidCreateDecoder(&ctx->cudecoder, &cuinfo));
if (ctx->internal_error < 0)
return 0;
hwframe_ctx->format = AV_PIX_FMT_CUDA;
hwframe_ctx->sw_format = AV_PIX_FMT_NV12;
hwframe_ctx->width = FFALIGN(avctx->coded_width, 32);
hwframe_ctx->height = FFALIGN(avctx->coded_height, 32);
if (!hwframe_ctx->pool) {
hwframe_ctx->format = AV_PIX_FMT_CUDA;
hwframe_ctx->sw_format = AV_PIX_FMT_NV12;
hwframe_ctx->width = avctx->width;
hwframe_ctx->height = avctx->height;
if ((ctx->internal_error = av_hwframe_ctx_init(ctx->hwframe)) < 0) {
av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_init failed\n");
return 0;
if ((ctx->internal_error = av_hwframe_ctx_init(ctx->hwframe)) < 0) {
av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_init failed\n");
return 0;
}
}
return 1;
@ -195,30 +236,42 @@ static int CUDAAPI cuvid_handle_picture_display(void *opaque, CUVIDPARSERDISPINF
{
AVCodecContext *avctx = opaque;
CuvidContext *ctx = avctx->priv_data;
av_log(avctx, AV_LOG_TRACE, "pfnDisplayPicture\n");
CuvidParsedFrame parsed_frame = { *dispinfo, 0, 0 };
ctx->internal_error = 0;
av_fifo_generic_write(ctx->frame_queue, dispinfo, sizeof(CUVIDPARSERDISPINFO), NULL);
if (ctx->deint_mode == cudaVideoDeinterlaceMode_Weave) {
av_fifo_generic_write(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
} else {
parsed_frame.is_deinterlacing = 1;
av_fifo_generic_write(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
parsed_frame.second_field = 1;
av_fifo_generic_write(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
}
return 1;
}
static int cuvid_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
static int cuvid_decode_packet(AVCodecContext *avctx, const AVPacket *avpkt)
{
CuvidContext *ctx = avctx->priv_data;
AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data;
AVCUDADeviceContext *device_hwctx = device_ctx->hwctx;
CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
AVFrame *frame = data;
CUVIDSOURCEDATAPACKET cupkt;
AVPacket filter_packet = { 0 };
AVPacket filtered_packet = { 0 };
CUdeviceptr mapped_frame = 0;
int ret = 0, eret = 0;
int ret = 0, eret = 0, is_flush = ctx->decoder_flushing;
if (ctx->bsf && avpkt->size) {
av_log(avctx, AV_LOG_TRACE, "cuvid_decode_packet\n");
if (is_flush && avpkt && avpkt->size)
return AVERROR_EOF;
if (av_fifo_size(ctx->frame_queue) / sizeof(CuvidParsedFrame) > MAX_FRAME_COUNT - 2 && avpkt && avpkt->size)
return AVERROR(EAGAIN);
if (ctx->bsf && avpkt && avpkt->size) {
if ((ret = av_packet_ref(&filter_packet, avpkt)) < 0) {
av_log(avctx, AV_LOG_ERROR, "av_packet_ref failed\n");
return ret;
@ -246,43 +299,85 @@ static int cuvid_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
memset(&cupkt, 0, sizeof(cupkt));
if (avpkt->size) {
if (avpkt && avpkt->size) {
cupkt.payload_size = avpkt->size;
cupkt.payload = avpkt->data;
if (avpkt->pts != AV_NOPTS_VALUE) {
cupkt.flags = CUVID_PKT_TIMESTAMP;
cupkt.timestamp = av_rescale_q(avpkt->pts, avctx->time_base, (AVRational){1, 10000000});
if (avctx->pkt_timebase.num && avctx->pkt_timebase.den)
cupkt.timestamp = av_rescale_q(avpkt->pts, avctx->pkt_timebase, (AVRational){1, 10000000});
else
cupkt.timestamp = avpkt->pts;
}
} else {
cupkt.flags = CUVID_PKT_ENDOFSTREAM;
ctx->decoder_flushing = 1;
}
ret = CHECK_CU(cuvidParseVideoData(ctx->cuparser, &cupkt));
av_packet_unref(&filtered_packet);
if (ret < 0) {
if (ctx->internal_error)
ret = ctx->internal_error;
if (ret < 0)
goto error;
// cuvidParseVideoData doesn't return an error just because stuff failed...
if (ctx->internal_error) {
av_log(avctx, AV_LOG_ERROR, "cuvid decode callback error\n");
ret = ctx->internal_error;
goto error;
}
error:
eret = CHECK_CU(cuCtxPopCurrent(&dummy));
if (eret < 0)
return eret;
else if (ret < 0)
return ret;
else if (is_flush)
return AVERROR_EOF;
else
return 0;
}
static int cuvid_output_frame(AVCodecContext *avctx, AVFrame *frame)
{
CuvidContext *ctx = avctx->priv_data;
AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data;
AVCUDADeviceContext *device_hwctx = device_ctx->hwctx;
CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
CUdeviceptr mapped_frame = 0;
int ret = 0, eret = 0;
av_log(avctx, AV_LOG_TRACE, "cuvid_output_frame\n");
if (ctx->decoder_flushing) {
ret = cuvid_decode_packet(avctx, NULL);
if (ret < 0 && ret != AVERROR_EOF)
return ret;
}
ret = CHECK_CU(cuCtxPushCurrent(cuda_ctx));
if (ret < 0)
return ret;
if (av_fifo_size(ctx->frame_queue)) {
CUVIDPARSERDISPINFO dispinfo;
CuvidParsedFrame parsed_frame;
CUVIDPROCPARAMS params;
unsigned int pitch = 0;
int offset = 0;
int i;
av_fifo_generic_read(ctx->frame_queue, &dispinfo, sizeof(CUVIDPARSERDISPINFO), NULL);
av_fifo_generic_read(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
memset(&params, 0, sizeof(params));
params.progressive_frame = dispinfo.progressive_frame;
params.second_field = 0;
params.top_field_first = dispinfo.top_field_first;
params.progressive_frame = parsed_frame.dispinfo.progressive_frame;
params.second_field = parsed_frame.second_field;
params.top_field_first = parsed_frame.dispinfo.top_field_first;
ret = CHECK_CU(cuvidMapVideoFrame(ctx->cudecoder, dispinfo.picture_index, &mapped_frame, &pitch, &params));
ret = CHECK_CU(cuvidMapVideoFrame(ctx->cudecoder, parsed_frame.dispinfo.picture_index, &mapped_frame, &pitch, &params));
if (ret < 0)
goto error;
@ -309,7 +404,7 @@ static int cuvid_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
.dstPitch = frame->linesize[i],
.srcY = offset,
.WidthInBytes = FFMIN(pitch, frame->linesize[i]),
.Height = avctx->coded_height >> (i ? 1 : 0),
.Height = avctx->height >> (i ? 1 : 0),
};
ret = CHECK_CU(cuMemcpy2D(&cpy));
@ -357,24 +452,42 @@ static int cuvid_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
frame->width = avctx->width;
frame->height = avctx->height;
frame->pts = av_rescale_q(dispinfo.timestamp, (AVRational){1, 10000000}, avctx->time_base);
if (avctx->pkt_timebase.num && avctx->pkt_timebase.den)
frame->pts = av_rescale_q(parsed_frame.dispinfo.timestamp, (AVRational){1, 10000000}, avctx->pkt_timebase);
else
frame->pts = parsed_frame.dispinfo.timestamp;
if (parsed_frame.second_field) {
if (ctx->prev_pts == INT64_MIN) {
ctx->prev_pts = frame->pts;
frame->pts += (avctx->pkt_timebase.den * avctx->framerate.den) / (avctx->pkt_timebase.num * avctx->framerate.num);
} else {
int pts_diff = (frame->pts - ctx->prev_pts) / 2;
ctx->prev_pts = frame->pts;
frame->pts += pts_diff;
}
}
/* CUVID's opaque reordering breaks the internal pkt logic.
* So set pkt_pts and clear all the other pkt_ fields.
*/
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
frame->pkt_pts = frame->pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
av_frame_set_pkt_pos(frame, -1);
av_frame_set_pkt_duration(frame, 0);
av_frame_set_pkt_size(frame, -1);
frame->interlaced_frame = !dispinfo.progressive_frame;
frame->interlaced_frame = !parsed_frame.is_deinterlacing && !parsed_frame.dispinfo.progressive_frame;
if (!dispinfo.progressive_frame)
frame->top_field_first = dispinfo.top_field_first;
*got_frame = 1;
if (frame->interlaced_frame)
frame->top_field_first = parsed_frame.dispinfo.top_field_first;
} else if (ctx->decoder_flushing) {
ret = AVERROR_EOF;
} else {
*got_frame = 0;
ret = AVERROR(EAGAIN);
}
error:
@ -389,6 +502,37 @@ error:
return ret;
}
static int cuvid_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
CuvidContext *ctx = avctx->priv_data;
AVFrame *frame = data;
int ret = 0;
av_log(avctx, AV_LOG_TRACE, "cuvid_decode_frame\n");
if (ctx->deint_mode != cudaVideoDeinterlaceMode_Weave) {
av_log(avctx, AV_LOG_ERROR, "Deinterlacing is not supported via the old API\n");
return AVERROR(EINVAL);
}
if (!ctx->decoder_flushing) {
ret = cuvid_decode_packet(avctx, avpkt);
if (ret < 0)
return ret;
}
ret = cuvid_output_frame(avctx, frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
*got_frame = 0;
} else if (ret < 0) {
return ret;
} else {
*got_frame = 1;
}
return 0;
}
static av_cold int cuvid_decode_end(AVCodecContext *avctx)
{
CuvidContext *ctx = avctx->priv_data;
@ -410,12 +554,6 @@ static av_cold int cuvid_decode_end(AVCodecContext *avctx)
return 0;
}
static void cuvid_ctx_free(AVHWDeviceContext *ctx)
{
AVCUDADeviceContext *hwctx = ctx->hwctx;
cuCtxDestroy(hwctx->cuda_ctx);
}
static int cuvid_test_dummy_decoder(AVCodecContext *avctx, CUVIDPARSERPARAMS *cuparseinfo)
{
CUVIDDECODECREATEINFO cuinfo;
@ -441,6 +579,7 @@ static int cuvid_test_dummy_decoder(AVCodecContext *avctx, CUVIDPARSERPARAMS *cu
cuinfo.ulNumDecodeSurfaces = MAX_FRAME_COUNT;
cuinfo.ulNumOutputSurfaces = 1;
cuinfo.ulCreationFlags = cudaVideoCreate_PreferCUVID;
cuinfo.bitDepthMinus8 = 0;
cuinfo.DeinterlaceMode = cudaVideoDeinterlaceMode_Weave;
@ -461,10 +600,7 @@ static av_cold int cuvid_decode_init(AVCodecContext *avctx)
AVCUDADeviceContext *device_hwctx;
AVHWDeviceContext *device_ctx;
AVHWFramesContext *hwframe_ctx;
CUVIDPARSERPARAMS cuparseinfo;
CUVIDEOFORMATEX cuparse_ext;
CUVIDSOURCEDATAPACKET seq_pkt;
CUdevice device;
CUcontext cuda_ctx = NULL;
CUcontext dummy;
const AVBitStreamFilter *bsf;
@ -480,7 +616,7 @@ static av_cold int cuvid_decode_init(AVCodecContext *avctx)
return ret;
}
ctx->frame_queue = av_fifo_alloc(MAX_FRAME_COUNT * sizeof(CUVIDPARSERDISPINFO));
ctx->frame_queue = av_fifo_alloc(MAX_FRAME_COUNT * sizeof(CuvidParsedFrame));
if (!ctx->frame_queue) {
ret = AVERROR(ENOMEM);
goto error;
@ -502,84 +638,80 @@ static av_cold int cuvid_decode_init(AVCodecContext *avctx)
ret = AVERROR(ENOMEM);
goto error;
}
device_ctx = hwframe_ctx->device_ctx;
device_hwctx = device_ctx->hwctx;
cuda_ctx = device_hwctx->cuda_ctx;
} else {
ctx->hwdevice = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_CUDA);
if (!ctx->hwdevice) {
av_log(avctx, AV_LOG_ERROR, "Error allocating hwdevice\n");
ret = AVERROR(ENOMEM);
goto error;
}
ret = CHECK_CU(cuInit(0));
ret = av_hwdevice_ctx_create(&ctx->hwdevice, AV_HWDEVICE_TYPE_CUDA, ctx->cu_gpu, NULL, 0);
if (ret < 0)
goto error;
ret = CHECK_CU(cuDeviceGet(&device, 0));
if (ret < 0)
goto error;
ret = CHECK_CU(cuCtxCreate(&cuda_ctx, CU_CTX_SCHED_BLOCKING_SYNC, device));
if (ret < 0)
goto error;
device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data;
device_ctx->free = cuvid_ctx_free;
device_hwctx = device_ctx->hwctx;
device_hwctx->cuda_ctx = cuda_ctx;
ret = CHECK_CU(cuCtxPopCurrent(&dummy));
if (ret < 0)
goto error;
ret = av_hwdevice_ctx_init(ctx->hwdevice);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "av_hwdevice_ctx_init failed\n");
goto error;
}
ctx->hwframe = av_hwframe_ctx_alloc(ctx->hwdevice);
if (!ctx->hwframe) {
av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_alloc failed\n");
ret = AVERROR(ENOMEM);
goto error;
}
hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
}
memset(&cuparseinfo, 0, sizeof(cuparseinfo));
memset(&cuparse_ext, 0, sizeof(cuparse_ext));
device_ctx = hwframe_ctx->device_ctx;
device_hwctx = device_ctx->hwctx;
cuda_ctx = device_hwctx->cuda_ctx;
memset(&ctx->cuparseinfo, 0, sizeof(ctx->cuparseinfo));
memset(&ctx->cuparse_ext, 0, sizeof(ctx->cuparse_ext));
memset(&seq_pkt, 0, sizeof(seq_pkt));
cuparseinfo.pExtVideoInfo = &cuparse_ext;
ctx->cuparseinfo.pExtVideoInfo = &ctx->cuparse_ext;
switch (avctx->codec->id) {
#if CONFIG_H263_CUVID_DECODER
case AV_CODEC_ID_H263:
ctx->cuparseinfo.CodecType = cudaVideoCodec_MPEG4;
break;
#endif
#if CONFIG_H264_CUVID_DECODER
case AV_CODEC_ID_H264:
cuparseinfo.CodecType = cudaVideoCodec_H264;
ctx->cuparseinfo.CodecType = cudaVideoCodec_H264;
break;
#endif
#if CONFIG_HEVC_CUVID_DECODER
case AV_CODEC_ID_HEVC:
cuparseinfo.CodecType = cudaVideoCodec_HEVC;
ctx->cuparseinfo.CodecType = cudaVideoCodec_HEVC;
break;
#endif
#if CONFIG_MJPEG_CUVID_DECODER
case AV_CODEC_ID_MJPEG:
ctx->cuparseinfo.CodecType = cudaVideoCodec_JPEG;
break;
#endif
#if CONFIG_MPEG1_CUVID_DECODER
case AV_CODEC_ID_MPEG1VIDEO:
ctx->cuparseinfo.CodecType = cudaVideoCodec_MPEG1;
break;
#endif
#if CONFIG_MPEG2_CUVID_DECODER
case AV_CODEC_ID_MPEG2VIDEO:
ctx->cuparseinfo.CodecType = cudaVideoCodec_MPEG2;
break;
#endif
#if CONFIG_MPEG4_CUVID_DECODER
case AV_CODEC_ID_MPEG4:
ctx->cuparseinfo.CodecType = cudaVideoCodec_MPEG4;
break;
#endif
#if CONFIG_VP8_CUVID_DECODER
case AV_CODEC_ID_VP8:
cuparseinfo.CodecType = cudaVideoCodec_VP8;
ctx->cuparseinfo.CodecType = cudaVideoCodec_VP8;
break;
#endif
#if CONFIG_VP9_CUVID_DECODER
case AV_CODEC_ID_VP9:
cuparseinfo.CodecType = cudaVideoCodec_VP9;
ctx->cuparseinfo.CodecType = cudaVideoCodec_VP9;
break;
#endif
#if CONFIG_VC1_CUVID_DECODER
case AV_CODEC_ID_VC1:
cuparseinfo.CodecType = cudaVideoCodec_VC1;
ctx->cuparseinfo.CodecType = cudaVideoCodec_VC1;
break;
#endif
default:
@ -605,38 +737,38 @@ static av_cold int cuvid_decode_init(AVCodecContext *avctx)
goto error;
}
cuparse_ext.format.seqhdr_data_length = ctx->bsf->par_out->extradata_size;
memcpy(cuparse_ext.raw_seqhdr_data,
ctx->cuparse_ext.format.seqhdr_data_length = ctx->bsf->par_out->extradata_size;
memcpy(ctx->cuparse_ext.raw_seqhdr_data,
ctx->bsf->par_out->extradata,
FFMIN(sizeof(cuparse_ext.raw_seqhdr_data), ctx->bsf->par_out->extradata_size));
FFMIN(sizeof(ctx->cuparse_ext.raw_seqhdr_data), ctx->bsf->par_out->extradata_size));
} else if (avctx->extradata_size > 0) {
cuparse_ext.format.seqhdr_data_length = avctx->extradata_size;
memcpy(cuparse_ext.raw_seqhdr_data,
ctx->cuparse_ext.format.seqhdr_data_length = avctx->extradata_size;
memcpy(ctx->cuparse_ext.raw_seqhdr_data,
avctx->extradata,
FFMIN(sizeof(cuparse_ext.raw_seqhdr_data), avctx->extradata_size));
FFMIN(sizeof(ctx->cuparse_ext.raw_seqhdr_data), avctx->extradata_size));
}
cuparseinfo.ulMaxNumDecodeSurfaces = MAX_FRAME_COUNT;
cuparseinfo.ulMaxDisplayDelay = 4;
cuparseinfo.pUserData = avctx;
cuparseinfo.pfnSequenceCallback = cuvid_handle_video_sequence;
cuparseinfo.pfnDecodePicture = cuvid_handle_picture_decode;
cuparseinfo.pfnDisplayPicture = cuvid_handle_picture_display;
ctx->cuparseinfo.ulMaxNumDecodeSurfaces = MAX_FRAME_COUNT;
ctx->cuparseinfo.ulMaxDisplayDelay = 4;
ctx->cuparseinfo.pUserData = avctx;
ctx->cuparseinfo.pfnSequenceCallback = cuvid_handle_video_sequence;
ctx->cuparseinfo.pfnDecodePicture = cuvid_handle_picture_decode;
ctx->cuparseinfo.pfnDisplayPicture = cuvid_handle_picture_display;
ret = CHECK_CU(cuCtxPushCurrent(cuda_ctx));
if (ret < 0)
goto error;
ret = cuvid_test_dummy_decoder(avctx, &cuparseinfo);
ret = cuvid_test_dummy_decoder(avctx, &ctx->cuparseinfo);
if (ret < 0)
goto error;
ret = CHECK_CU(cuvidCreateVideoParser(&ctx->cuparser, &cuparseinfo));
ret = CHECK_CU(cuvidCreateVideoParser(&ctx->cuparser, &ctx->cuparseinfo));
if (ret < 0)
goto error;
seq_pkt.payload = cuparse_ext.raw_seqhdr_data;
seq_pkt.payload_size = cuparse_ext.format.seqhdr_data_length;
seq_pkt.payload = ctx->cuparse_ext.raw_seqhdr_data;
seq_pkt.payload_size = ctx->cuparse_ext.format.seqhdr_data_length;
if (seq_pkt.payload && seq_pkt.payload_size) {
ret = CHECK_CU(cuvidParseVideoData(ctx->cuparser, &seq_pkt));
@ -648,6 +780,11 @@ static av_cold int cuvid_decode_init(AVCodecContext *avctx)
if (ret < 0)
goto error;
ctx->prev_pts = INT64_MIN;
if (!avctx->pkt_timebase.num || !avctx->pkt_timebase.den)
av_log(avctx, AV_LOG_WARNING, "Invalid pkt_timebase, passing timestamps as-is.\n");
return 0;
error:
@ -655,7 +792,80 @@ error:
return ret;
}
static void cuvid_flush(AVCodecContext *avctx)
{
CuvidContext *ctx = avctx->priv_data;
AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data;
AVCUDADeviceContext *device_hwctx = device_ctx->hwctx;
CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
CUVIDSOURCEDATAPACKET seq_pkt = { 0 };
int ret;
ret = CHECK_CU(cuCtxPushCurrent(cuda_ctx));
if (ret < 0)
goto error;
av_fifo_freep(&ctx->frame_queue);
ctx->frame_queue = av_fifo_alloc(MAX_FRAME_COUNT * sizeof(CuvidParsedFrame));
if (!ctx->frame_queue) {
av_log(avctx, AV_LOG_ERROR, "Failed to recreate frame queue on flush\n");
return;
}
if (ctx->cudecoder) {
cuvidDestroyDecoder(ctx->cudecoder);
ctx->cudecoder = NULL;
}
if (ctx->cuparser) {
cuvidDestroyVideoParser(ctx->cuparser);
ctx->cuparser = NULL;
}
ret = CHECK_CU(cuvidCreateVideoParser(&ctx->cuparser, &ctx->cuparseinfo));
if (ret < 0)
goto error;
seq_pkt.payload = ctx->cuparse_ext.raw_seqhdr_data;
seq_pkt.payload_size = ctx->cuparse_ext.format.seqhdr_data_length;
if (seq_pkt.payload && seq_pkt.payload_size) {
ret = CHECK_CU(cuvidParseVideoData(ctx->cuparser, &seq_pkt));
if (ret < 0)
goto error;
}
ret = CHECK_CU(cuCtxPopCurrent(&dummy));
if (ret < 0)
goto error;
ctx->prev_pts = INT64_MIN;
ctx->decoder_flushing = 0;
return;
error:
av_log(avctx, AV_LOG_ERROR, "CUDA reinit on flush failed\n");
}
#define OFFSET(x) offsetof(CuvidContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
{ "deint", "Set deinterlacing mode", OFFSET(deint_mode), AV_OPT_TYPE_INT, { .i64 = cudaVideoDeinterlaceMode_Weave }, cudaVideoDeinterlaceMode_Weave, cudaVideoDeinterlaceMode_Adaptive, VD, "deint" },
{ "weave", "Weave deinterlacing (do nothing)", 0, AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Weave }, 0, 0, VD, "deint" },
{ "bob", "Bob deinterlacing", 0, AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Bob }, 0, 0, VD, "deint" },
{ "adaptive", "Adaptive deinterlacing", 0, AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Adaptive }, 0, 0, VD, "deint" },
{ "gpu", "GPU to be used for decoding", OFFSET(cu_gpu), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VD },
{ NULL }
};
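/* A sketch of how an application would drive these options (assumes the
 * h264_cuvid decoder is compiled in; option values are examples). Note that
 * bob/adaptive deinterlacing doubles the output frame rate, as set up in
 * cuvid_handle_video_sequence() above. */
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

static AVCodecContext *open_cuvid_h264(void)
{
    AVCodec *codec = avcodec_find_decoder_by_name("h264_cuvid");
    AVCodecContext *avctx;

    if (!codec || !(avctx = avcodec_alloc_context3(codec)))
        return NULL;

    av_opt_set(avctx->priv_data, "deint", "adaptive", 0);
    av_opt_set(avctx->priv_data, "gpu",   "0",        0); /* first CUDA device */

    if (avcodec_open2(avctx, codec, NULL) < 0)
        avcodec_free_context(&avctx);
    return avctx;
}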
#define DEFINE_CUVID_CODEC(x, X) \
static const AVClass x##_cuvid_class = { \
.class_name = #x "_cuvid", \
.item_name = av_default_item_name, \
.option = options, \
.version = LIBAVUTIL_VERSION_INT, \
}; \
AVHWAccel ff_##x##_cuvid_hwaccel = { \
.name = #x "_cuvid", \
.type = AVMEDIA_TYPE_VIDEO, \
@ -668,10 +878,14 @@ error:
.type = AVMEDIA_TYPE_VIDEO, \
.id = AV_CODEC_ID_##X, \
.priv_data_size = sizeof(CuvidContext), \
.priv_class = &x##_cuvid_class, \
.init = cuvid_decode_init, \
.close = cuvid_decode_end, \
.decode = cuvid_decode_frame, \
.capabilities = AV_CODEC_CAP_DELAY, \
.send_packet = cuvid_decode_packet, \
.receive_frame = cuvid_output_frame, \
.flush = cuvid_flush, \
.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING, \
.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_CUDA, \
AV_PIX_FMT_NV12, \
AV_PIX_FMT_NONE }, \
@ -681,10 +895,30 @@ error:
DEFINE_CUVID_CODEC(hevc, HEVC)
#endif
#if CONFIG_H263_CUVID_DECODER
DEFINE_CUVID_CODEC(h263, H263)
#endif
#if CONFIG_H264_CUVID_DECODER
DEFINE_CUVID_CODEC(h264, H264)
#endif
#if CONFIG_MJPEG_CUVID_DECODER
DEFINE_CUVID_CODEC(mjpeg, MJPEG)
#endif
#if CONFIG_MPEG1_CUVID_DECODER
DEFINE_CUVID_CODEC(mpeg1, MPEG1VIDEO)
#endif
#if CONFIG_MPEG2_CUVID_DECODER
DEFINE_CUVID_CODEC(mpeg2, MPEG2VIDEO)
#endif
#if CONFIG_MPEG4_CUVID_DECODER
DEFINE_CUVID_CODEC(mpeg4, MPEG4)
#endif
#if CONFIG_VP8_CUVID_DECODER
DEFINE_CUVID_CODEC(vp8, VP8)
#endif
@ -102,6 +102,7 @@ typedef struct DDSContext {
int compressed;
int paletted;
int bpp;
enum DDSPostProc postproc;
const uint8_t *tex_data; // Compressed texture
@ -148,7 +149,7 @@ static int parse_pixel_format(AVCodecContext *avctx)
ctx->paletted = 0;
}
bpp = bytestream2_get_le32(gbc); // rgbbitcount
bpp = ctx->bpp = bytestream2_get_le32(gbc); // rgbbitcount
r = bytestream2_get_le32(gbc); // rbitmask
g = bytestream2_get_le32(gbc); // gbitmask
b = bytestream2_get_le32(gbc); // bbitmask
@ -354,8 +355,11 @@ static int parse_pixel_format(AVCodecContext *avctx)
return AVERROR_INVALIDDATA;
}
} else {
/* 4 bpp */
if (bpp == 4 && r == 0 && g == 0 && b == 0 && a == 0)
avctx->pix_fmt = AV_PIX_FMT_PAL8;
/* 8 bpp */
if (bpp == 8 && r == 0xff && g == 0 && b == 0 && a == 0)
else if (bpp == 8 && r == 0xff && g == 0 && b == 0 && a == 0)
avctx->pix_fmt = AV_PIX_FMT_GRAY8;
else if (bpp == 8 && r == 0 && g == 0 && b == 0 && a == 0xff)
avctx->pix_fmt = AV_PIX_FMT_GRAY8;
@ -676,6 +680,36 @@ static int dds_decode(AVCodecContext *avctx, void *data,
/* Use the decompress function on the texture, one block per thread. */
ctx->tex_data = gbc->buffer;
avctx->execute2(avctx, decompress_texture_thread, frame, NULL, ctx->slice_count);
} else if (!ctx->paletted && ctx->bpp == 4) {
uint8_t *dst = frame->data[0];
int x, y, i;
/* Use the first 64 bytes as palette, then copy the rest. */
bytestream2_get_buffer(gbc, frame->data[1], 16 * 4);
for (i = 0; i < 16; i++) {
AV_WN32(frame->data[1] + i*4,
(frame->data[1][2+i*4]<<0)+
(frame->data[1][1+i*4]<<8)+
(frame->data[1][0+i*4]<<16)+
(frame->data[1][3+i*4]<<24)
);
}
frame->palette_has_changed = 1;
if (bytestream2_get_bytes_left(gbc) < frame->height * frame->width / 2) {
av_log(avctx, AV_LOG_ERROR, "Buffer is too small (%d < %d).\n",
bytestream2_get_bytes_left(gbc), frame->height * frame->width / 2);
return AVERROR_INVALIDDATA;
}
for (y = 0; y < frame->height; y++) {
for (x = 0; x < frame->width; x += 2) {
uint8_t val = bytestream2_get_byte(gbc);
dst[x ] = val & 0xF;
dst[x + 1] = val >> 4;
}
dst += frame->linesize[0];
}
} else {
int linesize = av_image_get_linesize(avctx->pix_fmt, frame->width, 0);
libavcodec/dirac_vlc.c Normal file
@ -0,0 +1,247 @@
/*
* Copyright (C) 2016 Open Broadcast Systems Ltd.
* Author 2016 Rostislav Pehlivanov <rpehlivanov@obe.tv>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "dirac_vlc.h"
#define LUT_SIZE (1 << LUT_BITS)
#define RSIZE_BITS (CHAR_BIT*sizeof(residual))
#define CONVERT_TO_RESIDUE(a, b) \
(((residual)(a)) << (RSIZE_BITS - (b)))
#define INIT_RESIDUE(N) \
residual N = 0; \
av_unused int32_t N ## _bits = 0
#define SET_RESIDUE(N, I, B) \
N = CONVERT_TO_RESIDUE(I, B); \
N ## _bits = B
#define APPEND_RESIDUE(N, M) \
N |= M >> (N ## _bits); \
N ## _bits += (M ## _bits)
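/* A standalone toy use of the residue macros above, to make the MSB-aligned
 * bit packing concrete (values arbitrary; not part of the decoder). */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t residual;
#define RSIZE_BITS (CHAR_BIT*sizeof(residual))
#define CONVERT_TO_RESIDUE(a, b) (((residual)(a)) << (RSIZE_BITS - (b)))

int main(void)
{
    /* Pack the 3-bit string 101, then append the 2-bit string 11:
     * the result is 10111 left-aligned in a 64-bit word. */
    residual r = CONVERT_TO_RESIDUE(0x5, 3);
    int r_bits = 3;
    residual m = CONVERT_TO_RESIDUE(0x3, 2);

    r |= m >> r_bits;   /* APPEND_RESIDUE(r, m) */
    r_bits += 2;

    printf("%016llx (%d bits)\n", (unsigned long long)r, r_bits);
    /* prints: b800000000000000 (5 bits) */
    return 0;
}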
int ff_dirac_golomb_read_32bit(DiracGolombLUT *lut_ctx, const uint8_t *buf,
int bytes, uint8_t *_dst, int coeffs)
{
int i, b, c_idx = 0;
int32_t *dst = (int32_t *)_dst;
DiracGolombLUT *future[4], *l = &lut_ctx[2*LUT_SIZE + buf[0]];
INIT_RESIDUE(res);
for (b = 1; b <= bytes; b++) {
future[0] = &lut_ctx[buf[b]];
future[1] = future[0] + 1*LUT_SIZE;
future[2] = future[0] + 2*LUT_SIZE;
future[3] = future[0] + 3*LUT_SIZE;
if ((c_idx + 1) > coeffs)
return c_idx;
/* res_bits is a hint for better branch prediction */
if (res_bits && l->sign) {
int32_t coeff = 1;
APPEND_RESIDUE(res, l->preamble);
for (i = 0; i < (res_bits >> 1) - 1; i++) {
coeff <<= 1;
coeff |= (res >> (RSIZE_BITS - 2*i - 2)) & 1;
}
dst[c_idx++] = l->sign * (coeff - 1);
res_bits = res = 0;
}
memcpy(&dst[c_idx], l->ready, LUT_BITS*sizeof(int32_t));
c_idx += l->ready_num;
APPEND_RESIDUE(res, l->leftover);
l = future[l->need_s ? 3 : !res_bits ? 2 : res_bits & 1];
}
return c_idx;
}
int ff_dirac_golomb_read_16bit(DiracGolombLUT *lut_ctx, const uint8_t *buf,
int bytes, uint8_t *_dst, int coeffs)
{
int i, b, c_idx = 0;
int16_t *dst = (int16_t *)_dst;
DiracGolombLUT *future[4], *l = &lut_ctx[2*LUT_SIZE + buf[0]];
INIT_RESIDUE(res);
for (b = 1; b <= bytes; b++) {
future[0] = &lut_ctx[buf[b]];
future[1] = future[0] + 1*LUT_SIZE;
future[2] = future[0] + 2*LUT_SIZE;
future[3] = future[0] + 3*LUT_SIZE;
if ((c_idx + 1) > coeffs)
return c_idx;
if (res_bits && l->sign) {
int32_t coeff = 1;
APPEND_RESIDUE(res, l->preamble);
for (i = 0; i < (res_bits >> 1) - 1; i++) {
coeff <<= 1;
coeff |= (res >> (RSIZE_BITS - 2*i - 2)) & 1;
}
dst[c_idx++] = l->sign * (coeff - 1);
res_bits = res = 0;
}
for (i = 0; i < LUT_BITS; i++)
dst[c_idx + i] = l->ready[i];
c_idx += l->ready_num;
APPEND_RESIDUE(res, l->leftover);
l = future[l->need_s ? 3 : !res_bits ? 2 : res_bits & 1];
}
return c_idx;
}
/* Searches for golomb codes in a residue */
static inline void search_for_golomb(DiracGolombLUT *l, residual r, int bits)
{
int r_count = RSIZE_BITS - 1;
int bits_start, bits_tot = bits, need_sign = 0;
#define READ_BIT(N) (((N) >> (N ## _count--)) & 1)
while (1) {
int32_t coef = 1;
bits_start = (RSIZE_BITS - 1) - r_count;
while (1) {
if (!bits--)
goto leftover;
if (READ_BIT(r))
break;
coef <<= 1;
if (!bits--)
goto leftover;
coef |= READ_BIT(r);
}
l->ready[l->ready_num] = coef - 1;
if (l->ready[l->ready_num]) {
if (!bits--) {
need_sign = 1;
goto leftover;
}
l->ready[l->ready_num] *= READ_BIT(r) ? -1 : +1;
}
l->ready_num++;
if (!bits)
return;
}
leftover:
l->leftover = r << bits_start;
l->leftover_bits = bits_tot - bits_start;
l->need_s = need_sign;
}
/* Parity LUTs - even and odd bit end positions */
static void generate_parity_lut(DiracGolombLUT *lut, int even)
{
int idx;
for (idx = 0; idx < LUT_SIZE; idx++) {
DiracGolombLUT *l = &lut[idx];
int symbol_end_loc = -1;
uint32_t code;
int i;
INIT_RESIDUE(res);
SET_RESIDUE(res, idx, LUT_BITS);
for (i = 0; i < LUT_BITS; i++) {
const int cond = even ? (i & 1) : !(i & 1);
if (((res >> (RSIZE_BITS - i - 1)) & 1) && cond) {
symbol_end_loc = i + 2;
break;
}
}
if (symbol_end_loc < 0 || symbol_end_loc > LUT_BITS) {
l->preamble = 0;
l->preamble_bits = 0;
l->leftover_bits = LUT_BITS;
l->leftover = CONVERT_TO_RESIDUE(idx, l->leftover_bits);
if (even)
l->need_s = idx & 1;
continue;
}
/* Gets bits 0 through (symbol_end_loc - 1) inclusive */
code = idx >> ((LUT_BITS - 1) - (symbol_end_loc - 1));
code &= ((1 << LUT_BITS) - 1) >> (LUT_BITS - symbol_end_loc);
l->preamble_bits = symbol_end_loc;
l->preamble = CONVERT_TO_RESIDUE(code, l->preamble_bits);
l->sign = ((l->preamble >> (RSIZE_BITS - l->preamble_bits)) & 1) ? -1 : +1;
search_for_golomb(l, res << symbol_end_loc, LUT_BITS - symbol_end_loc);
}
}
/* Reset (off == 0) and needs-one-more-bit (off == 1) LUTs */
static void generate_offset_lut(DiracGolombLUT *lut, int off)
{
int idx;
for (idx = 0; idx < LUT_SIZE; idx++) {
DiracGolombLUT *l = &lut[idx];
INIT_RESIDUE(res);
SET_RESIDUE(res, idx, LUT_BITS);
l->preamble = CONVERT_TO_RESIDUE(res >> (RSIZE_BITS - off), off);
l->preamble_bits = off;
l->sign = ((l->preamble >> (RSIZE_BITS - l->preamble_bits)) & 1) ? -1 : +1;
search_for_golomb(l, res << off, LUT_BITS - off);
}
}
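/* The reader context holds four LUT_SIZE-entry classes: two parity classes
 * (selected by the parity of the pending residue bit count), a reset class
 * for byte-aligned restarts, and a class that first consumes a pending sign
 * bit; the decode loops hop between them via the future[] pointers. */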
av_cold int ff_dirac_golomb_reader_init(DiracGolombLUT **lut_ctx)
{
DiracGolombLUT *lut;
if (!(lut = av_calloc(4*LUT_SIZE, sizeof(DiracGolombLUT))))
return AVERROR(ENOMEM);
generate_parity_lut(&lut[0*LUT_SIZE], 0);
generate_parity_lut(&lut[1*LUT_SIZE], 1);
generate_offset_lut(&lut[2*LUT_SIZE], 0);
generate_offset_lut(&lut[3*LUT_SIZE], 1);
*lut_ctx = lut;
return 0;
}
av_cold void ff_dirac_golomb_reader_end(DiracGolombLUT **lut_ctx)
{
av_freep(lut_ctx);
}

libavcodec/dirac_vlc.h Normal file

@ -0,0 +1,51 @@
/*
* Copyright (C) 2016 Open Broadcast Systems Ltd.
* Author 2016 Rostislav Pehlivanov <rpehlivanov@obe.tv>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_DIRAC_VLC_H
#define AVCODEC_DIRAC_VLC_H
#include "libavutil/avutil.h"
/* Can be 32 bits wide for some performance gain on some machines, but it will
* incorrectly decode very long coefficients (usually only 1 or 2 per frame) */
typedef uint64_t residual;
#define LUT_BITS 8
/* Exactly 64 bytes */
typedef struct DiracGolombLUT {
residual preamble, leftover;
int32_t ready[LUT_BITS];
int32_t preamble_bits, leftover_bits, ready_num;
int8_t need_s, sign;
} DiracGolombLUT;
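/* Illustrative call sequence (buffer sizing is the caller's responsibility):
 *     DiracGolombLUT *luts;
 *     if (ff_dirac_golomb_reader_init(&luts) < 0)
 *         return AVERROR(ENOMEM);
 *     n = ff_dirac_golomb_read_16bit(luts, buf, bytes, (uint8_t *)dst, max);
 *     ff_dirac_golomb_reader_end(&luts);
 * where n <= max is the number of coefficients actually decoded. */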
av_cold int ff_dirac_golomb_reader_init(DiracGolombLUT **lut_ctx);
int ff_dirac_golomb_read_32bit(DiracGolombLUT *lut_ctx, const uint8_t *buf,
int bytes, uint8_t *dst, int coeffs);
int ff_dirac_golomb_read_16bit(DiracGolombLUT *lut_ctx, const uint8_t *buf,
int bytes, uint8_t *_dst, int coeffs);
av_cold void ff_dirac_golomb_reader_end(DiracGolombLUT **lut_ctx);
#endif /* AVCODEC_DIRAC_VLC_H */

libavcodec/diracdec.c

@ -32,6 +32,7 @@
#include "internal.h"
#include "golomb.h"
#include "dirac_arith.h"
#include "dirac_vlc.h"
#include "mpeg12data.h"
#include "libavcodec/mpegvideo.h"
#include "mpegvideoencdsp.h"
@ -120,11 +121,20 @@ typedef struct Plane {
SubBand band[MAX_DWT_LEVELS][4];
} Plane;
/* Used by Low Delay and High Quality profiles */
typedef struct DiracSlice {
GetBitContext gb;
int slice_x;
int slice_y;
int bytes;
} DiracSlice;
typedef struct DiracContext {
AVCodecContext *avctx;
MpegvideoEncDSPContext mpvencdsp;
VideoDSPContext vdsp;
DiracDSPContext diracdsp;
DiracGolombLUT *reader_ctx;
DiracVersionInfo version;
GetBitContext gb;
AVDiracSeqHeader seq;
@ -161,6 +171,13 @@ typedef struct DiracContext {
unsigned num_x; /* number of horizontal slices */
unsigned num_y; /* number of vertical slices */
uint8_t *thread_buf; /* Per-thread buffer for coefficient storage */
int threads_num_buf; /* Current # of buffers allocated */
int thread_buf_size; /* Each thread has a buffer this size */
DiracSlice *slice_params_buf;
int slice_params_num_buf;
struct {
unsigned width;
unsigned height;
@ -370,6 +387,11 @@ static av_cold int dirac_decode_init(AVCodecContext *avctx)
s->avctx = avctx;
s->frame_number = -1;
s->thread_buf = NULL;
s->threads_num_buf = -1;
s->thread_buf_size = -1;
ff_dirac_golomb_reader_init(&s->reader_ctx);
ff_diracdsp_init(&s->diracdsp);
ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
ff_videodsp_init(&s->vdsp, 8);
@ -399,65 +421,29 @@ static av_cold int dirac_decode_end(AVCodecContext *avctx)
DiracContext *s = avctx->priv_data;
int i;
ff_dirac_golomb_reader_end(&s->reader_ctx);
dirac_decode_flush(avctx);
for (i = 0; i < MAX_FRAMES; i++)
av_frame_free(&s->all_frames[i].avframe);
av_freep(&s->thread_buf);
av_freep(&s->slice_params_buf);
return 0;
}
#define SIGN_CTX(x) (CTX_SIGN_ZERO + ((x) > 0) - ((x) < 0))
static inline int coeff_unpack_golomb(GetBitContext *gb, int qfactor, int qoffset)
{
int sign, coeff;
uint32_t buf;
OPEN_READER(re, gb);
UPDATE_CACHE(re, gb);
buf = GET_CACHE(re, gb);
if (buf & 0x80000000) {
LAST_SKIP_BITS(re,gb,1);
CLOSE_READER(re, gb);
return 0;
}
if (buf & 0xAA800000) {
buf >>= 32 - 8;
SKIP_BITS(re, gb, ff_interleaved_golomb_vlc_len[buf]);
coeff = ff_interleaved_ue_golomb_vlc_code[buf];
} else {
unsigned ret = 1;
do {
buf >>= 32 - 8;
SKIP_BITS(re, gb,
FFMIN(ff_interleaved_golomb_vlc_len[buf], 8));
if (ff_interleaved_golomb_vlc_len[buf] != 9) {
ret <<= (ff_interleaved_golomb_vlc_len[buf] - 1) >> 1;
ret |= ff_interleaved_dirac_golomb_vlc_code[buf];
break;
}
ret = (ret << 4) | ff_interleaved_dirac_golomb_vlc_code[buf];
UPDATE_CACHE(re, gb);
buf = GET_CACHE(re, gb);
} while (ret<0x8000000U && BITS_AVAILABLE(re, gb));
coeff = ret - 1;
}
coeff = (coeff * qfactor + qoffset) >> 2;
sign = SHOW_SBITS(re, gb, 1);
LAST_SKIP_BITS(re, gb, 1);
coeff = (coeff ^ sign) - sign;
CLOSE_READER(re, gb);
int coeff = dirac_get_se_golomb(gb);
const int sign = FFSIGN(coeff);
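/* Dequantize: scale the magnitude by qfactor, add the rounding offset
 * (both in quarter units), then reapply the sign; zero takes no offset. */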
if (coeff)
coeff = sign*((sign * coeff * qfactor + qoffset) >> 2);
return coeff;
}
#define SIGN_CTX(x) (CTX_SIGN_ZERO + ((x) > 0) - ((x) < 0))
#define UNPACK_ARITH(n, type) \
static inline void coeff_unpack_arith_##n(DiracArith *c, int qfactor, int qoffset, \
SubBand *b, type *buf, int x, int y) \
@ -527,7 +513,7 @@ static inline void codeblock(DiracContext *s, SubBand *b,
b->quant = quant;
}
if (b->quant > 115) {
if (b->quant > (DIRAC_MAX_QUANT_INDEX - 1)) {
av_log(s->avctx, AV_LOG_ERROR, "Unsupported quant %d\n", b->quant);
b->quant = 0;
return;
@ -717,12 +703,12 @@ static void decode_subband(DiracContext *s, GetBitContext *gb, int quant,
uint8_t *buf2 = b2 ? b2->ibuf + top * b2->stride: NULL;
int x, y;
if (quant > 115) {
if (quant > (DIRAC_MAX_QUANT_INDEX - 1)) {
av_log(s->avctx, AV_LOG_ERROR, "Unsupported quant %d\n", quant);
return;
}
qfactor = ff_dirac_qscale_tab[quant & 0x7f];
qoffset = ff_dirac_qoffset_intra_tab[quant & 0x7f] + 2;
qfactor = ff_dirac_qscale_tab[quant];
qoffset = ff_dirac_qoffset_intra_tab[quant] + 2;
/* we have to constantly check for overread since the spec explicitly
   requires it: all remaining coeffs are then set to 0 */
if (get_bits_count(gb) >= bits_end)
@ -750,15 +736,6 @@ static void decode_subband(DiracContext *s, GetBitContext *gb, int quant,
}
}
/* Used by Low Delay and High Quality profiles */
typedef struct DiracSlice {
GetBitContext gb;
int slice_x;
int slice_y;
int bytes;
} DiracSlice;
/**
* Dirac Specification ->
* 13.5.2 Slices. slice(sx,sy)
@ -801,52 +778,120 @@ static int decode_lowdelay_slice(AVCodecContext *avctx, void *arg)
return 0;
}
typedef struct SliceCoeffs {
int left;
int top;
int tot_h;
int tot_v;
int tot;
} SliceCoeffs;
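/* Computes, per DWT level, where the coefficients of slice (x, y) start
 * within plane p and how many it spans, returning the slice's total
 * coefficient count: level 0 carries 4 subbands, deeper levels 3 each. */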
static int subband_coeffs(DiracContext *s, int x, int y, int p,
SliceCoeffs c[MAX_DWT_LEVELS])
{
int level, coef = 0;
for (level = 0; level < s->wavelet_depth; level++) {
SliceCoeffs *o = &c[level];
SubBand *b = &s->plane[p].band[level][3]; /* orientation doesn't matter */
o->top = b->height * y / s->num_y;
o->left = b->width * x / s->num_x;
o->tot_h = ((b->width * (x + 1)) / s->num_x) - o->left;
o->tot_v = ((b->height * (y + 1)) / s->num_y) - o->top;
o->tot = o->tot_h*o->tot_v;
coef += o->tot * (4 - !!level);
}
return coef;
}
/**
* VC-2 Specification ->
* 13.5.3 hq_slice(sx,sy)
*/
static int decode_hq_slice(AVCodecContext *avctx, void *arg)
static int decode_hq_slice(DiracContext *s, DiracSlice *slice, uint8_t *tmp_buf)
{
int i, quant, level, orientation, quant_idx;
uint8_t quants[MAX_DWT_LEVELS][4];
DiracContext *s = avctx->priv_data;
DiracSlice *slice = arg;
int i, level, orientation, quant_idx;
int qfactor[MAX_DWT_LEVELS][4], qoffset[MAX_DWT_LEVELS][4];
GetBitContext *gb = &slice->gb;
SliceCoeffs coeffs_num[MAX_DWT_LEVELS];
skip_bits_long(gb, 8*s->highquality.prefix_bytes);
quant_idx = get_bits(gb, 8);
if (quant_idx >= DIRAC_MAX_QUANT_INDEX) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid quantization index - %i\n", quant_idx);
return AVERROR_INVALIDDATA;
}
/* Slice quantization (slice_quantizers() in the specs) */
for (level = 0; level < s->wavelet_depth; level++) {
for (orientation = !!level; orientation < 4; orientation++) {
quant = FFMAX(quant_idx - s->lowdelay.quant[level][orientation], 0);
quants[level][orientation] = quant;
const int quant = FFMAX(quant_idx - s->lowdelay.quant[level][orientation], 0);
qfactor[level][orientation] = ff_dirac_qscale_tab[quant];
qoffset[level][orientation] = ff_dirac_qoffset_intra_tab[quant] + 2;
}
}
/* Luma + 2 Chroma planes */
for (i = 0; i < 3; i++) {
int64_t length = s->highquality.size_scaler * get_bits(gb, 8);
int64_t bits_left = 8 * length;
int64_t bits_end = get_bits_count(gb) + bits_left;
int coef_num, coef_par, off = 0;
int64_t length = s->highquality.size_scaler*get_bits(gb, 8);
int64_t bits_end = get_bits_count(gb) + 8*length;
const uint8_t *addr = align_get_bits(gb);
if (bits_end >= INT_MAX) {
if (length*8 > get_bits_left(gb)) {
av_log(s->avctx, AV_LOG_ERROR, "end too far away\n");
return AVERROR_INVALIDDATA;
}
coef_num = subband_coeffs(s, slice->slice_x, slice->slice_y, i, coeffs_num);
if (s->pshift)
coef_par = ff_dirac_golomb_read_32bit(s->reader_ctx, addr,
length, tmp_buf, coef_num);
else
coef_par = ff_dirac_golomb_read_16bit(s->reader_ctx, addr,
length, tmp_buf, coef_num);
if (coef_num > coef_par) {
const int start_b = coef_par * (1 << (s->pshift + 1));
const int end_b = coef_num * (1 << (s->pshift + 1));
memset(&tmp_buf[start_b], 0, end_b - start_b);
}
for (level = 0; level < s->wavelet_depth; level++) {
const SliceCoeffs *c = &coeffs_num[level];
for (orientation = !!level; orientation < 4; orientation++) {
decode_subband(s, gb, quants[level][orientation], slice->slice_x, slice->slice_y, bits_end,
&s->plane[i].band[level][orientation], NULL);
const SubBand *b1 = &s->plane[i].band[level][orientation];
uint8_t *buf = b1->ibuf + c->top * b1->stride + (c->left << (s->pshift + 1));
/* Change to c->tot_h <= 4 for AVX2 dequantization */
const int qfunc = s->pshift + 2*(c->tot_h <= 2);
s->diracdsp.dequant_subband[qfunc](&tmp_buf[off], buf, b1->stride,
qfactor[level][orientation],
qoffset[level][orientation],
c->tot_v, c->tot_h);
off += c->tot << (s->pshift + 1);
}
}
skip_bits_long(gb, bits_end - get_bits_count(gb));
}
return 0;
}
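/* Slice-row worker for avctx->execute2(): job jobnr decodes the whole row
 * of s->num_x slices at vertical index jobnr, reusing the per-thread
 * coefficient buffer selected by threadnr. */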
static int decode_hq_slice_row(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
int i;
DiracContext *s = avctx->priv_data;
DiracSlice *slices = ((DiracSlice *)arg) + s->num_x*jobnr;
uint8_t *thread_buf = &s->thread_buf[s->thread_buf_size*threadnr];
for (i = 0; i < s->num_x; i++)
decode_hq_slice(s, &slices[i], thread_buf);
return 0;
}
/**
* Dirac Specification ->
* 13.5.1 low_delay_transform_data()
@ -855,14 +900,37 @@ static int decode_lowdelay(DiracContext *s)
{
AVCodecContext *avctx = s->avctx;
int slice_x, slice_y, bufsize;
int64_t bytes = 0;
int64_t coef_buf_size, bytes = 0;
const uint8_t *buf;
DiracSlice *slices;
SliceCoeffs tmp[MAX_DWT_LEVELS];
int slice_num = 0;
slices = av_mallocz_array(s->num_x, s->num_y * sizeof(DiracSlice));
if (!slices)
return AVERROR(ENOMEM);
if (s->slice_params_num_buf != (s->num_x * s->num_y)) {
s->slice_params_buf = av_realloc_f(s->slice_params_buf, s->num_x * s->num_y, sizeof(DiracSlice));
if (!s->slice_params_buf) {
av_log(s->avctx, AV_LOG_ERROR, "slice params buffer allocation failure\n");
return AVERROR(ENOMEM);
}
s->slice_params_num_buf = s->num_x * s->num_y;
}
slices = s->slice_params_buf;
/* 8 because that's the most the golomb reader could overread junk data
 * from another plane/slice, and 512 bytes of padding for SIMD */
coef_buf_size = subband_coeffs(s, s->num_x - 1, s->num_y - 1, 0, tmp) + 8;
coef_buf_size = (coef_buf_size << (1 + s->pshift)) + 512;
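/* Illustrative sizing: at 10-bit (pshift = 1) coefficients are stored as
 * int32_t, so this is (max_slice_coeffs + 8) * 4 + 512 bytes; at 8-bit
 * (pshift = 0) int16_t storage halves the per-coefficient cost. */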
if (s->threads_num_buf != avctx->thread_count ||
s->thread_buf_size != coef_buf_size) {
s->threads_num_buf = avctx->thread_count;
s->thread_buf_size = coef_buf_size;
s->thread_buf = av_realloc_f(s->thread_buf, avctx->thread_count, s->thread_buf_size);
if (!s->thread_buf) {
av_log(s->avctx, AV_LOG_ERROR, "thread buffer allocation failure\n");
return AVERROR(ENOMEM);
}
}
align_get_bits(&s->gb);
/*[DIRAC_STD] 13.5.2 Slices. slice(sx,sy) */
@ -879,9 +947,8 @@ static int decode_lowdelay(DiracContext *s)
if (bytes <= bufsize/8)
bytes += buf[bytes] * s->highquality.size_scaler + 1;
}
if (bytes >= INT_MAX) {
if (bytes >= INT_MAX || bytes*8 > bufsize) {
av_log(s->avctx, AV_LOG_ERROR, "too many bytes\n");
av_free(slices);
return AVERROR_INVALIDDATA;
}
@ -898,8 +965,13 @@ static int decode_lowdelay(DiracContext *s)
bufsize = 0;
}
}
avctx->execute(avctx, decode_hq_slice, slices, NULL, slice_num,
sizeof(DiracSlice));
if (s->num_x*s->num_y != slice_num) {
av_log(s->avctx, AV_LOG_ERROR, "too few slices\n");
return AVERROR_INVALIDDATA;
}
avctx->execute2(avctx, decode_hq_slice_row, slices, NULL, s->num_y);
} else {
for (slice_y = 0; bufsize > 0 && slice_y < s->num_y; slice_y++) {
for (slice_x = 0; bufsize > 0 && slice_x < s->num_x; slice_x++) {
@ -933,7 +1005,7 @@ static int decode_lowdelay(DiracContext *s)
intra_dc_prediction_8(&s->plane[2].band[0][0]);
}
}
av_free(slices);
return 0;
}
@ -1748,9 +1820,11 @@ static int dirac_decode_frame_internal(DiracContext *s)
if (s->low_delay) {
/* [DIRAC_STD] 13.5.1 low_delay_transform_data() */
for (comp = 0; comp < 3; comp++) {
Plane *p = &s->plane[comp];
memset(p->idwt.buf, 0, p->idwt.stride * p->idwt.height);
if (!s->hq_picture) {
for (comp = 0; comp < 3; comp++) {
Plane *p = &s->plane[comp];
memset(p->idwt.buf, 0, p->idwt.stride * p->idwt.height);
}
}
if (!s->zero_res) {
if ((ret = decode_lowdelay(s)) < 0)

libavcodec/diracdsp.c

@ -189,6 +189,27 @@ static void add_rect_clamped_c(uint8_t *dst, const uint16_t *src, int stride,
}
}
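/* Generates the C dequantizers: each packed slice coefficient is restored
 * as sign(c) * ((|c|*qf + qs) >> 2) (zero stays zero), reading the tightly
 * packed thread buffer and writing into the subband at its own stride. */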
#define DEQUANT_SUBBAND(PX) \
static void dequant_subband_ ## PX ## _c(uint8_t *src, uint8_t *dst, ptrdiff_t stride, \
const int qf, const int qs, int tot_v, int tot_h) \
{ \
int i, y; \
for (y = 0; y < tot_v; y++) { \
PX c, sign, *src_r = (PX *)src, *dst_r = (PX *)dst; \
for (i = 0; i < tot_h; i++) { \
c = *src_r++; \
sign = FFSIGN(c)*(!!c); \
c = (FFABS(c)*qf + qs) >> 2; \
*dst_r++ = c*sign; \
} \
src += tot_h << (sizeof(PX) >> 1); \
dst += stride; \
} \
}
DEQUANT_SUBBAND(int16_t)
DEQUANT_SUBBAND(int32_t)
#define PIXFUNC(PFX, WIDTH) \
c->PFX ## _dirac_pixels_tab[WIDTH>>4][0] = ff_ ## PFX ## _dirac_pixels ## WIDTH ## _c; \
c->PFX ## _dirac_pixels_tab[WIDTH>>4][1] = ff_ ## PFX ## _dirac_pixels ## WIDTH ## _l2_c; \
@ -214,6 +235,9 @@ av_cold void ff_diracdsp_init(DiracDSPContext *c)
c->biweight_dirac_pixels_tab[1] = biweight_dirac_pixels16_c;
c->biweight_dirac_pixels_tab[2] = biweight_dirac_pixels32_c;
c->dequant_subband[0] = c->dequant_subband[2] = dequant_subband_int16_t_c;
c->dequant_subband[1] = c->dequant_subband[3] = dequant_subband_int32_t_c;
PIXFUNC(put, 8);
PIXFUNC(put, 16);
PIXFUNC(put, 32);

libavcodec/diracdsp.h

@ -22,6 +22,7 @@
#define AVCODEC_DIRACDSP_H
#include <stdint.h>
#include <stddef.h>
typedef void (*dirac_weight_func)(uint8_t *block, int stride, int log2_denom, int weight, int h);
typedef void (*dirac_biweight_func)(uint8_t *dst, const uint8_t *src, int stride, int log2_denom, int weightd, int weights, int h);
@ -46,6 +47,9 @@ typedef struct {
void (*add_rect_clamped)(uint8_t *dst/*align 16*/, const uint16_t *src/*align 16*/, int stride, const int16_t *idwt/*align 16*/, int idwt_stride, int width, int height/*mod 2*/);
void (*add_dirac_obmc[3])(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen);
/* 0-1: int16_t and int32_t asm/c, 2-3: int16_t and int32_t, C only */
void (*dequant_subband[4])(uint8_t *src, uint8_t *dst, ptrdiff_t stride, const int qf, const int qs, int tot_v, int tot_h);
dirac_weight_func weight_dirac_pixels_tab[3];
dirac_biweight_func biweight_dirac_pixels_tab[3];
} DiracDSPContext;

libavcodec/diractab.h

@ -38,4 +38,6 @@ extern const int32_t ff_dirac_qoffset_intra_tab[120];
/* Scaling offsets needed for quantization/dequantization, for inter frames */
extern const int ff_dirac_qoffset_inter_tab[122];
#define DIRAC_MAX_QUANT_INDEX (FF_ARRAY_ELEMS(ff_dirac_qscale_tab))
#endif /* AVCODEC_DIRACTAB_H */

libavcodec/dnxhddata.c

@ -22,7 +22,6 @@
#include "avcodec.h"
#include "dnxhddata.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
/* The quantization tables below are in zigzag order! */
@ -234,6 +233,7 @@ static const uint8_t dnxhd_1252_chroma_weight[] = {
114, 128, 125, 129, 134, 125, 116, 116,
};
/* Used in CID 1244, 1260 */
static const uint8_t dnxhd_1260_luma_weight[] = {
0, 32, 33, 34, 36, 37, 37, 36,
34, 33, 34, 35, 37, 38, 40, 41,
@ -245,6 +245,7 @@ static const uint8_t dnxhd_1260_luma_weight[] = {
52, 53, 53, 50, 50, 54, 54, 54,
};
/* Used in CID 1244, 1260 */
static const uint8_t dnxhd_1260_chroma_weight[] = {
0, 32, 34, 38, 42, 40, 38, 36,
35, 35, 38, 42, 43, 43, 42, 40,
@ -980,6 +981,14 @@ const CIDEntry ff_dnxhd_cid_table[] = {
dnxhd_1235_run_codes, dnxhd_1235_run_bits, dnxhd_1238_run,
{ 185, 220 },
{ { 25, 1 }, { 30000, 1001 } } },
{ 1244, 1440, 1080, 606208, 303104,
DNXHD_INTERLACED, 4, 8, 3,
dnxhd_1260_luma_weight, dnxhd_1260_chroma_weight,
dnxhd_1237_dc_codes, dnxhd_1237_dc_bits,
dnxhd_1237_ac_codes, dnxhd_1237_ac_bits, dnxhd_1237_ac_info,
dnxhd_1237_run_codes, dnxhd_1237_run_bits, dnxhd_1237_run,
{ 120, 145 },
{ { 25, 1 }, { 30000, 1001 } } },
{ 1250, 1280, 720, 458752, 458752,
0, 6, 10, 4,
dnxhd_1250_luma_weight, dnxhd_1250_chroma_weight,
@ -1047,35 +1056,35 @@ const CIDEntry ff_dnxhd_cid_table[] = {
dnxhd_1235_dc_codes, dnxhd_1235_dc_bits,
dnxhd_1235_ac_codes, dnxhd_1235_ac_bits, dnxhd_1235_ac_info,
dnxhd_1235_run_codes, dnxhd_1235_run_bits, dnxhd_1235_run,
{ 0 } },
{ 0 }, { { 0 } }, { 57344, 255} },
{ 1271, DNXHD_VARIABLE, DNXHD_VARIABLE, DNXHD_VARIABLE, DNXHD_VARIABLE,
0, 6, DNXHD_VARIABLE, 4,
dnxhd_1241_luma_weight, dnxhd_1241_chroma_weight,
dnxhd_1235_dc_codes, dnxhd_1235_dc_bits,
dnxhd_1235_ac_codes, dnxhd_1235_ac_bits, dnxhd_1235_ac_info,
dnxhd_1235_run_codes, dnxhd_1235_run_bits, dnxhd_1235_run,
{ 0 } },
{ 0 }, { { 0 } }, { 28672, 255} },
{ 1272, DNXHD_VARIABLE, DNXHD_VARIABLE, DNXHD_VARIABLE, DNXHD_VARIABLE,
0, 4, 8, 4,
dnxhd_1238_luma_weight, dnxhd_1238_chroma_weight,
dnxhd_1237_dc_codes, dnxhd_1237_dc_bits,
dnxhd_1238_ac_codes, dnxhd_1238_ac_bits, dnxhd_1238_ac_info,
dnxhd_1235_run_codes, dnxhd_1235_run_bits, dnxhd_1238_run,
{ 0 } },
{ 0 }, { { 0 } }, { 28672, 255} },
{ 1273, DNXHD_VARIABLE, DNXHD_VARIABLE, DNXHD_VARIABLE, DNXHD_VARIABLE,
0, 4, 8, 3,
dnxhd_1237_luma_weight, dnxhd_1237_chroma_weight,
dnxhd_1237_dc_codes, dnxhd_1237_dc_bits,
dnxhd_1237_ac_codes, dnxhd_1237_ac_bits, dnxhd_1237_ac_info,
dnxhd_1237_run_codes, dnxhd_1237_run_bits, dnxhd_1237_run,
{ 0 } },
{ 0 }, { { 0 } }, { 18944, 255} },
{ 1274, DNXHD_VARIABLE, DNXHD_VARIABLE, DNXHD_VARIABLE, DNXHD_VARIABLE,
0, 4, 8, 3,
dnxhd_1237_luma_weight, dnxhd_1237_chroma_weight,
dnxhd_1237_dc_codes, dnxhd_1237_dc_bits,
dnxhd_1237_ac_codes, dnxhd_1237_ac_bits, dnxhd_1237_ac_info,
dnxhd_1237_run_codes, dnxhd_1237_run_bits, dnxhd_1237_run,
{ 0 } },
{ 0 }, { { 0 } }, { 5888, 255} },
};
int ff_dnxhd_get_cid_table(int cid)
@ -1103,17 +1112,38 @@ int avpriv_dnxhd_get_interlaced(int cid)
return ff_dnxhd_cid_table[i].flags & DNXHD_INTERLACED ? 1 : 0;
}
#if LIBAVCODEC_VERSION_MAJOR < 58
uint64_t avpriv_dnxhd_parse_header_prefix(const uint8_t *buf)
{
uint64_t prefix = AV_RB32(buf);
prefix = (prefix << 16) | buf[4] << 8;
return ff_dnxhd_check_header_prefix(prefix);
return ff_dnxhd_parse_header_prefix(buf);
}
#endif
static int dnxhd_find_hr_cid(AVCodecContext *avctx)
{
switch (avctx->profile) {
case FF_PROFILE_DNXHR_444:
return 1270;
case FF_PROFILE_DNXHR_HQX:
return 1271;
case FF_PROFILE_DNXHR_HQ:
return 1272;
case FF_PROFILE_DNXHR_SQ:
return 1273;
case FF_PROFILE_DNXHR_LB:
return 1274;
}
return 0;
}
int ff_dnxhd_find_cid(AVCodecContext *avctx, int bit_depth)
{
int i, j;
int mbs = avctx->bit_rate / 1000000;
if (avctx->profile != FF_PROFILE_DNXHD)
return dnxhd_find_hr_cid(avctx);
if (!mbs)
return 0;
for (i = 0; i < FF_ARRAY_ELEMS(ff_dnxhd_cid_table); i++) {

libavcodec/dnxhddata.h

@ -25,6 +25,7 @@
#include <stdint.h>
#include "avcodec.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
/** Additional profile info flags */
#define DNXHD_INTERLACED (1<<0)
@ -34,8 +35,6 @@
/** Frame headers, extra 0x00 added to end for parser */
#define DNXHD_HEADER_INITIAL 0x000002800100
#define DNXHD_HEADER_444 0x000002800200
#define DNXHD_HEADER_HR1 0x000002800300
#define DNXHD_HEADER_HR2 0x0000038C0300
/** Indicate that a CIDEntry value must be read in the bitstream */
#define DNXHD_VARIABLE 0
@ -57,6 +56,7 @@ typedef struct CIDEntry {
const uint8_t *run_bits, *run;
int bit_rates[5]; ///< Helper to choose variants, rounded to nearest 5Mb/s
AVRational frame_rates[5];
AVRational packet_scale;
} CIDEntry;
extern const CIDEntry ff_dnxhd_cid_table[];
@ -65,17 +65,36 @@ int ff_dnxhd_get_cid_table(int cid);
int ff_dnxhd_find_cid(AVCodecContext *avctx, int bit_depth);
void ff_dnxhd_print_profiles(AVCodecContext *avctx, int loglevel);
static av_always_inline uint64_t ff_dnxhd_check_header_prefix(uint64_t prefix)
static av_always_inline uint64_t ff_dnxhd_check_header_prefix_hr(uint64_t prefix)
{
if (prefix == DNXHD_HEADER_INITIAL ||
prefix == DNXHD_HEADER_444 ||
prefix == DNXHD_HEADER_HR1 ||
prefix == DNXHD_HEADER_HR2)
uint64_t data_offset = prefix >> 16;
if ((prefix & 0xFFFF0000FFFFLL) == 0x0300 &&
data_offset >= 0x0280 && data_offset <= 0x2170 &&
(data_offset & 3) == 0)
return prefix;
return 0;
}
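/* Example: the former DNXHD_HEADER_HR2 value 0x0000038C0300 passes:
 * (prefix & 0xFFFF0000FFFF) == 0x0300, and its embedded data offset 0x038C
 * lies within [0x0280, 0x2170] and is 4-byte aligned. */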
static av_always_inline uint64_t ff_dnxhd_check_header_prefix(uint64_t prefix)
{
if (prefix == DNXHD_HEADER_INITIAL ||
prefix == DNXHD_HEADER_444 ||
ff_dnxhd_check_header_prefix_hr(prefix))
return prefix;
return 0;
}
static av_always_inline uint64_t ff_dnxhd_parse_header_prefix(const uint8_t *buf)
{
uint64_t prefix = AV_RB32(buf);
prefix = (prefix << 16) | buf[4] << 8;
return ff_dnxhd_check_header_prefix(prefix);
}
int avpriv_dnxhd_get_frame_size(int cid);
int avpriv_dnxhd_get_interlaced(int cid);
#if LIBAVCODEC_VERSION_MAJOR < 58
attribute_deprecated
uint64_t avpriv_dnxhd_parse_header_prefix(const uint8_t *buf);
#endif
#endif /* AVCODEC_DNXHDDATA_H */

Some files were not shown because too many files have changed in this diff.