author     brian-ch <brian-ch@b956fd51-792f-4845-bead-9b4dfca2ff2c>  2015-04-12 18:32:49 +0000
committer  brian-ch <brian-ch@b956fd51-792f-4845-bead-9b4dfca2ff2c>  2015-04-12 18:32:49 +0000
commit     a57eddf010cd07cb6744ef89d3df8661c121d23c (patch)
tree       baaef88b26b87703239cce6b05b8c09ba2aea6a1 /src
parent     f80199e9dfde699a8c6c3de5e182f64755805efd (diff)
Update ffmpeg 2.6 headers
git-svn-id: svn://svn.code.sf.net/p/ultrastardx/svn/trunk@3114 b956fd51-792f-4845-bead-9b4dfca2ff2c
Diffstat (limited to 'src')
-rw-r--r--  src/lib/ffmpeg-2.6/avcodec.pas           | 129
-rw-r--r--  src/lib/ffmpeg-2.6/avformat.pas          |  91
-rw-r--r--  src/lib/ffmpeg-2.6/avio.pas              |   7
-rw-r--r--  src/lib/ffmpeg-2.6/ff_api-defines.inc    |   6
-rw-r--r--  src/lib/ffmpeg-2.6/libavutil/frame.pas   |  12
-rw-r--r--  src/lib/ffmpeg-2.6/libavutil/pixfmt.pas  |  56
6 files changed, 205 insertions, 96 deletions
diff --git a/src/lib/ffmpeg-2.6/avcodec.pas b/src/lib/ffmpeg-2.6/avcodec.pas
index a1e4aa02..18bb091a 100644
--- a/src/lib/ffmpeg-2.6/avcodec.pas
+++ b/src/lib/ffmpeg-2.6/avcodec.pas
@@ -338,6 +338,7 @@ type
AV_CODEC_ID_SGIRLE_DEPRECATED,
AV_CODEC_ID_MVC1_DEPRECATED,
AV_CODEC_ID_MVC2_DEPRECATED,
+ AV_CODEC_ID_HQX,
(** see below. they need to be hard coded.
AV_CODEC_ID_BRENDER_PIX= MKBETAG('B','P','I','X'),
@@ -440,7 +441,9 @@ type
AV_CODEC_ID_ADPCM_VIMA_DEPRECATED,
(** see below. they need to be hard coded.
AV_CODEC_ID_ADPCM_VIMA = MKBETAG('V','I','M','A'),
+{$IFDEF FF_API_VIMA_DECODER}
AV_CODEC_ID_VIMA = MKBETAG('V','I','M','A'),
+{$IFEND}
AV_CODEC_ID_ADPCM_AFC = MKBETAG('A','F','C',' '),
AV_CODEC_ID_ADPCM_IMA_OKI = MKBETAG('O','K','I',' '),
AV_CODEC_ID_ADPCM_DTK = MKBETAG('D','T','K',' '),
@@ -531,6 +534,7 @@ type
AV_CODEC_ID_TAK_DEPRECATED,
AV_CODEC_ID_PAF_AUDIO_DEPRECATED,
AV_CODEC_ID_ON2AVC,
+ AV_CODEC_ID_DSS_SP,
(** see below. they need to be hard coded.
AV_CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'),
AV_CODEC_ID_SONIC = MKBETAG('S','O','N','C'),
@@ -1451,6 +1455,12 @@ type
AV_PKT_DATA_STEREO3D,
(**
+ * This side data should be associated with an audio stream and corresponds
+ * to enum AVAudioServiceType.
+ *)
+ AV_PKT_DATA_AUDIO_SERVICE_TYPE,
+
+ (**
* Recommmends skipping the specified number of samples
* @code
* u32le number of samples to skip from start of this packet
@@ -2171,13 +2181,13 @@ type
*)
codec_tag: cuint;
+{$IFDEF FF_API_STREAM_CODEC_TAG}
(**
- * fourcc from the AVI stream header (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
- * This is used to work around some encoder bugs.
- * - encoding: unused
- * - decoding: Set by user, will be converted to uppercase by libavcodec during init.
+ * @deprecated this field is unused
*)
+ {attribute_deprecated}
stream_codec_tag: cuint;
+{$IFEND}
priv_data: pointer;
@@ -3662,6 +3672,13 @@ type
framerate: TAVRational;
(**
+ * Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
+ * - encoding: unused.
+ * - decoding: Set by libavcodec before calling get_format()
+ *)
+ sw_pix_fmt: TAVPixelFormat;
+
+ (**
* Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
* Code outside libavcodec should access this field using:
* av_codec_{get,set}_pkt_timebase(avctx)
@@ -3829,7 +3846,7 @@ type
(**
* data+linesize for the bitmap of this subtitle.
- * can be set for text/ass as well once they where rendered
+ * can be set for text/ass as well once they are rendered
*)
pict: TAVPicture;
type_: TAVSubtitleType;
@@ -4107,6 +4124,12 @@ const
*)
AV_HWACCEL_FLAG_IGNORE_LEVEL = (1 shl 0);
+ (**
+ * Hardware acceleration can output YUV pixel formats with a different chroma
+ * sampling than 4:2:0 and/or other than 8 bits per component.
+ *)
+ AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH = (1 shl 1);
+
FF_SUB_CHARENC_MODE_DO_NOTHING = -1; ///< do nothing (demuxer outputs a stream supposed to be already in UTF-8, or the codec is bitmap for instance)
FF_SUB_CHARENC_MODE_AUTOMATIC = 0; ///< libavcodec will select the mode itself
FF_SUB_CHARENC_MODE_PRE_DECODER = 1; ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv
@@ -4315,6 +4338,9 @@ procedure avcodec_free_frame(frame: PPAVFrame);
*
* @warning This function is not thread safe!
*
+ * @note Always call this function before using decoding routines (such as
+ * @ref avcodec_decode_video2()).
+ *
* @code
* avcodec_register_all();
* av_dict_set(&opts, "b", "2.5M", 0);
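
On the Pascal side of the binding, the open-before-decode order required by the note above could look like the following minimal sketch. The unit name avcodec, the choice of AV_CODEC_ID_H264 and the Halt()-based error handling are assumptions of this sketch, not part of the header.

program open_codec_sketch;
{ Minimal sketch: register, find, allocate, then avcodec_open2() before any
  call to avcodec_decode_video2()/avcodec_decode_audio4(). }
uses
  avcodec;
var
  codec: PAVCodec;
  ctx:   PAVCodecContext;
begin
  avcodec_register_all();                           // register decoders first
  codec := avcodec_find_decoder(AV_CODEC_ID_H264);  // any known codec id will do
  if codec = nil then Halt(1);
  ctx := avcodec_alloc_context3(codec);
  if ctx = nil then Halt(1);
  if avcodec_open2(ctx, codec, nil) < 0 then        // must succeed before decoding
    Halt(1);
  { ... decoding calls go here ... }
  avcodec_close(ctx);
end.
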
@@ -4756,18 +4782,27 @@ function avcodec_decode_audio3(avctx: PAVCodecContext; samples: PSmallint;
* Decode the audio frame of size avpkt->size from avpkt->data into frame.
*
* Some decoders may support multiple frames in a single AVPacket. Such
- * decoders would then just decode the first frame. In this case,
- * avcodec_decode_audio4 has to be called again with an AVPacket containing
- * the remaining data in order to decode the second frame, etc...
- * Even if no frames are returned, the packet needs to be fed to the decoder
- * with remaining data until it is completely consumed or an error occurs.
+ * decoders would then just decode the first frame and the return value would be
+ * less than the packet size. In this case, avcodec_decode_audio4 has to be
+ * called again with an AVPacket containing the remaining data in order to
+ * decode the second frame, etc... Even if no frames are returned, the packet
+ * needs to be fed to the decoder with remaining data until it is completely
+ * consumed or an error occurs.
+ *
+ * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input
+ * and output. This means that for some packets they will not immediately
+ * produce decoded output and need to be flushed at the end of decoding to get
+ * all the decoded data. Flushing is done by calling this function with packets
+ * with avpkt->data set to NULL and avpkt->size set to 0 until it stops
+ * returning samples. It is safe to flush even those decoders that are not
+ * marked with CODEC_CAP_DELAY, then no samples will be returned.
*
* @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE
* larger than the actual read bytes because some optimized bitstream
* readers read 32 or 64 bits at once and could read over the end.
*
- * @note You might have to align the input buffer. The alignment requirements
- * depend on the CPU and the decoder.
+ * @note The AVCodecContext MUST have been opened with @ref avcodec_open2()
+ * before packets may be fed to the decoder.
*
* @param avctx the codec context
* @param[out] frame The AVFrame in which to store decoded audio samples.
@@ -4780,10 +4815,13 @@ function avcodec_decode_audio3(avctx: PAVCodecContext; samples: PSmallint;
* to the frame if av_frame_is_writable() returns 1.
* When AVCodecContext.refcounted_frames is set to 0, the returned
* reference belongs to the decoder and is valid only until the
- * next call to this function or until closing the decoder.
- * The caller may not write to it.
+ * next call to this function or until closing or flushing the
+ * decoder. The caller may not write to it.
* @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is
- * non-zero.
+ * non-zero. Note that this field being set to zero
+ * does not mean that an error has occurred. For
+ * decoders with CODEC_CAP_DELAY set, no given decode
+ * call is guaranteed to produce a frame.
* @param[in] avpkt The input AVPacket containing the input buffer.
* At least avpkt->data and avpkt->size should be set. Some
* decoders might also require additional fields to be set.
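
A hedged Pascal sketch of the loop these paragraphs describe: keep feeding the remaining packet bytes until the packet is consumed, then drain CODEC_CAP_DELAY decoders with a nil/0 packet. The parameter passing (got_frame as a Pcint passed with @, av_init_packet taking a var TAVPacket) and the unit names (ctypes, avcodec, the avutil frame unit) are assumed from the usdx tree, not guaranteed by this diff.

procedure DecodeAudioPacket(ctx: PAVCodecContext; frame: PAVFrame; var pkt: TAVPacket);
var
  gotFrame, used: cint;
begin
  while pkt.size > 0 do
  begin
    used := avcodec_decode_audio4(ctx, frame, @gotFrame, @pkt);
    if used < 0 then
      Break;                                   // decoding error: drop the rest of this packet
    if gotFrame <> 0 then
    begin
      { ... consume frame^.nb_samples decoded samples here ... }
    end;
    // some decoders consume the packet piecewise, so advance by the bytes used
    // (generic pointer advance, independent of the declared type of pkt.data)
    pkt.data := Pointer(PtrUInt(pkt.data) + PtrUInt(used));
    Dec(pkt.size, used);
  end;
end;

procedure FlushAudioDecoder(ctx: PAVCodecContext; frame: PAVFrame);
var
  pkt: TAVPacket;
  gotFrame: cint;
begin
  av_init_packet(pkt);
  pkt.data := nil;                             // nil data / size 0 drains buffered frames
  pkt.size := 0;
  repeat
    if avcodec_decode_audio4(ctx, frame, @gotFrame, @pkt) < 0 then
      Break;
  until gotFrame = 0;
end;

In a real player the two routines would bracket the av_read_frame() loop: DecodeAudioPacket for every demuxed packet, FlushAudioDecoder once at end of stream.
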
@@ -4807,17 +4845,13 @@ function avcodec_decode_audio4(avctx: PAVCodecContext; frame: PAVFrame;
* @warning The end of the input buffer buf should be set to 0 to ensure that
* no overreading happens for damaged MPEG streams.
*
- * @note You might have to align the input buffer avpkt->data.
- * The alignment requirements depend on the CPU: on some CPUs it isn't
- * necessary at all, on others it won't work at all if not aligned and on others
- * it will work but it will have an impact on performance.
- *
- * In practice, avpkt->data should have 4 byte alignment at minimum.
- *
* @note Codecs which have the CODEC_CAP_DELAY capability set have a delay
* between input and output, these need to be fed with avpkt->data=NULL,
* avpkt->size=0 at the end to return the remaining frames.
*
+ * @note The AVCodecContext MUST have been opened with @ref avcodec_open2()
+ * before packets may be fed to the decoder.
+ *
* @param avctx the codec context
* @param[out] picture The AVFrame in which the decoded video frame will be stored.
* Use av_frame_alloc() to get an AVFrame. The codec will
@@ -4830,10 +4864,10 @@ function avcodec_decode_audio4(avctx: PAVCodecContext; frame: PAVFrame;
* to the frame if av_frame_is_writable() returns 1.
* When AVCodecContext.refcounted_frames is set to 0, the returned
* reference belongs to the decoder and is valid only until the
- * next call to this function or until closing the decoder. The
- * caller may not write to it.
+ * next call to this function or until closing or flushing the
+ * decoder. The caller may not write to it.
*
- * @param[in] avpkt The input AVpacket containing the input buffer.
+ * @param[in] avpkt The input AVPacket containing the input buffer.
* You can create such packet with av_init_packet() and by then setting
* data and size, some decoders might in addition need other fields like
* flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least
@@ -4847,7 +4881,8 @@ function avcodec_decode_video2(avctx: PAVCodecContext; picture: PAVFrame;
avpkt: {const} PAVPacket): cint;
cdecl; external av__codec;
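
For symmetry with the audio notes above, a small Pascal sketch of a single video decode call: got_picture = 0 is not an error, it only means the decoder buffered the packet. The pointer-style got_picture parameter is an assumption taken from how the usdx code calls this binding.

function DecodeVideoPacket(ctx: PAVCodecContext; picture: PAVFrame;
                           const pkt: TAVPacket): boolean;
var
  gotPicture, err: cint;
begin
  err := avcodec_decode_video2(ctx, picture, @gotPicture, @pkt);
  if err < 0 then
  begin
    Result := false;               // real decoding error
    Exit;
  end;
  Result := gotPicture <> 0;       // false: no frame yet (e.g. CODEC_CAP_DELAY decoder)
end;
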
-(* Decode a subtitle message.
+(*
+ * Decode a subtitle message.
* Return a negative value on error, otherwise return the number of bytes used.
* If no subtitle could be decompressed, got_sub_ptr is zero.
* Otherwise, the subtitle is stored in *sub.
@@ -4856,6 +4891,17 @@ function avcodec_decode_video2(avctx: PAVCodecContext; picture: PAVFrame;
* and reusing a get_buffer written for video codecs would probably perform badly
* due to a potentially very different allocation pattern.
*
+ * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input
+ * and output. This means that for some packets they will not immediately
+ * produce decoded output and need to be flushed at the end of decoding to get
+ * all the decoded data. Flushing is done by calling this function with packets
+ * with avpkt->data set to NULL and avpkt->size set to 0 until it stops
+ * returning subtitles. It is safe to flush even those decoders that are not
+ * marked with CODEC_CAP_DELAY, then no subtitles will be returned.
+ *
+ * @note The AVCodecContext MUST have been opened with @ref avcodec_open2()
+ * before packets may be fed to the decoder.
+ *
* @param avctx the codec context
* @param[out] sub The Preallocated AVSubtitle in which the decoded subtitle will be stored,
* must be freed with avsubtitle_free if *got_sub_ptr is set.
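
A sketch of the subtitle flush loop described above. It assumes the translation passes sub and got_sub_ptr as pointers (PAVSubtitle / Pcint) and that av_init_packet() and avsubtitle_free() are declared as elsewhere in avcodec.pas; adjust the call style if the binding uses var parameters instead.

procedure FlushSubtitleDecoder(ctx: PAVCodecContext);
var
  pkt: TAVPacket;
  sub: TAVSubtitle;
  gotSub: cint;
begin
  av_init_packet(pkt);
  pkt.data := nil;                              // nil data / size 0 drains the decoder
  pkt.size := 0;
  FillChar(sub, SizeOf(sub), 0);
  repeat
    if avcodec_decode_subtitle2(ctx, @sub, @gotSub, @pkt) < 0 then
      Break;
    if gotSub <> 0 then
    begin
      { ... hand the subtitle to the renderer ... }
      avsubtitle_free(@sub);                    // caller owns the decoded subtitle
    end;
  until gotSub = 0;
end;
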
@@ -5032,12 +5078,43 @@ type
*)
picture_structure: TAVPictureStructure;
+ (**
+ * Picture number incremented in presentation or output order.
+ * This field may be reinitialized at the first picture of a new sequence.
+ *
+ * For example, this corresponds to H.264 PicOrderCnt.
+ *)
+ output_picture_number: cint;
+
+ (**
+ * Dimensions of the decoded video intended for presentation.
+ *)
+ width: cint;
+ height: cint;
+
+ (**
+ * Dimensions of the coded video.
+ *)
+ coded_width: cint;
+ coded_height: cint;
+
+ (**
+ * The format of the coded data, corresponds to enum AVPixelFormat for video
+ * and for enum AVSampleFormat for audio.
+ *
+ * Note that a decoder can have considerable freedom in how exactly it
+ * decodes the data, so the format reported here might be different from the
+ * one returned by a decoder.
+ *)
+ format: cint;
end; {AVCodecParserContext}
TAVCodecParser = record
codec_ids: array [0..4] of cint; (* several codec IDs are permitted *)
priv_data_size: cint;
parser_init: function(s: PAVCodecParserContext): cint; cdecl;
+ (* This callback never returns an error, a negative value means that
+ * the frame start was in a previous packet. *)
parser_parse: function(s: PAVCodecParserContext;
avctx: PAVCodecContext;
poutbuf: {const} PPointer; poutbuf_size: PCint;
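
The AVCodecParserContext fields added in this commit (width/height, coded_width/coded_height, format, output_picture_number) can be inspected after av_parser_parse2() has consumed data. A small illustrative dump routine, using only the field names introduced above:

procedure DumpParserInfo(pc: PAVCodecParserContext);
begin
  WriteLn('presentation size    : ', pc^.width, 'x', pc^.height);
  WriteLn('coded size           : ', pc^.coded_width, 'x', pc^.coded_height);
  WriteLn('format (as cint)     : ', pc^.format);   // AVPixelFormat or AVSampleFormat value
  WriteLn('output_picture_number: ', pc^.output_picture_number);
end;
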
diff --git a/src/lib/ffmpeg-2.6/avformat.pas b/src/lib/ffmpeg-2.6/avformat.pas
index 660cf3ce..fdf97dca 100644
--- a/src/lib/ffmpeg-2.6/avformat.pas
+++ b/src/lib/ffmpeg-2.6/avformat.pas
@@ -264,6 +264,10 @@ const
* be set to the timebase that the caller desires to use for this stream (note
* that the timebase actually used by the muxer can be different, as will be
* described later).
+ * - It is advised to manually initialize only the relevant fields in
+ * AVCodecContext, rather than using @ref avcodec_copy_context() during
+ * remuxing: there is no guarantee that the codec context values remain valid
+ * for both input and output format contexts.
* - The caller may fill in additional information, such as @ref
* AVFormatContext.metadata "global" or @ref AVStream.metadata "per-stream"
* metadata, @ref AVFormatContext.chapters "chapters", @ref
@@ -755,6 +759,8 @@ type
* @see avdevice_capabilities_free() for more details.
*)
free_device_capabilities: function(s: PAVFormatContext; caps: PAVDeviceCapabilitiesQuery): cint; cdecl;
+
+ data_codec: TAVCodecID; (**< default data codec *)
end;
(**
* @}
@@ -1734,54 +1740,6 @@ type
*)
format_whitelist: PAnsiChar;
- (*****************************************************************
- * All fields below this line are not part of the public API. They
- * may not be used outside of libavformat and can be changed and
- * removed at will.
- * New public fields should be added right above.
- *****************************************************************
- *)
-
- (**
- * This buffer is only needed when packets were already buffered but
- * not decoded, for example to get the codec parameters in MPEG
- * streams.
- *)
- packet_buffer: PAVPacketList;
- packet_buffer_end_: PAVPacketList;
-
- (* av_seek_frame() support *)
- data_offset: cint64; (**< offset of the first packet *)
-
- (**
- * Raw packets from the demuxer, prior to parsing and decoding.
- * This buffer is used for buffering packets until the codec can
- * be identified, as parsing cannot be done without knowing the
- * codec.
- *)
- raw_packet_buffer_: PAVPacketList;
- raw_packet_buffer_end_: PAVPacketList;
- (**
- * Packets split by the parser get queued here.
- *)
- parse_queue: PAVPacketList;
- parse_queue_end: PAVPacketList;
- (**
- * Remaining size available for raw_packet_buffer, in bytes.
- *)
- raw_packet_buffer_remaining_size: cint;
-
- (**
- * Offset to remap timestamps to be non-negative.
- * Expressed in timebase units.
- * @see AVStream.mux_ts_offset
- *)
- offset: cint64;
-
- (**
- * Timebase for the timestamp offset.
- *)
- offset_timebase: TAVRational;
(**
* An opaque field for libavformat internal usage.
@@ -1822,6 +1780,14 @@ type
subtitle_codec: PAVCodec;
(**
+ * Forced data codec.
+ * This allows forcing a specific decoder, even when there are multiple with
+ * the same codec_id.
+ * Demuxing: Set by user via av_format_set_data_codec (NO direct access).
+ *)
+ data_codec: PAVCodec;
+
+ (**
* Number of bytes to be written as padding in a metadata header.
* Demuxing: Unused.
* Muxing: Set by user via av_format_set_metadata_header_padding.
@@ -1872,6 +1838,12 @@ type
* - demuxing: Set by user.
*)
dump_separator: Pcuint8;
+
+ (**
+ * Forced Data codec_id.
+ * Demuxing: Set by user.
+ *)
+ data_codec_id: TAVCodecID;
end; (** TAVFormatContext **)
function av_format_get_probe_score(s: {const} PAVFormatContext): cint;
@@ -1888,6 +1860,10 @@ function av_format_get_subtitle_codec(s: {const} PAVFormatContext): PAVCodec;
cdecl; external av__format;
procedure av_format_set_subtitle_codec(s: PAVFormatContext; c: PAVCodec);
cdecl; external av__format;
+function av_format_get_data_codec(s: {const} PAVFormatContext): PAVCodec;
+ cdecl; external av__format;
+procedure av_format_set_data_codec(s: PAVFormatContext; c: PAVCodec);
+ cdecl; external av__format;
function av_format_get_metadata_header_padding(s: {const} PAVFormatContext): cint;
cdecl; external av__format;
procedure av_format_set_metadata_header_padding(s: PAVFormatContext; c: cint);
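
A short sketch of using the new setter to force a data decoder before avformat_open_input(), mirroring the existing video/audio/subtitle setters. avcodec_find_decoder_by_name() is assumed to be declared as in the C API, and the decoder name is purely illustrative.

procedure ForceDataDecoder(fmtCtx: PAVFormatContext; const decoderName: AnsiString);
var
  dec: PAVCodec;
begin
  dec := avcodec_find_decoder_by_name(PAnsiChar(decoderName));
  if dec <> nil then
    av_format_set_data_codec(fmtCtx, dec);   // accessor only, no direct field access
end;
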
@@ -2340,6 +2316,25 @@ function avformat_seek_file(s: PAVFormatContext; stream_index: cint; min_ts, ts,
cdecl; external av__format;
(**
+ * Discard all internally buffered data. This can be useful when dealing with
+ * discontinuities in the byte stream. Generally works only with formats that
+ * can resync. This includes headerless formats like MPEG-TS/TS but should also
+ * work with NUT, Ogg and in a limited way AVI for example.
+ *
+ * The set of streams, the detected duration, stream parameters and codecs do
+ * not change when calling this function. If you want a complete reset, it's
+ * better to open a new AVFormatContext.
+ *
+ * This does not flush the AVIOContext (s->pb). If necessary, call
+ * avio_flush(s->pb) before calling this function.
+ *
+ * @param s media file handle
+ * @return >=0 on success, error code otherwise
+ *)
+function avformat_flush(s: PAVFormatContext): cint;
+ cdecl; external av__format;
+
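+(**
+ * A minimal usage sketch following the note above: flush the AVIOContext
+ * first, since avformat_flush() does not touch s->pb. The helper name is
+ * illustrative only.
+ *
+ * @code
+ * procedure ResyncAfterDiscontinuity(fmtCtx: PAVFormatContext);
+ * begin
+ *   if fmtCtx^.pb <> nil then
+ *     avio_flush(fmtCtx^.pb);              // avformat_flush() leaves s->pb alone
+ *   if avformat_flush(fmtCtx) < 0 then
+ *     WriteLn('avformat_flush failed; a full reopen may be needed');
+ * end;
+ * @endcode
+ *)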
+(**
* Start playing a network-based stream (e.g. RTSP stream) at the
* current position.
*)
diff --git a/src/lib/ffmpeg-2.6/avio.pas b/src/lib/ffmpeg-2.6/avio.pas
index c342f8de..db3a84e6 100644
--- a/src/lib/ffmpeg-2.6/avio.pas
+++ b/src/lib/ffmpeg-2.6/avio.pas
@@ -276,6 +276,13 @@ function avio_put_str(s: PAVIOContext; str: {const} PAnsiChar): cint;
function avio_put_str16le(s: PAVIOContext; str: {const} PAnsiChar): cint;
cdecl; external av__format;
+(**
+ * Convert an UTF-8 string to UTF-16BE and write it.
+ * @return number of bytes written.
+ *)
+function avio_put_str16be(s: PAVIOContext; str: {const} PAnsiChar): cint;
+ cdecl; external av__format;
+
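+(**
+ * A small write example for the new helper. avio_open()/avio_close() with
+ * AVIO_FLAG_WRITE are assumed to be declared as elsewhere in avio.pas
+ * (avio_open taking a var PAVIOContext); the input string must already be
+ * UTF-8. Names are illustrative only.
+ *
+ * @code
+ * procedure WriteUtf16BeString(const url, utf8Text: AnsiString);
+ * var
+ *   io: PAVIOContext;
+ *   written: cint;
+ * begin
+ *   io := nil;
+ *   if avio_open(io, PAnsiChar(url), AVIO_FLAG_WRITE) < 0 then
+ *     Exit;
+ *   written := avio_put_str16be(io, PAnsiChar(utf8Text));  // returns bytes written
+ *   WriteLn('wrote ', written, ' bytes as UTF-16BE');
+ *   avio_close(io);
+ * end;
+ * @endcode
+ *)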
const
(**
* Passing this as the "whence" parameter to a seek function causes it to
diff --git a/src/lib/ffmpeg-2.6/ff_api-defines.inc b/src/lib/ffmpeg-2.6/ff_api-defines.inc
index a964caaf..5c9b4bb2 100644
--- a/src/lib/ffmpeg-2.6/ff_api-defines.inc
+++ b/src/lib/ffmpeg-2.6/ff_api-defines.inc
@@ -8,6 +8,9 @@
*)
(** avcodec defines *)
+{$ifndef FF_API_VIMA_DECODER}
+{$define FF_API_VIMA_DECODER := (LIBAVCODEC_VERSION_MAJOR < 57)}
+{$endif}
{$ifndef FF_API_REQUEST_CHANNELS}
{$define FF_API_REQUEST_CHANNELS := (LIBAVCODEC_VERSION_MAJOR < 57)}
{$endif}
@@ -144,6 +147,9 @@
{$ifndef FF_API_MPV_OPT}
{$define FF_API_MPV_OPT := (LIBAVCODEC_VERSION_MAJOR < 59)}
{$endif}
+{$ifndef FF_API_STREAM_CODEC_TAG}
+{$define FF_API_STREAM_CODEC_TAG := (LIBAVCODEC_VERSION_MAJOR < 59)}
+{$endif}
(* avutil defines *)
{$ifndef FF_API_GET_BITS_PER_SAMPLE_FMT}
diff --git a/src/lib/ffmpeg-2.6/libavutil/frame.pas b/src/lib/ffmpeg-2.6/libavutil/frame.pas
index 87eb21ff..f47f2397 100644
--- a/src/lib/ffmpeg-2.6/libavutil/frame.pas
+++ b/src/lib/ffmpeg-2.6/libavutil/frame.pas
@@ -157,7 +157,13 @@ type
* u8 reason for end skip (0=padding silence, 1=convergence)
* @endcode
*)
- AV_FRAME_DATA_SKIP_SAMPLES
+ AV_FRAME_DATA_SKIP_SAMPLES,
+
+ (**
+ * This side data must be associated with an audio frame and corresponds to
+ * enum AVAudioServiceType defined in avcodec.h.
+ *)
+ AV_FRAME_DATA_AUDIO_SERVICE_TYPE
);
TAVActiveFormatDescription = (
@@ -452,7 +458,9 @@ type
(**
* AVBuffer references backing the data for this frame. If all elements of
- * this array are NULL, then this frame is not reference counted.
+ * this array are NULL, then this frame is not reference counted. This array
+ * must be filled contiguously -- if buf[i] is non-NULL then buf[j] must
+ * also be non-NULL for all j < i.
*
* There may be at most one AVBuffer per data plane, so for video this array
* always contains all the references. For planar audio with more than
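
Two tiny helpers illustrating the refcounting/contiguity rule documented above; the buf array name and bounds are taken from the frame.pas translation.

function FrameIsRefCounted(frame: PAVFrame): boolean;
begin
  Result := (frame <> nil) and (frame^.buf[0] <> nil);   // buf[0] = nil => not refcounted
end;

function CountFrameBuffers(frame: PAVFrame): cint;
var
  i: cint;
begin
  Result := 0;
  for i := Low(frame^.buf) to High(frame^.buf) do
  begin
    if frame^.buf[i] = nil then
      Break;                        // contiguity rule: the first nil ends the list
    Inc(Result);
  end;
end;
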
diff --git a/src/lib/ffmpeg-2.6/libavutil/pixfmt.pas b/src/lib/ffmpeg-2.6/libavutil/pixfmt.pas
index f0539c9a..7709d7f7 100644
--- a/src/lib/ffmpeg-2.6/libavutil/pixfmt.pas
+++ b/src/lib/ffmpeg-2.6/libavutil/pixfmt.pas
@@ -44,11 +44,11 @@ type
* big-endian CPUs.
*
* @par
- * When the pixel format is palettized RGB (AV_PIX_FMT_PAL8), the palettized
+ * When the pixel format is palettized RGB32 (AV_PIX_FMT_PAL8), the palettized
* image data is stored in AVFrame.data[0]. The palette is transported in
* AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is
* formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is
- * also endian-specific). Note also that the individual RGB palette
+ * also endian-specific). Note also that the individual RGB32 palette
* components stored in AVFrame.data[1] should be in the range 0..255.
* This is important as many custom PAL8 video codecs that were designed
* to run on the IBM VGA graphics adapter use 6-bit palette components.
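
A sketch of reading one PAL8 palette entry as laid out above: data[0] carries the indexed image, data[1] a 256-entry, 4-bytes-per-entry RGB32-style palette. The cast type is local to this sketch.

type
  TPal8Palette = array[0..255] of cuint32;   // 1024 bytes, AV_PIX_FMT_RGB32-style entries
  PPal8Palette = ^TPal8Palette;

function GetPal8Color(frame: PAVFrame; index: Byte): cuint32;
begin
  // data[1] holds the palette; each component is expected in the 0..255 range
  Result := PPal8Palette(frame^.data[1])^[index];
end;
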
@@ -120,13 +120,13 @@ type
AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
- AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0
- AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0
+ AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian, most significant bit to 0
+ AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, most significant bit to 0
AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
- AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1
- AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1
+ AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian, most significant bit to 1
+ AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, most significant bit to 1
AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
@@ -143,10 +143,10 @@ type
{$ENDIF}
AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
- AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0
- AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0
- AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1
- AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1
+ AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, most significant bits to 0
+ AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, most significant bits to 0
+ AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, most significant bits to 1
+ AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, most significant bits to 1
AV_PIX_FMT_YA8, ///< 8bit gray, 8bit alpha
(* see const declaration way down
AV_PIX_FMT_Y400A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
@@ -241,6 +241,19 @@ type
AV_PIX_FMT_YA16BE, ///< 16bit gray, 16bit alpha (big-endian)
AV_PIX_FMT_YA16LE, ///< 16bit gray, 16bit alpha (little-endian)
+ (**
+ * duplicated pixel formats for compatibility with libav.
+ * FFmpeg supports these formats since May 3 2013 (commit e6d4e687558d08187e7a415a7725e4b1a416f782)
+ * Libav added them Jan 14 2015 with incompatible values (commit 0e6c7dfa650e8b0497bfa7a06394b7a462ddc33a)
+ *)
+ AV_PIX_FMT_GBRAP_LIBAV, ///< planar GBRA 4:4:4:4 32bpp
+ AV_PIX_FMT_GBRAP16BE_LIBAV, ///< planar GBRA 4:4:4:4 64bpp, big-endian
+ AV_PIX_FMT_GBRAP16LE_LIBAV, ///< planar GBRA 4:4:4:4 64bpp, little-endian
+ (**
+ * HW acceleration through QSV, data[3] contains a pointer to the
+ * mfxFrameSurface1 structure.
+ *)
+ AV_PIX_FMT_QSV,
{$IFNDEF AV_PIX_FMT_ABI_GIT_MASTER}
AV_PIX_FMT_RGBA64BE = $123, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
@@ -248,10 +261,10 @@ type
AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
{$ENDIF}
- AV_PIX_FMT_0RGB = $123 + 4, ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB...
- AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGB0RGB0...
- AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR...
- AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGR0BGR0...
+ AV_PIX_FMT_0RGB = $123 + 4,///< packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
+ AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
+ AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
+ AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
@@ -296,12 +309,15 @@ type
{$IFDEF AV_HAVE_INCOMPATIBLE_LIBAV_ABI}
const
- AV_PIX_FMT_YUVA422P = AV_PIX_FMT_YUVA422P_LIBAV;
- AV_PIX_FMT_YUVA444P = AV_PIX_FMT_YUVA444P_LIBAV;
- AV_PIX_FMT_RGBA64BE = AV_PIX_FMT_RGBA64BE_LIBAV;
- AV_PIX_FMT_RGBA64LE = AV_PIX_FMT_RGBA64LE_LIBAV;
- AV_PIX_FMT_BGRA64BE = AV_PIX_FMT_BGRA64BE_LIBAV;
- AV_PIX_FMT_BGRA64LE = AV_PIX_FMT_BGRA64LE_LIBAV;
+ AV_PIX_FMT_YUVA422P = AV_PIX_FMT_YUVA422P_LIBAV;
+ AV_PIX_FMT_YUVA444P = AV_PIX_FMT_YUVA444P_LIBAV;
+ AV_PIX_FMT_RGBA64BE = AV_PIX_FMT_RGBA64BE_LIBAV;
+ AV_PIX_FMT_RGBA64LE = AV_PIX_FMT_RGBA64LE_LIBAV;
+ AV_PIX_FMT_BGRA64BE = AV_PIX_FMT_BGRA64BE_LIBAV;
+ AV_PIX_FMT_BGRA64LE = AV_PIX_FMT_BGRA64LE_LIBAV;
+ AV_PIX_FMT_GBRAP = AV_PIX_FMT_GBRAP_LIBAV;
+ AV_PIX_FMT_GBRAP16BE = AV_PIX_FMT_GBRAP16BE_LIBAV;
+ AV_PIX_FMT_GBRAP16LE = AV_PIX_FMT_GBRAP16LE_LIBAV;
{$ENDIF}
const