#define VP9_SYNCCODE 0x498342

for (i = 0; i < n; i++)

static void vp9_report_tile_progress(VP9Context *s, int field, int n) {

static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
#define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
                     CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
                     CONFIG_VP9_NVDEC_HWACCEL + \
                     CONFIG_VP9_VAAPI_HWACCEL + \
                     CONFIG_VP9_VDPAU_HWACCEL)

if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {

switch (s->pix_fmt) {
#if CONFIG_VP9_VDPAU_HWACCEL
#if CONFIG_VP9_DXVA2_HWACCEL
#if CONFIG_VP9_D3D11VA_HWACCEL
#if CONFIG_VP9_NVDEC_HWACCEL
#if CONFIG_VP9_VAAPI_HWACCEL
#if CONFIG_VP9_NVDEC_HWACCEL
#if CONFIG_VP9_VAAPI_HWACCEL

*fmtp++ = s->pix_fmt;

s->last_fmt = s->pix_fmt;

s->cols = (w + 7) >> 3;
s->rows = (h + 7) >> 3;
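The (w + 7) >> 3 rounding above stores the frame size in whole 8x8-block units. A minimal standalone sketch of the same arithmetic, assuming the 64-pixel superblock counts are derived by the same kind of rounding (values and names below are illustrative only):

#include <stdio.h>

int main(void)
{
    int w = 1920, h = 1080;            /* example frame size */
    int cols    = (w + 7) >> 3;        /* 8x8 block columns      -> 240 */
    int rows    = (h + 7) >> 3;        /* 8x8 block rows         -> 135 */
    int sb_cols = (cols + 7) >> 3;     /* 64x64 superblock cols  -> 30  */
    int sb_rows = (rows + 7) >> 3;     /* 64x64 superblock rows  -> 17  */

    printf("%dx%d -> %dx%d blocks, %dx%d superblocks\n",
           w, h, cols, rows, sb_cols, sb_rows);
    return 0;
}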
#define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)

int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;

chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
chroma_eobs   = 16 * 16 >> (s->ss_h + s->ss_v);
               16 * 16 + 2 * chroma_eobs) * sbs);
               16 * 16 + 2 * chroma_eobs);
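ss_h and ss_v are the horizontal and vertical chroma subsampling shifts, so each shift halves how many chroma samples a 64x64 superblock covers. A small standalone illustration of the two sizing expressions above (the format table is just the usual 4:2:0 / 4:2:2 / 4:4:4 cases, not taken from the decoder):

#include <stdio.h>

int main(void)
{
    /* ss_h/ss_v: 1 = chroma subsampled in that direction, 0 = full resolution */
    struct { const char *name; int ss_h, ss_v; } fmt[] = {
        { "4:2:0", 1, 1 }, { "4:2:2", 1, 0 }, { "4:4:4", 0, 0 },
    };
    for (int i = 0; i < 3; i++) {
        int chroma_blocks = 64 * 64 >> (fmt[i].ss_h + fmt[i].ss_v); /* samples per chroma plane per SB */
        int chroma_eobs   = 16 * 16 >> (fmt[i].ss_h + fmt[i].ss_v); /* 4x4 transform blocks per chroma plane per SB */
        printf("%s: %4d chroma samples, %3d chroma 4x4 blocks per 64x64 SB\n",
               fmt[i].name, chroma_blocks, chroma_eobs);
    }
    return 0;
}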
return m - ((v + 1) >> 1);

static const uint8_t inv_map_table[255] = {
      7,  20,  33,  46,  59,  72,  85,  98, 111, 124, 137, 150, 163, 176,
    189, 202, 215, 228, 241, 254,   1,   2,   3,   4,   5,   6,   8,   9,
     10,  11,  12,  13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,
     25,  26,  27,  28,  29,  30,  31,  32,  34,  35,  36,  37,  38,  39,
     40,  41,  42,  43,  44,  45,  47,  48,  49,  50,  51,  52,  53,  54,
     55,  56,  57,  58,  60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
     70,  71,  73,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,  84,
     86,  87,  88,  89,  90,  91,  92,  93,  94,  95,  96,  97,  99, 100,
    101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
    116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
    131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
    146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
    161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
    177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
    192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
    207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
    222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
    237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
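The line above the table is the tail of inv_recenter_nonneg(), which the delta-coded probability updates use together with inv_map_table. A rough standalone sketch of the recentering idea, mapping a non-negative code v back around a predictor m (illustrative only, not a verbatim copy of the decoder):

#include <stdio.h>

/* Small codes land alternately just below/above the predictor m,
 * large codes (v > 2 * m) pass through unchanged. */
static int inv_recenter_nonneg(int v, int m)
{
    if (v > 2 * m)
        return v;
    if (v & 1)
        return m - ((v + 1) >> 1);   /* odd codes land below m */
    return m + (v >> 1);             /* even codes land at or above m */
}

int main(void)
{
    for (int v = 0; v < 8; v++)
        printf("v=%d -> %d (around m=100)\n", v, inv_recenter_nonneg(v, 100));
    return 0;
}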
s->s.h.bpp = 8 + bits * 2;

s->pix_fmt = pix_fmt_rgb[bits];

static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {

s->pix_fmt = pix_fmt_for_ss[bits][1][1];
int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
for (i = 1; i <= 63; i++) {
    limit >>= (sharp + 3) >> 2;
    limit = FFMIN(limit, 9 - sharp);
    limit = FFMAX(limit, 1);

for (i = 0; i < 4; i++)
for (i = 0; i < 2; i++)

for (i = 0; i < 7; i++)
for (i = 0; i < 3; i++)
for (i = 0; i < 8; i++) {

int qyac, qydc, quvac, quvdc, lflvl, sh;

qyac = av_clip_uintp2(qyac, 8);

av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
for (j = 1; j < 4; j++) {

for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
max = FFMAX(0, max - 1);
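The loop above finds the largest log2 tile-column count that still leaves at least four superblock columns per tile. A standalone sketch of that bound, assuming superblock columns are the frame width rounded up to 64 pixels (the widths are arbitrary examples):

#include <stdio.h>

int main(void)
{
    int widths[] = { 640, 1920, 3840 };
    for (int i = 0; i < 3; i++) {
        int sb_cols = (widths[i] + 63) >> 6;   /* 64-pixel superblock columns */
        int max;
        /* largest shift that still leaves >= 4 superblock columns per tile */
        for (max = 0; (sb_cols >> max) >= 4; max++) ;
        max = max ? max - 1 : 0;               /* FFMAX(0, max - 1) */
        printf("width %4d: %2d SB cols, max log2_tile_cols %d\n",
               widths[i], sb_cols, max);
    }
    return 0;
}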
rc += n_range_coders;
int valid_ref_frame = 0;
for (i = 0; i < 3; i++) {
           "Ref pixfmt (%s) did not match current frame (%s)",
    } else if (refw == w && refh == h) {
    if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
           "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
    s->mvscale[i][0] = (refw << 14) / w;
    s->mvscale[i][1] = (refh << 14) / h;
if (!valid_ref_frame) {
    av_log(avctx, AV_LOG_ERROR,
           "No valid reference frame is found, bitstream not supported\n");
if (size2 > size - (data2 - data)) {

for (i = 0; i < 2; i++)

for (i = 0; i < 2; i++)
    for (j = 0; j < 2; j++)

for (i = 0; i < 2; i++)
    for (j = 0; j < 3; j++)

for (i = 0; i < 4; i++) {
    for (j = 0; j < 2; j++)
        for (k = 0; k < 2; k++)
            for (l = 0; l < 6; l++)
                for (m = 0; m < 6; m++) {
                    if (m >= 3 && l == 0)
                    for (n = 0; n < 3; n++) {
    for (j = 0; j < 2; j++)
        for (k = 0; k < 2; k++)
            for (l = 0; l < 6; l++)
                for (m = 0; m < 6; m++) {

for (i = 0; i < 3; i++)

for (i = 0; i < 7; i++)
    for (j = 0; j < 3; j++)

for (i = 0; i < 4; i++)
    for (j = 0; j < 2; j++)

for (i = 0; i < 4; i++)

for (i = 0; i < 5; i++)

for (i = 0; i < 5; i++) {

for (i = 0; i < 5; i++)

for (i = 0; i < 4; i++)
    for (j = 0; j < 9; j++)

for (i = 0; i < 4; i++)
    for (j = 0; j < 4; j++)
        for (k = 0; k < 3; k++)

for (i = 0; i < 3; i++)

for (i = 0; i < 2; i++) {
    for (j = 0; j < 10; j++)

    for (j = 0; j < 10; j++)

for (i = 0; i < 2; i++) {
    for (j = 0; j < 2; j++)
        for (k = 0; k < 3; k++)

    for (j = 0; j < 3; j++)

for (i = 0; i < 2; i++) {

return (data2 - data) + size2;
                      ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)

ptrdiff_t hbs = 4 >> bl;

} else if (col + hbs < s->cols) {
    if (row + hbs < s->rows) {

yoff  += hbs * 8 * y_stride;
uvoff += hbs * 8 * uv_stride >> s->ss_v;

yoff  += hbs * 8 * bytesperpixel;
uvoff += hbs * 8 * bytesperpixel >> s->ss_h;

decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
          yoff + 8 * hbs * bytesperpixel,
          uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
yoff  += hbs * 8 * y_stride;
uvoff += hbs * 8 * uv_stride >> s->ss_v;
decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
decode_sb(td, row + hbs, col + hbs, lflvl,
          yoff + 8 * hbs * bytesperpixel,
          uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);

decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
          yoff + 8 * hbs * bytesperpixel,
          uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);

} else if (row + hbs < s->rows) {

decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
yoff  += hbs * 8 * y_stride;
uvoff += hbs * 8 * uv_stride >> s->ss_v;
decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);

decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
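hbs above is half the current block size expressed in 8x8-block units, so each recursion level of the partition quadtree halves it (64x64 down to 8x8). A tiny standalone sketch of that relationship (the level numbering simply follows the VP9 block levels, 0 being the 64x64 superblock):

#include <stdio.h>

int main(void)
{
    for (int bl = 0; bl < 3; bl++) {
        int hbs  = 4 >> bl;      /* half the block size, in 8x8-block units */
        int size = 16 * hbs;     /* full block size in pixels: 64, 32, 16   */
        printf("bl=%d: %2dx%-2d block, half-step %d pixels\n",
               bl, size, size, 8 * hbs);
    }
    /* at bl=3 (8x8 leaves) hbs becomes 0 and no further split is possible */
    return 0;
}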
                          ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)

ptrdiff_t hbs = 4 >> bl;

} else if (td->b->bl == bl) {

yoff  += hbs * 8 * y_stride;
uvoff += hbs * 8 * uv_stride >> s->ss_v;

yoff  += hbs * 8 * bytesperpixel;
uvoff += hbs * 8 * bytesperpixel >> s->ss_h;

if (col + hbs < s->cols) {
    if (row + hbs < s->rows) {
        decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
                      uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
        yoff  += hbs * 8 * y_stride;
        uvoff += hbs * 8 * uv_stride >> s->ss_v;
        decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
                      yoff + 8 * hbs * bytesperpixel,
                      uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);

        yoff  += hbs * 8 * bytesperpixel;
        uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
        decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);

} else if (row + hbs < s->rows) {
    yoff  += hbs * 8 * y_stride;
    uvoff += hbs * 8 * uv_stride >> s->ss_v;
    decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
int sb_start = ( idx      * n) >> log2_n;
int sb_end   = ((idx + 1) * n) >> log2_n;
*start = FFMIN(sb_start, n) << 3;
*end   = FFMIN(sb_end,   n) << 3;
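This splits n superblock columns (or rows) into 2^log2_n roughly equal tiles and converts the result back into 8x8-block units. A standalone sketch with made-up numbers (FFMIN is spelled out inline here since the FFmpeg macros are not included):

#include <stdio.h>

static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
{
    int sb_start = ( idx      * n) >> log2_n;
    int sb_end   = ((idx + 1) * n) >> log2_n;
    *start = (sb_start < n ? sb_start : n) << 3;   /* FFMIN(sb_start, n) << 3 */
    *end   = (sb_end   < n ? sb_end   : n) << 3;   /* FFMIN(sb_end,   n) << 3 */
}

int main(void)
{
    int n = 30, log2_n = 2;                        /* 30 SB columns, 4 tiles */
    for (int idx = 0; idx < 1 << log2_n; idx++) {
        int start, end;
        set_tile_offset(&start, &end, idx, log2_n, n);
        printf("tile %d: block cols [%d, %d)\n", idx, start, end);
    }
    return 0;
}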
for (i = 0; i < 3; i++) {
for (i = 0; i < 8; i++) {

int row, col, tile_row, tile_col, ret;
int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
ptrdiff_t yoff, uvoff, ls_y, ls_uv;

if (tile_size > size) {

for (row = tile_row_start; row < tile_row_end;
     row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
    ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;

    td->c = &td->c_b[tile_col];

    for (col = tile_col_start;
         col += 8, yoff2 += 64 * bytesperpixel,
         uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {

        memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));

    if (row + 8 < s->rows) {
        f->data[0] + yoff + 63 * ls_y,
            8 * s->cols * bytesperpixel);
        f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
            8 * s->cols * bytesperpixel >> s->ss_h);
        f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
            8 * s->cols * bytesperpixel >> s->ss_h);

    lflvl_ptr = s->lflvl;
    for (col = 0; col < s->cols;
         col += 8, yoff2 += 64 * bytesperpixel,
         uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,

ptrdiff_t uvoff, yoff, ls_y, ls_uv;
unsigned tile_cols_len;
int tile_row_start, tile_row_end, tile_col_start, tile_col_end;

uvoff = (64 * bytesperpixel >> s->ss_h) * (tile_col_start >> 3);
yoff  = (64 * bytesperpixel) * (tile_col_start >> 3);
lflvl_ptr_base = s->lflvl + (tile_col_start >> 3);

td->c = &td->c_b[tile_row];
for (row = tile_row_start; row < tile_row_end;
     row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
    ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;

    for (col = tile_col_start;
         col += 8, yoff2 += 64 * bytesperpixel,
         uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {

        memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));

    tile_cols_len = tile_col_end - tile_col_start;
    if (row + 8 < s->rows) {
        f->data[0] + yoff + 63 * ls_y,
            8 * tile_cols_len * bytesperpixel);
        f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
            8 * tile_cols_len * bytesperpixel >> s->ss_h);
        f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
            8 * tile_cols_len * bytesperpixel >> s->ss_h);

    vp9_report_tile_progress(s, row >> 3, 1);
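vp9_report_tile_progress() / vp9_await_tile_progress() let the loop-filter job trail the tile-decoding jobs one superblock row at a time. A rough standalone sketch of that signalling pattern with a counter, mutex and condition variable; the RowProgress type and the report_row/await_rows names are hypothetical and only illustrate the idea, not the decoder's own data structures:

#include <pthread.h>

typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    int             rows_done;   /* number of superblock rows fully decoded */
} RowProgress;

/* Decoder job: mark one more row as finished and wake any waiter. */
static void report_row(RowProgress *p)
{
    pthread_mutex_lock(&p->lock);
    p->rows_done++;
    pthread_cond_broadcast(&p->cond);
    pthread_mutex_unlock(&p->lock);
}

/* Loop-filter job: block until at least n rows have been reported. */
static void await_rows(RowProgress *p, int n)
{
    pthread_mutex_lock(&p->lock);
    while (p->rows_done < n)
        pthread_cond_wait(&p->cond, &p->lock);
    pthread_mutex_unlock(&p->lock);
}

int main(void)
{
    RowProgress p = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };
    report_row(&p);
    report_row(&p);
    await_rows(&p, 2);   /* returns immediately: two rows already reported */
    return 0;
}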
ptrdiff_t uvoff, yoff, ls_y, ls_uv;

for (i = 0; i < s->sb_rows; i++) {
    yoff  = (ls_y * 64) * i;
    uvoff = (ls_uv * 64 >> s->ss_v) * i;

    for (col = 0; col < s->cols;
         col += 8, yoff += 64 * bytesperpixel,
         uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
unsigned int tile, nb_blocks = 0;

unsigned int block = 0;
unsigned int tile, block_tile;

} else if (ret == 0) {

for (i = 0; i < 8; i++) {
for (i = 0; i < 8; i++) {

       "Failed to allocate block buffers\n");

for (i = 0; i < 4; i++) {
    for (j = 0; j < 2; j++)
        for (k = 0; k < 2; k++)
            for (l = 0; l < 6; l++)
                for (m = 0; m < 6; m++)

for (i = 0; i < s->sb_rows; i++)
int tile_row, tile_col;

if (tile_size > size)

for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)

} while (s->pass++ == 1);
for (i = 0; i < 8; i++) {

for (i = 0; i < 3; i++)
for (i = 0; i < 8; i++)

for (i = 0; i < 3; i++) {
for (i = 0; i < 8; i++) {

for (i = 0; i < 3; i++) {
    if (ssrc->s.frames[i].tf.f->buf[0]) {
for (i = 0; i < 8; i++) {
    if (ssrc->next_refs[i].f->buf[0]) {

s->ss_v    = ssrc->ss_v;
s->ss_h    = ssrc->ss_h;
s->gf_fmt  = ssrc->gf_fmt;
s->s.h.bpp = ssrc->s.h.bpp;
s->pix_fmt = ssrc->pix_fmt;
.bsfs = "vp9_superframe_split",

#if CONFIG_VP9_DXVA2_HWACCEL
#if CONFIG_VP9_D3D11VA_HWACCEL
#if CONFIG_VP9_D3D11VA2_HWACCEL
#if CONFIG_VP9_NVDEC_HWACCEL
#if CONFIG_VP9_VAAPI_HWACCEL
#if CONFIG_VP9_VDPAU_HWACCEL