FFmpeg  4.3.6
svq3.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2003 The FFmpeg Project
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
42 
43 #include <inttypes.h>
44 
45 #include "libavutil/attributes.h"
46 #include "libavutil/crc.h"
47 
48 #include "internal.h"
49 #include "avcodec.h"
50 #include "mpegutils.h"
51 #include "h264dec.h"
52 #include "h264data.h"
53 #include "golomb.h"
54 #include "hpeldsp.h"
55 #include "mathops.h"
56 #include "rectangle.h"
57 #include "tpeldsp.h"
58 
59 #if CONFIG_ZLIB
60 #include <zlib.h>
61 #endif
62 
63 #include "svq1.h"
64 
65 /**
66  * @file
67  * svq3 decoder.
68  */
69 
/*
 * Per-frame state kept by the SVQ3 decoder for the current/last/next pictures.
 * NOTE(review): this listing was extracted from generated documentation and
 * the member lines at internal numbers 71, 73, 76 and 80 were dropped by the
 * extractor (presumably the AVFrame pointer and the AVBufferRefs backing the
 * arrays below) -- confirm against the original svq3.c.
 */
70 typedef struct SVQ3Frame {
72 
74  int16_t (*motion_val[2])[2]; /* per-list motion vectors, one (x,y) pair per 4x4 block */
75 
77  uint32_t *mb_type; /* per-macroblock type/flag words (MB_TYPE_*) */
78 
79 
81  int8_t *ref_index[2]; /* per-list reference indices */
82 } SVQ3Frame;
83 
/*
 * Top-level decoder context.
 * NOTE(review): documentation-extracted listing; many member lines are
 * missing here (gaps in the internal numbering, e.g. 85-102, 104, 106-111,
 * 114-117, 121, 131-132, 138-141) -- the original also holds the codec
 * context, DSP contexts, bit readers and the cur/last/next picture pointers
 * referenced throughout this file. Confirm against the original svq3.c.
 */
84 typedef struct SVQ3Context {
86 
92 
103  uint32_t watermark_key; /* XOR key used to de-watermark slice headers */
105  int buf_size;
112  int qscale; /* current quantizer, 0..31 (index into svq3_dequant_coeff) */
113  int cbp; /* coded block pattern of the current macroblock */
118 
119  enum AVPictureType pict_type;
120  enum AVPictureType slice_type;
122 
123  int mb_x, mb_y; /* current macroblock position */
124  int mb_xy;
125  int mb_width, mb_height;
126  int mb_stride, mb_num;
127  int b_stride; /* stride of the 4x4-block (motion vector) grid */
128 
129  uint32_t *mb2br_xy; /* macroblock index -> intra4x4_pred_mode storage index */
130 
133 
134  int8_t intra4x4_pred_mode_cache[5 * 8]; /* scan8-addressed cache of 4x4 intra modes */
135  int8_t (*intra4x4_pred_mode);
136 
137  unsigned int top_samples_available; /* bitmask of available top neighbour samples */
140 
142 
143  DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2]; /* scan8-addressed MV cache, per list */
144  DECLARE_ALIGNED(8, int8_t, ref_cache)[2][5 * 8]; /* scan8-addressed reference cache */
145  DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2]; /* residual coefficients of the current MB */
146  DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2]; /* DC coefficients for INTRA16x16 */
147  DECLARE_ALIGNED(8, uint8_t, non_zero_count_cache)[15 * 8];
148  uint32_t dequant4_coeff[QP_MAX_NUM + 1][16];
149  int block_offset[2 * (16 * 3)]; /* pixel offset of each 4x4 block inside the MB */
151 } SVQ3Context;
152 
/* Inter prediction accuracy modes signalled per macroblock. */
153 #define FULLPEL_MODE 1
154 #define HALFPEL_MODE 2
155 #define THIRDPEL_MODE 3
156 #define PREDICT_MODE 4
157 
158 /* dual scan (from some older H.264 draft)
159  * o-->o-->o o
160  * | /|
161  * o o o / o
162  * | / | |/ |
163  * o o o o
164  * /
165  * o-->o-->o-->o
166  */
167 static const uint8_t svq3_scan[16] = {
168  0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
169  2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
170  0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
171  0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
172 };
173 
/* Scan order for the 16 luma DC coefficients (offsets into s->mb). */
174 static const uint8_t luma_dc_zigzag_scan[16] = {
175  0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
176  3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
177  1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
178  3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
179 };
180 
/* Maps a luma-prediction VLC code (0..24) to a pair of 4x4 mode selectors,
 * consumed together with svq3_pred_1 in svq3_decode_mb(). */
181 static const uint8_t svq3_pred_0[25][2] = {
182  { 0, 0 },
183  { 1, 0 }, { 0, 1 },
184  { 0, 2 }, { 1, 1 }, { 2, 0 },
185  { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
186  { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
187  { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
188  { 2, 4 }, { 3, 3 }, { 4, 2 },
189  { 4, 3 }, { 3, 4 },
190  { 4, 4 }
191 };
192 
/* Context table: [top mode + 1][left mode + 1][selector] -> 4x4 intra mode,
 * -1 marking invalid combinations. */
193 static const int8_t svq3_pred_1[6][6][5] = {
194  { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
195  { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
196  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
197  { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
198  { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
199  { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
200  { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
201  { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
202  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
203  { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
204  { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
205  { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
206 };
207 
/* (run, level) lookup for short DCT VLC codes; [0] is the inter table,
 * [1] the intra table (indexed by the `intra` flag in svq3_decode_block).
 * NOTE(review): the member declaration lines (internal 209-210) are missing
 * from this listing; fields named `run` and `level` are referenced below --
 * presumably both int8_t, confirm against the original svq3.c. */
208 static const struct {
211 } svq3_dct_tables[2][16] = {
212  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
213  { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
214  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
215  { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
216 };
217 
/* Dequantization multipliers indexed by qscale (0..31). */
218 static const uint32_t svq3_dequant_coeff[32] = {
219  3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
220  9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
221  24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
222  61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
223 };
224 
225 static int svq3_decode_end(AVCodecContext *avctx);
226 
227 static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
228 {
229  const unsigned qmul = svq3_dequant_coeff[qp];
230 #define stride 16
231  int i;
232  int temp[16];
233  static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
234 
235  for (i = 0; i < 4; i++) {
236  const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
237  const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
238  const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
239  const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
240 
241  temp[4 * i + 0] = z0 + z3;
242  temp[4 * i + 1] = z1 + z2;
243  temp[4 * i + 2] = z1 - z2;
244  temp[4 * i + 3] = z0 - z3;
245  }
246 
247  for (i = 0; i < 4; i++) {
248  const int offset = x_offset[i];
249  const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
250  const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
251  const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
252  const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
253 
254  output[stride * 0 + offset] = (int)((z0 + z3) * qmul + 0x80000) >> 20;
255  output[stride * 2 + offset] = (int)((z1 + z2) * qmul + 0x80000) >> 20;
256  output[stride * 8 + offset] = (int)((z1 - z2) * qmul + 0x80000) >> 20;
257  output[stride * 10 + offset] = (int)((z0 - z3) * qmul + 0x80000) >> 20;
258  }
259 }
260 #undef stride
261 
262 static void svq3_add_idct_c(uint8_t *dst, int16_t *block,
263  int stride, int qp, int dc)
264 {
265  const int qmul = svq3_dequant_coeff[qp];
266  int i;
267 
268  if (dc) {
269  dc = 13 * 13 * (dc == 1 ? 1538U* block[0]
270  : qmul * (block[0] >> 3) / 2);
271  block[0] = 0;
272  }
273 
274  for (i = 0; i < 4; i++) {
275  const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
276  const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
277  const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
278  const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
279 
280  block[0 + 4 * i] = z0 + z3;
281  block[1 + 4 * i] = z1 + z2;
282  block[2 + 4 * i] = z1 - z2;
283  block[3 + 4 * i] = z0 - z3;
284  }
285 
286  for (i = 0; i < 4; i++) {
287  const unsigned z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
288  const unsigned z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
289  const unsigned z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
290  const unsigned z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
291  const int rr = (dc + 0x80000u);
292 
293  dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((int)((z0 + z3) * qmul + rr) >> 20));
294  dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((int)((z1 + z2) * qmul + rr) >> 20));
295  dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((int)((z1 - z2) * qmul + rr) >> 20));
296  dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((int)((z0 - z3) * qmul + rr) >> 20));
297  }
298 
299  memset(block, 0, 16 * sizeof(int16_t));
300 }
301 
/*
 * Decode the (run, level) coefficient VLCs for one block into `block`,
 * following scan order. Returns 0 on success, -1 on a malformed bitstream
 * (negative VLC or run overshooting the current limit).
 * `type` selects the scan/table variant; type 2 is the two-phase "dual scan"
 * path, type 3 the chroma-DC path.
 * NOTE(review): the initializer line of scan_patterns (internal 306) is
 * missing from this listing -- it presumably lists the four scan tables
 * indexed by `type`; confirm against the original svq3.c.
 */
302 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
303  int index, const int type)
304 {
305  static const uint8_t *const scan_patterns[4] = {
307  };
308 
309  int run, level, sign, limit;
310  unsigned vlc;
/* intra = 1 for types 2 and 3, 0 for types 0 and 1 */
311  const int intra = 3 * type >> 2;
312  const uint8_t *const scan = scan_patterns[type];
313 
314  for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
/* a zero VLC terminates the coefficient list for this phase */
315  for (; (vlc = get_interleaved_ue_golomb(gb)) != 0; index++) {
316  if ((int32_t)vlc < 0)
317  return -1;
318 
/* LSB of the code carries the sign (0 -> negative) */
319  sign = (vlc & 1) ? 0 : -1;
320  vlc = vlc + 1 >> 1;
321 
322  if (type == 3) {
323  if (vlc < 3) {
324  run = 0;
325  level = vlc;
326  } else if (vlc < 4) {
327  run = 1;
328  level = 1;
329  } else {
330  run = vlc & 0x3;
331  level = (vlc + 9 >> 2) - run;
332  }
333  } else {
/* short codes come from the lookup table, long codes are split
 * into run/level fields with table-dependent level offsets */
334  if (vlc < 16U) {
335  run = svq3_dct_tables[intra][vlc].run;
336  level = svq3_dct_tables[intra][vlc].level;
337  } else if (intra) {
338  run = vlc & 0x7;
339  level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
340  } else {
341  run = vlc & 0xF;
342  level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
343  }
344  }
345 
346 
347  if ((index += run) >= limit)
348  return -1;
349 
350  block[scan[index]] = (level ^ sign) - sign;
351  }
352 
/* only the dual-scan type runs the second 8-coefficient phase */
353  if (type != 2) {
354  break;
355  }
356  }
357 
358  return 0;
359 }
360 
361 static av_always_inline int
362 svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C,
363  int i, int list, int part_width)
364 {
365  const int topright_ref = s->ref_cache[list][i - 8 + part_width];
366 
367  if (topright_ref != PART_NOT_AVAILABLE) {
368  *C = s->mv_cache[list][i - 8 + part_width];
369  return topright_ref;
370  } else {
371  *C = s->mv_cache[list][i - 8 - 1];
372  return s->ref_cache[list][i - 8 - 1];
373  }
374 }
375 
376 /**
377  * Get the predicted MV.
378  * @param n the block index
379  * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
380  * @param mx the x component of the predicted motion vector
381  * @param my the y component of the predicted motion vector
382  */
383 static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n,
384  int part_width, int list,
385  int ref, int *const mx, int *const my)
386 {
387  const int index8 = scan8[n];
388  const int top_ref = s->ref_cache[list][index8 - 8];
389  const int left_ref = s->ref_cache[list][index8 - 1];
390  const int16_t *const A = s->mv_cache[list][index8 - 1];
391  const int16_t *const B = s->mv_cache[list][index8 - 8];
392  const int16_t *C;
393  int diagonal_ref, match_count;
394 
395 /* mv_cache
396  * B . . A T T T T
397  * U . . L . . , .
398  * U . . L . . . .
399  * U . . L . . , .
400  * . . . L . . . .
401  */
402 
403  diagonal_ref = svq3_fetch_diagonal_mv(s, &C, index8, list, part_width);
404  match_count = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
405  if (match_count > 1) { //most common
406  *mx = mid_pred(A[0], B[0], C[0]);
407  *my = mid_pred(A[1], B[1], C[1]);
408  } else if (match_count == 1) {
409  if (left_ref == ref) {
410  *mx = A[0];
411  *my = A[1];
412  } else if (top_ref == ref) {
413  *mx = B[0];
414  *my = B[1];
415  } else {
416  *mx = C[0];
417  *my = C[1];
418  }
419  } else {
420  if (top_ref == PART_NOT_AVAILABLE &&
421  diagonal_ref == PART_NOT_AVAILABLE &&
422  left_ref != PART_NOT_AVAILABLE) {
423  *mx = A[0];
424  *my = A[1];
425  } else {
426  *mx = mid_pred(A[0], B[0], C[0]);
427  *my = mid_pred(A[1], B[1], C[1]);
428  }
429  }
430 }
431 
/*
 * Motion-compensate one partition: copy/average a (width x height) luma
 * block plus, unless the GRAY flag is set, the two half-size chroma blocks,
 * using full-, half- or third-pel interpolation.
 * NOTE(review): the call heads at internal lines 459 and 486 are missing
 * from this listing -- presumably the s->vdsp.emulated_edge_mc(...) calls
 * that fill s->edge_emu_buffer; confirm against the original svq3.c.
 */
432 static inline void svq3_mc_dir_part(SVQ3Context *s,
433  int x, int y, int width, int height,
434  int mx, int my, int dxy,
435  int thirdpel, int dir, int avg)
436 {
/* dir selects the reference: 0 = backward (last), 1 = forward (next) */
437  const SVQ3Frame *pic = (dir == 0) ? s->last_pic : s->next_pic;
438  uint8_t *src, *dest;
439  int i, emu = 0;
440  int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
441  int linesize = s->cur_pic->f->linesize[0];
442  int uvlinesize = s->cur_pic->f->linesize[1];
443 
444  mx += x;
445  my += y;
446 
/* source overlaps the picture edge: clamp and use edge emulation */
447  if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
448  my < 0 || my >= s->v_edge_pos - height - 1) {
449  emu = 1;
450  mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
451  my = av_clip(my, -16, s->v_edge_pos - height + 15);
452  }
453 
454  /* form component predictions */
455  dest = s->cur_pic->f->data[0] + x + y * linesize;
456  src = pic->f->data[0] + mx + my * linesize;
457 
458  if (emu) {
460  linesize, linesize,
461  width + 1, height + 1,
462  mx, my, s->h_edge_pos, s->v_edge_pos);
463  src = s->edge_emu_buffer;
464  }
465  if (thirdpel)
466  (avg ? s->tdsp.avg_tpel_pixels_tab
467  : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, linesize,
468  width, height);
469  else
470  (avg ? s->hdsp.avg_pixels_tab
471  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, linesize,
472  height);
473 
/* chroma planes at half resolution, skipped in grayscale mode */
474  if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
475  mx = mx + (mx < (int) x) >> 1;
476  my = my + (my < (int) y) >> 1;
477  width = width >> 1;
478  height = height >> 1;
479  blocksize++;
480 
481  for (i = 1; i < 3; i++) {
482  dest = s->cur_pic->f->data[i] + (x >> 1) + (y >> 1) * uvlinesize;
483  src = pic->f->data[i] + mx + my * uvlinesize;
484 
485  if (emu) {
487  uvlinesize, uvlinesize,
488  width + 1, height + 1,
489  mx, my, (s->h_edge_pos >> 1),
490  s->v_edge_pos >> 1);
491  src = s->edge_emu_buffer;
492  }
493  if (thirdpel)
494  (avg ? s->tdsp.avg_tpel_pixels_tab
495  : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
496  uvlinesize,
497  width, height);
498  else
499  (avg ? s->hdsp.avg_pixels_tab
500  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
501  uvlinesize,
502  height);
503  }
504  }
505 }
506 
/*
 * Decode and apply the motion vectors for every partition of the current
 * macroblock in one direction. `size` encodes the partition geometry,
 * `mode` the pel accuracy (FULLPEL/HALFPEL/THIRDPEL) or PREDICT_MODE for
 * temporally scaled vectors taken from the next picture.
 * Returns 0 on success, -1 on an invalid MV differential.
 * NOTE(review): internal lines 554-555 are missing from this listing --
 * presumably the get_interleaved_se_golomb() reads of the dy/dx
 * differentials; confirm against the original svq3.c.
 */
507 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
508  int dir, int avg)
509 {
510  int i, j, k, mx, my, dx, dy, x, y;
511  const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
512  const int part_height = 16 >> ((unsigned)(size + 1) / 3);
513  const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
/* edge limits in 1/6-pel units */
514  const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
515  const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
516 
517  for (i = 0; i < 16; i += part_height)
518  for (j = 0; j < 16; j += part_width) {
519  const int b_xy = (4 * s->mb_x + (j >> 2)) +
520  (4 * s->mb_y + (i >> 2)) * s->b_stride;
521  int dxy;
522  x = 16 * s->mb_x + j;
523  y = 16 * s->mb_y + i;
/* k: scan8-compatible 4x4 block index within the MB */
524  k = (j >> 2 & 1) + (i >> 1 & 2) +
525  (j >> 1 & 4) + (i & 8);
526 
527  if (mode != PREDICT_MODE) {
528  svq3_pred_motion(s, k, part_width >> 2, dir, 1, &mx, &my);
529  } else {
/* scale the co-located MV of the next picture by temporal distance */
530  mx = s->next_pic->motion_val[0][b_xy][0] * 2;
531  my = s->next_pic->motion_val[0][b_xy][1] * 2;
532 
533  if (dir == 0) {
534  mx = mx * s->frame_num_offset /
535  s->prev_frame_num_offset + 1 >> 1;
536  my = my * s->frame_num_offset /
537  s->prev_frame_num_offset + 1 >> 1;
538  } else {
539  mx = mx * (s->frame_num_offset - s->prev_frame_num_offset) /
540  s->prev_frame_num_offset + 1 >> 1;
541  my = my * (s->frame_num_offset - s->prev_frame_num_offset) /
542  s->prev_frame_num_offset + 1 >> 1;
543  }
544  }
545 
546  /* clip motion vector prediction to frame border */
547  mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
548  my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
549 
550  /* get (optional) motion vector differential */
551  if (mode == PREDICT_MODE) {
552  dx = dy = 0;
553  } else {
556 
557  if (dx != (int16_t)dx || dy != (int16_t)dy) {
558  av_log(s->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
559  return -1;
560  }
561  }
562 
563  /* compute motion vector */
564  if (mode == THIRDPEL_MODE) {
565  int fx, fy;
566  mx = (mx + 1 >> 1) + dx;
567  my = (my + 1 >> 1) + dy;
568  fx = (unsigned)(mx + 0x30000) / 3 - 0x10000;
569  fy = (unsigned)(my + 0x30000) / 3 - 0x10000;
570  dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
571 
572  svq3_mc_dir_part(s, x, y, part_width, part_height,
573  fx, fy, dxy, 1, dir, avg);
574  mx += mx;
575  my += my;
576  } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
577  mx = (unsigned)(mx + 1 + 0x30000) / 3 + dx - 0x10000;
578  my = (unsigned)(my + 1 + 0x30000) / 3 + dy - 0x10000;
579  dxy = (mx & 1) + 2 * (my & 1);
580 
581  svq3_mc_dir_part(s, x, y, part_width, part_height,
582  mx >> 1, my >> 1, dxy, 0, dir, avg);
583  mx *= 3;
584  my *= 3;
585  } else {
586  mx = (unsigned)(mx + 3 + 0x60000) / 6 + dx - 0x10000;
587  my = (unsigned)(my + 3 + 0x60000) / 6 + dy - 0x10000;
588 
589  svq3_mc_dir_part(s, x, y, part_width, part_height,
590  mx, my, 0, 0, dir, avg);
591  mx *= 6;
592  my *= 6;
593  }
594 
595  /* update mv_cache */
596  if (mode != PREDICT_MODE) {
597  int32_t mv = pack16to32(mx, my);
598 
599  if (part_height == 8 && i < 8) {
600  AV_WN32A(s->mv_cache[dir][scan8[k] + 1 * 8], mv);
601 
602  if (part_width == 8 && j < 8)
603  AV_WN32A(s->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
604  }
605  if (part_width == 8 && j < 8)
606  AV_WN32A(s->mv_cache[dir][scan8[k] + 1], mv);
607  if (part_width == 4 || part_height == 4)
608  AV_WN32A(s->mv_cache[dir][scan8[k]], mv);
609  }
610 
611  /* write back motion vectors */
612  fill_rectangle(s->cur_pic->motion_val[dir][b_xy],
613  part_width >> 2, part_height >> 2, s->b_stride,
614  pack16to32(mx, my), 4);
615  }
616 
617  return 0;
618 }
619 
/*
 * Add the luma residual IDCT for every coded 4x4 block of a non-INTRA4x4
 * macroblock (INTRA4x4 blocks apply their residual during prediction).
 * NOTE(review): the signature head line (internal 620) is missing from this
 * listing -- presumably "static void hl_decode_mb_idct_luma(SVQ3Context *s,";
 * confirm against the original svq3.c.
 */
621  int mb_type, const int *block_offset,
622  int linesize, uint8_t *dest_y)
623 {
624  int i;
625  if (!IS_INTRA4x4(mb_type)) {
/* only blocks with coded coefficients (or a DC term) are processed */
626  for (i = 0; i < 16; i++)
627  if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
628  uint8_t *const ptr = dest_y + block_offset[i];
629  svq3_add_idct_c(ptr, s->mb + i * 16, linesize,
630  s->qscale, IS_INTRA(mb_type) ? 1 : 0);
631  }
632  }
633 }
634 
/*
 * Perform luma intra prediction for the current macroblock: per-4x4-block
 * prediction plus residual for INTRA4x4, or whole-block 16x16 prediction
 * plus DC dequant/IDCT for INTRA16x16.
 * NOTE(review): the signature head line (internal 635) is missing from this
 * listing -- presumably "static void hl_decode_mb_predict_luma(SVQ3Context *s,";
 * confirm against the original svq3.c.
 */
636  int mb_type,
637  const int *block_offset,
638  int linesize,
639  uint8_t *dest_y)
640 {
641  int i;
642  int qscale = s->qscale;
643 
644  if (IS_INTRA4x4(mb_type)) {
645  for (i = 0; i < 16; i++) {
646  uint8_t *const ptr = dest_y + block_offset[i];
647  const int dir = s->intra4x4_pred_mode_cache[scan8[i]];
648 
649  uint8_t *topright;
650  int nnz, tr;
/* these modes need top-right samples; synthesize them by replicating
 * the last available top pixel when the neighbour is missing */
651  if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
652  const int topright_avail = (s->topright_samples_available << i) & 0x8000;
653  av_assert2(s->mb_y || linesize <= block_offset[i]);
654  if (!topright_avail) {
655  tr = ptr[3 - linesize] * 0x01010101u;
656  topright = (uint8_t *)&tr;
657  } else
658  topright = ptr + 4 - linesize;
659  } else
660  topright = NULL;
661 
662  s->hpc.pred4x4[dir](ptr, topright, linesize);
663  nnz = s->non_zero_count_cache[scan8[i]];
664  if (nnz) {
665  svq3_add_idct_c(ptr, s->mb + i * 16, linesize, qscale, 0);
666  }
667  }
668  } else {
669  s->hpc.pred16x16[s->intra16x16_pred_mode](dest_y, linesize);
670  svq3_luma_dc_dequant_idct_c(s->mb, s->mb_luma_dc[0], qscale);
671  }
672 }
673 
/*
 * Reconstruct the current macroblock: intra prediction (if any), luma
 * residual, and chroma dequant/IDCT for the coded chroma blocks.
 * NOTE(review): the signature line (internal 674) is missing from this
 * listing -- presumably "static void hl_decode_mb(SVQ3Context *s)";
 * confirm against the original svq3.c.
 */
675 {
676  const int mb_x = s->mb_x;
677  const int mb_y = s->mb_y;
678  const int mb_xy = s->mb_xy;
679  const int mb_type = s->cur_pic->mb_type[mb_xy];
680  uint8_t *dest_y, *dest_cb, *dest_cr;
681  int linesize, uvlinesize;
682  int i, j;
683  const int *block_offset = &s->block_offset[0];
684  const int block_h = 16 >> 1;
685 
686  linesize = s->cur_pic->f->linesize[0];
687  uvlinesize = s->cur_pic->f->linesize[1];
688 
689  dest_y = s->cur_pic->f->data[0] + (mb_x + mb_y * linesize) * 16;
690  dest_cb = s->cur_pic->f->data[1] + mb_x * 8 + mb_y * uvlinesize * block_h;
691  dest_cr = s->cur_pic->f->data[2] + mb_x * 8 + mb_y * uvlinesize * block_h;
692 
693  s->vdsp.prefetch(dest_y + (s->mb_x & 3) * 4 * linesize + 64, linesize, 4);
694  s->vdsp.prefetch(dest_cb + (s->mb_x & 7) * uvlinesize + 64, dest_cr - dest_cb, 2);
695 
696  if (IS_INTRA(mb_type)) {
697  s->hpc.pred8x8[s->chroma_pred_mode](dest_cb, uvlinesize);
698  s->hpc.pred8x8[s->chroma_pred_mode](dest_cr, uvlinesize);
699 
700  hl_decode_mb_predict_luma(s, mb_type, block_offset, linesize, dest_y);
701  }
702 
703  hl_decode_mb_idct_luma(s, mb_type, block_offset, linesize, dest_y);
704 
/* cbp bits 4-5 flag coded chroma coefficients */
705  if (s->cbp & 0x30) {
706  uint8_t *dest[2] = { dest_cb, dest_cr };
707  s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 1,
708  s->dequant4_coeff[4][0]);
709  s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 2,
710  s->dequant4_coeff[4][0]);
711  for (j = 1; j < 3; j++) {
712  for (i = j * 16; i < j * 16 + 4; i++)
713  if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
714  uint8_t *const ptr = dest[j - 1] + block_offset[i];
715  svq3_add_idct_c(ptr, s->mb + i * 16,
716  uvlinesize, ff_h264_chroma_qp[0][s->qscale + 12] - 12, 2);
717  }
718  }
719  }
720 }
721 
/*
 * Decode one macroblock: mode/partitioning, motion compensation or intra
 * prediction setup, CBP, qscale update and residual coefficients.
 * Returns 0 on success, -1 on a bitstream error.
 * mb_type ranges: 0 = SKIP, 1..7 = INTER partitions, 8/33 = INTRA4x4
 * variants, others = INTRA16x16.
 * NOTE(review): this documentation-extracted listing is missing several
 * lines (internal 861, 864, 891-893, 910, 945, 949, 960, 979, 989, 1001,
 * 1015-1016) -- mostly golomb reads, av_log call heads and the intra
 * pred-mode checks; confirm against the original svq3.c.
 */
722 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
723 {
724  int i, j, k, m, dir, mode;
725  int cbp = 0;
726  uint32_t vlc;
727  int8_t *top, *left;
728  const int mb_xy = s->mb_xy;
729  const int b_xy = 4 * s->mb_x + 4 * s->mb_y * s->b_stride;
730 
731  s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
732  s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
733  s->topright_samples_available = 0xFFFF;
734 
735  if (mb_type == 0) { /* SKIP */
736  if (s->pict_type == AV_PICTURE_TYPE_P ||
737  s->next_pic->mb_type[mb_xy] == -1) {
738  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
739  0, 0, 0, 0, 0, 0);
740 
741  if (s->pict_type == AV_PICTURE_TYPE_B)
742  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
743  0, 0, 0, 0, 1, 1);
744 
745  mb_type = MB_TYPE_SKIP;
746  } else {
/* B-frame skip with a usable co-located MB: temporal prediction */
747  mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
748  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
749  return -1;
750  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
751  return -1;
752 
753  mb_type = MB_TYPE_16x16;
754  }
755  } else if (mb_type < 8) { /* INTER */
/* one flag selects among the pel-accuracy modes enabled in the header */
756  if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&s->gb_slice))
757  mode = THIRDPEL_MODE;
758  else if (s->halfpel_flag &&
759  s->thirdpel_flag == !get_bits1(&s->gb_slice))
760  mode = HALFPEL_MODE;
761  else
762  mode = FULLPEL_MODE;
763 
764  /* fill caches */
765  /* note ref_cache should contain here:
766  * ????????
767  * ???11111
768  * N??11111
769  * N??11111
770  * N??11111
771  */
772 
773  for (m = 0; m < 2; m++) {
/* left neighbour MVs, valid only if that MB was not intra-coded */
774  if (s->mb_x > 0 && s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6] != -1) {
775  for (i = 0; i < 4; i++)
776  AV_COPY32(s->mv_cache[m][scan8[0] - 1 + i * 8],
777  s->cur_pic->motion_val[m][b_xy - 1 + i * s->b_stride]);
778  } else {
779  for (i = 0; i < 4; i++)
780  AV_ZERO32(s->mv_cache[m][scan8[0] - 1 + i * 8]);
781  }
782  if (s->mb_y > 0) {
783  memcpy(s->mv_cache[m][scan8[0] - 1 * 8],
784  s->cur_pic->motion_val[m][b_xy - s->b_stride],
785  4 * 2 * sizeof(int16_t));
786  memset(&s->ref_cache[m][scan8[0] - 1 * 8],
787  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
788 
789  if (s->mb_x < s->mb_width - 1) {
790  AV_COPY32(s->mv_cache[m][scan8[0] + 4 - 1 * 8],
791  s->cur_pic->motion_val[m][b_xy - s->b_stride + 4]);
792  s->ref_cache[m][scan8[0] + 4 - 1 * 8] =
793  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride + 1] + 6] == -1 ||
794  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
795  } else
796  s->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
797  if (s->mb_x > 0) {
798  AV_COPY32(s->mv_cache[m][scan8[0] - 1 - 1 * 8],
799  s->cur_pic->motion_val[m][b_xy - s->b_stride - 1]);
800  s->ref_cache[m][scan8[0] - 1 - 1 * 8] =
801  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
802  } else
803  s->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
804  } else
805  memset(&s->ref_cache[m][scan8[0] - 1 * 8 - 1],
806  PART_NOT_AVAILABLE, 8);
807 
/* second pass (list 1) is only needed for B-frames */
808  if (s->pict_type != AV_PICTURE_TYPE_B)
809  break;
810  }
811 
812  /* decode motion vector(s) and form prediction(s) */
813  if (s->pict_type == AV_PICTURE_TYPE_P) {
814  if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
815  return -1;
816  } else { /* AV_PICTURE_TYPE_B */
817  if (mb_type != 2) {
818  if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
819  return -1;
820  } else {
821  for (i = 0; i < 4; i++)
822  memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
823  0, 4 * 2 * sizeof(int16_t));
824  }
825  if (mb_type != 1) {
826  if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
827  return -1;
828  } else {
829  for (i = 0; i < 4; i++)
830  memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
831  0, 4 * 2 * sizeof(int16_t));
832  }
833  }
834 
835  mb_type = MB_TYPE_16x16;
836  } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
837  int8_t *i4x4 = s->intra4x4_pred_mode + s->mb2br_xy[s->mb_xy];
838  int8_t *i4x4_cache = s->intra4x4_pred_mode_cache;
839 
840  memset(s->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
841 
842  if (mb_type == 8) {
/* import neighbour prediction modes into the scan8 cache */
843  if (s->mb_x > 0) {
844  for (i = 0; i < 4; i++)
845  s->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6 - i];
846  if (s->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
847  s->left_samples_available = 0x5F5F;
848  }
849  if (s->mb_y > 0) {
850  s->intra4x4_pred_mode_cache[4 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 0];
851  s->intra4x4_pred_mode_cache[5 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 1];
852  s->intra4x4_pred_mode_cache[6 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 2];
853  s->intra4x4_pred_mode_cache[7 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 3];
854 
855  if (s->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
856  s->top_samples_available = 0x33FF;
857  }
858 
859  /* decode prediction codes for luma blocks */
860  for (i = 0; i < 16; i += 2) {
862 
863  if (vlc >= 25U) {
865  "luma prediction:%"PRIu32"\n", vlc);
866  return -1;
867  }
868 
869  left = &s->intra4x4_pred_mode_cache[scan8[i] - 1];
870  top = &s->intra4x4_pred_mode_cache[scan8[i] - 8];
871 
872  left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
873  left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
874 
875  if (left[1] == -1 || left[2] == -1) {
876  av_log(s->avctx, AV_LOG_ERROR, "weird prediction\n");
877  return -1;
878  }
879  }
880  } else { /* mb_type == 33, DC_128_PRED block type */
881  for (i = 0; i < 4; i++)
882  memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
883  }
884 
/* persist this MB's prediction modes for the neighbours below/right */
885  AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
886  i4x4[4] = i4x4_cache[7 + 8 * 3];
887  i4x4[5] = i4x4_cache[7 + 8 * 2];
888  i4x4[6] = i4x4_cache[7 + 8 * 1];
889 
890  if (mb_type == 8) {
894 
895  s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
896  s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
897  } else {
898  for (i = 0; i < 4; i++)
899  memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
900 
901  s->top_samples_available = 0x33FF;
902  s->left_samples_available = 0x5F5F;
903  }
904 
905  mb_type = MB_TYPE_INTRA4x4;
906  } else { /* INTRA16x16 */
907  dir = ff_h264_i_mb_type_info[mb_type - 8].pred_mode;
908  dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
909 
911  s->left_samples_available, dir, 0)) < 0) {
912  av_log(s->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
913  return s->intra16x16_pred_mode;
914  }
915 
916  cbp = ff_h264_i_mb_type_info[mb_type - 8].cbp;
917  mb_type = MB_TYPE_INTRA16x16;
918  }
919 
/* clear list-0/list-1 MVs for intra MBs in P/B frames */
920  if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
921  for (i = 0; i < 4; i++)
922  memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
923  0, 4 * 2 * sizeof(int16_t));
924  if (s->pict_type == AV_PICTURE_TYPE_B) {
925  for (i = 0; i < 4; i++)
926  memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
927  0, 4 * 2 * sizeof(int16_t));
928  }
929  }
930  if (!IS_INTRA4x4(mb_type)) {
931  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy], DC_PRED, 8);
932  }
933  if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
934  memset(s->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
935  }
936 
937  if (!IS_INTRA16x16(mb_type) &&
938  (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
939  if ((vlc = get_interleaved_ue_golomb(&s->gb_slice)) >= 48U){
940  av_log(s->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
941  return -1;
942  }
943 
944  cbp = IS_INTRA(mb_type) ? ff_h264_golomb_to_intra4x4_cbp[vlc]
946  }
947  if (IS_INTRA16x16(mb_type) ||
948  (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
950 
951  if (s->qscale > 31u) {
952  av_log(s->avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
953  return -1;
954  }
955  }
956  if (IS_INTRA16x16(mb_type)) {
957  AV_ZERO128(s->mb_luma_dc[0] + 0);
958  AV_ZERO128(s->mb_luma_dc[0] + 8);
959  if (svq3_decode_block(&s->gb_slice, s->mb_luma_dc[0], 0, 1)) {
961  "error while decoding intra luma dc\n");
962  return -1;
963  }
964  }
965 
966  if (cbp) {
967  const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
968  const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
969 
/* luma residual: one coded 8x8 group per set cbp bit */
970  for (i = 0; i < 4; i++)
971  if ((cbp & (1 << i))) {
972  for (j = 0; j < 4; j++) {
973  k = index ? (1 * (j & 1) + 2 * (i & 1) +
974  2 * (j & 2) + 4 * (i & 2))
975  : (4 * i + j);
976  s->non_zero_count_cache[scan8[k]] = 1;
977 
978  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], index, type)) {
980  "error while decoding block\n");
981  return -1;
982  }
983  }
984  }
985 
/* chroma residual: DC blocks, then AC if bit 5 is set */
986  if ((cbp & 0x30)) {
987  for (i = 1; i < 3; ++i)
988  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * 16 * i], 0, 3)) {
990  "error while decoding chroma dc block\n");
991  return -1;
992  }
993 
994  if ((cbp & 0x20)) {
995  for (i = 1; i < 3; i++) {
996  for (j = 0; j < 4; j++) {
997  k = 16 * i + j;
998  s->non_zero_count_cache[scan8[k]] = 1;
999 
1000  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], 1, 1)) {
1002  "error while decoding chroma ac block\n");
1003  return -1;
1004  }
1005  }
1006  }
1007  }
1008  }
1009  }
1010 
1011  s->cbp = cbp;
1012  s->cur_pic->mb_type[mb_xy] = mb_type;
1013 
1014  if (IS_INTRA(mb_type))
1017 
1018  return 0;
1019 }
1020 
/*
 * svq3_decode_slice_header(): parse one SVQ3 slice header from s->gb,
 * copy the slice payload into s->slice_buf and set up s->gb_slice for
 * macroblock decoding.  Returns 0 on success, a negative value on error.
 * NOTE(review): the function signature line (original line 1021) is missing
 * from this extraction; the body below begins at the opening brace.
 */
1022 {
1023  SVQ3Context *s = avctx->priv_data;
1024  const int mb_xy = s->mb_xy;
1025  int i, header;
1026  unsigned slice_id;
1027 
/* First byte: low 5 bits select the slice-header variant (must be 1 or 2),
 * bits 5-6 give the size in bytes of the following slice-length field. */
1028  header = get_bits(&s->gb, 8);
1029 
1030  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
1031  /* TODO: what? */
1032  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
1033  return -1;
1034  } else {
1035  int slice_bits, slice_bytes, slice_length;
/* number of bytes (1-3) occupied by the slice-length field itself */
1036  int length = header >> 5 & 3;
1037 
1038  slice_length = show_bits(&s->gb, 8 * length);
1039  slice_bits = slice_length * 8;
1040  slice_bytes = slice_length + length - 1;
1041 
1042  skip_bits(&s->gb, 8);
1043 
/* NOTE(review): original line 1044, which (re)allocates s->slice_buf to hold
 * slice_bytes plus padding, is missing from this extraction; the NULL check
 * below refers to that allocation — confirm against the upstream source. */
1045  if (!s->slice_buf)
1046  return AVERROR(ENOMEM);
1047 
/* guard against a slice length pointing past the end of the packet */
1048  if (slice_bytes * 8LL > get_bits_left(&s->gb)) {
1049  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
1050  return AVERROR_INVALIDDATA;
1051  }
1052  memcpy(s->slice_buf, s->gb.buffer + s->gb.index / 8, slice_bytes);
1053 
/* watermarked files scramble the first 4 payload bytes; undo with XOR */
1054  if (s->watermark_key) {
1055  uint32_t header = AV_RL32(&s->slice_buf[1]);
1056  AV_WL32(&s->slice_buf[1], header ^ s->watermark_key);
1057  }
1058  init_get_bits(&s->gb_slice, s->slice_buf, slice_bits);
1059 
/* drop the embedded length field so gb_slice sees only slice data */
1060  if (length > 0) {
1061  memmove(s->slice_buf, &s->slice_buf[slice_length], length - 1);
1062  }
1063  skip_bits_long(&s->gb, slice_bytes * 8);
1064  }
1065 
/* slice_id 0..2 maps to I/P/B via ff_h264_golomb_to_pict_type below */
1066  if ((slice_id = get_interleaved_ue_golomb(&s->gb_slice)) >= 3) {
1067  av_log(s->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
1068  return -1;
1069  }
1070 
1071  s->slice_type = ff_h264_golomb_to_pict_type[slice_id];
1072 
1073  if ((header & 0x9F) == 2) {
/* variant 2 carries an explicit macroblock address of ceil(log2(mb_num)) bits */
1074  i = (s->mb_num < 64) ? 6 : (1 + av_log2(s->mb_num - 1));
1075  get_bits(&s->gb_slice, i);
1076  } else if (get_bits1(&s->gb_slice)) {
1077  avpriv_report_missing_feature(s->avctx, "Media key encryption");
1078  return AVERROR_PATCHWELCOME;
1079  }
1080 
1081  s->slice_num = get_bits(&s->gb_slice, 8);
1082  s->qscale = get_bits(&s->gb_slice, 5);
1083  s->adaptive_quant = get_bits1(&s->gb_slice);
1084 
1085  /* unknown fields */
1086  skip_bits1(&s->gb_slice);
1087 
1088  if (s->has_watermark)
1089  skip_bits1(&s->gb_slice);
1090 
1091  skip_bits1(&s->gb_slice);
1092  skip_bits(&s->gb_slice, 2);
1093 
1094  if (skip_1stop_8data_bits(&s->gb_slice) < 0)
1095  return AVERROR_INVALIDDATA;
1096 
1097  /* reset intra predictors and invalidate motion vector references */
1098  if (s->mb_x > 0) {
1099  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - 1] + 3,
1100  -1, 4 * sizeof(int8_t));
1101  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_x],
1102  -1, 8 * sizeof(int8_t) * s->mb_x);
1103  }
1104  if (s->mb_y > 0) {
1105  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_stride],
1106  -1, 8 * sizeof(int8_t) * (s->mb_width - s->mb_x));
1107 
1108  if (s->mb_x > 0)
1109  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] = -1;
1110  }
1111 
1112  return 0;
1113 }
1114 
1116 {
1117  int q, x;
1118  const int max_qp = 51;
1119 
1120  for (q = 0; q < max_qp + 1; q++) {
1121  int shift = ff_h264_quant_div6[q] + 2;
1122  int idx = ff_h264_quant_rem6[q];
1123  for (x = 0; x < 16; x++)
1124  s->dequant4_coeff[q][(x >> 2) | ((x << 2) & 0xF)] =
1125  ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] * 16) << shift;
1126  }
1127 }
1128 
/*
 * svq3_decode_init(): one-time decoder initialization — allocate the three
 * frame slots, init DSP contexts, parse the "SEQH" atom in the extradata
 * (frame size, pel flags, optional zlib-compressed watermark logo), and set
 * up macroblock geometry tables.  Returns 0 on success, negative on error.
 * NOTE(review): the signature line (original line 1129) is missing from this
 * extraction; the body below begins at the opening brace.
 */
1130 {
1131  SVQ3Context *s = avctx->priv_data;
1132  int m, x, y;
1133  unsigned char *extradata;
1134  unsigned char *extradata_end;
1135  unsigned int size;
1136  int marker_found = 0;
1137  int ret;
1138 
/* cur/last/next rotate through the fixed frames[3] array */
1139  s->cur_pic = &s->frames[0];
1140  s->last_pic = &s->frames[1];
1141  s->next_pic = &s->frames[2];
1142 
1143  s->cur_pic->f = av_frame_alloc();
1144  s->last_pic->f = av_frame_alloc();
1145  s->next_pic->f = av_frame_alloc();
1146  if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
1147  return AVERROR(ENOMEM);
1148 
1149  ff_h264dsp_init(&s->h264dsp, 8, 1);
/* NOTE(review): original line 1150 (initialization of the H.264 prediction
 * context s->hpc) is missing from this extraction — confirm upstream. */
1151  ff_videodsp_init(&s->vdsp, 8);
1152 
1153 
1154  avctx->bits_per_raw_sample = 8;
1155 
1156  ff_hpeldsp_init(&s->hdsp, avctx->flags);
1157  ff_tpeldsp_init(&s->tdsp);
1158 
/* SVQ3 is full-range 4:2:0 */
1159  avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
1160  avctx->color_range = AVCOL_RANGE_JPEG;
1161 
1162  s->avctx = avctx;
1163  s->halfpel_flag = 1;
1164  s->thirdpel_flag = 1;
1165  s->has_watermark = 0;
1166 
1167  /* prowl for the "SEQH" marker in the extradata */
1168  extradata = (unsigned char *)avctx->extradata;
1169  extradata_end = avctx->extradata + avctx->extradata_size;
1170  if (extradata) {
1171  for (m = 0; m + 8 < avctx->extradata_size; m++) {
1172  if (!memcmp(extradata, "SEQH", 4)) {
1173  marker_found = 1;
1174  break;
1175  }
1176  extradata++;
1177  }
1178  }
1179 
1180  /* if a match was found, parse the extra data */
1181  if (marker_found) {
1182  GetBitContext gb;
1183  int frame_size_code;
1184  int unk0, unk1, unk2, unk3, unk4;
1185  int w,h;
1186 
/* SEQH payload size, big-endian, must fit inside the extradata */
1187  size = AV_RB32(&extradata[4]);
1188  if (size > extradata_end - extradata - 8) {
1189  ret = AVERROR_INVALIDDATA;
1190  goto fail;
1191  }
1192  init_get_bits(&gb, extradata + 8, size * 8);
1193 
1194  /* 'frame size code' and optional 'width, height' */
1195  frame_size_code = get_bits(&gb, 3);
1196  switch (frame_size_code) {
1197  case 0:
1198  w = 160;
1199  h = 120;
1200  break;
1201  case 1:
1202  w = 128;
1203  h = 96;
1204  break;
1205  case 2:
1206  w = 176;
1207  h = 144;
1208  break;
1209  case 3:
1210  w = 352;
1211  h = 288;
1212  break;
1213  case 4:
1214  w = 704;
1215  h = 576;
1216  break;
1217  case 5:
1218  w = 240;
1219  h = 180;
1220  break;
1221  case 6:
1222  w = 320;
1223  h = 240;
1224  break;
1225  case 7:
/* code 7: explicit 12-bit width and height follow */
1226  w = get_bits(&gb, 12);
1227  h = get_bits(&gb, 12);
1228  break;
1229  }
1230  ret = ff_set_dimensions(avctx, w, h);
1231  if (ret < 0)
1232  goto fail;
1233 
1234  s->halfpel_flag = get_bits1(&gb);
1235  s->thirdpel_flag = get_bits1(&gb);
1236 
1237  /* unknown fields */
1238  unk0 = get_bits1(&gb);
1239  unk1 = get_bits1(&gb);
1240  unk2 = get_bits1(&gb);
1241  unk3 = get_bits1(&gb);
1242 
1243  s->low_delay = get_bits1(&gb);
1244 
1245  /* unknown field */
1246  unk4 = get_bits1(&gb);
1247 
1248  av_log(avctx, AV_LOG_DEBUG, "Unknown fields %d %d %d %d %d\n",
1249  unk0, unk1, unk2, unk3, unk4);
1250 
1251  if (skip_1stop_8data_bits(&gb) < 0) {
1252  ret = AVERROR_INVALIDDATA;
1253  goto fail;
1254  }
1255 
1256  s->has_watermark = get_bits1(&gb);
1257  avctx->has_b_frames = !s->low_delay;
1258  if (s->has_watermark) {
1259 #if CONFIG_ZLIB
1260  unsigned watermark_width = get_interleaved_ue_golomb(&gb);
1261  unsigned watermark_height = get_interleaved_ue_golomb(&gb);
1262  int u1 = get_interleaved_ue_golomb(&gb);
1263  int u2 = get_bits(&gb, 8);
1264  int u3 = get_bits(&gb, 2);
1265  int u4 = get_interleaved_ue_golomb(&gb);
/* 4 bytes per pixel for the uncompressed logo buffer */
1266  unsigned long buf_len = watermark_width *
1267  watermark_height * 4;
/* byte offset of the zlib stream within the SEQH payload */
1268  int offset = get_bits_count(&gb) + 7 >> 3;
1269  uint8_t *buf;
1270 
/* overflow check: width * height * 4 must fit in an unsigned */
1271  if (watermark_height <= 0 ||
1272  (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height) {
1273  ret = -1;
1274  goto fail;
1275  }
1276 
1277  buf = av_malloc(buf_len);
1278  if (!buf) {
1279  ret = AVERROR(ENOMEM);
1280  goto fail;
1281  }
1282  av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1283  watermark_width, watermark_height);
1284  av_log(avctx, AV_LOG_DEBUG,
1285  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1286  u1, u2, u3, u4, offset);
1287  if (uncompress(buf, &buf_len, extradata + 8 + offset,
1288  size - offset) != Z_OK) {
1289  av_log(avctx, AV_LOG_ERROR,
1290  "could not uncompress watermark logo\n");
1291  av_free(buf);
1292  ret = -1;
1293  goto fail;
1294  }
/* NOTE(review): original line 1295, which derives the initial 16-bit
 * s->watermark_key from the uncompressed logo (via a CRC), is missing
 * from this extraction; the line below widens it to 32 bits. */
1296 
1297  s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1298  av_log(avctx, AV_LOG_DEBUG,
1299  "watermark key %#"PRIx32"\n", s->watermark_key);
1300  av_free(buf);
1301 #else
1302  av_log(avctx, AV_LOG_ERROR,
1303  "this svq3 file contains watermark which need zlib support compiled in\n");
1304  ret = -1;
1305  goto fail;
1306 #endif
1307  }
1308  }
1309 
/* macroblock geometry; mb_stride has one extra column for edge handling */
1310  s->mb_width = (avctx->width + 15) / 16;
1311  s->mb_height = (avctx->height + 15) / 16;
1312  s->mb_stride = s->mb_width + 1;
1313  s->mb_num = s->mb_width * s->mb_height;
1314  s->b_stride = 4 * s->mb_width;
1315  s->h_edge_pos = s->mb_width * 16;
1316  s->v_edge_pos = s->mb_height * 16;
1317 
1318  s->intra4x4_pred_mode = av_mallocz(s->mb_stride * 2 * 8);
1319  if (!s->intra4x4_pred_mode)
1320  return AVERROR(ENOMEM);
1321 
1322  s->mb2br_xy = av_mallocz(s->mb_stride * (s->mb_height + 1) *
1323  sizeof(*s->mb2br_xy));
1324  if (!s->mb2br_xy)
1325  return AVERROR(ENOMEM);
1326 
1327  for (y = 0; y < s->mb_height; y++)
1328  for (x = 0; x < s->mb_width; x++) {
1329  const int mb_xy = x + y * s->mb_stride;
1330 
/* map each mb to a slot in the two-row rolling intra-pred buffer */
1331  s->mb2br_xy[mb_xy] = 8 * (mb_xy % (2 * s->mb_stride));
1332  }
1333 
/* NOTE(review): original line 1334 (the call that fills s->dequant4_coeff,
 * presumably init_dequant4_coeff_table(s)) is missing from this extraction. */
1335 
1336  return 0;
1337 fail:
1338  svq3_decode_end(avctx);
1339  return ret;
1340 }
1341 
1342 static void free_picture(AVCodecContext *avctx, SVQ3Frame *pic)
1343 {
1344  int i;
1345  for (i = 0; i < 2; i++) {
1346  av_buffer_unref(&pic->motion_val_buf[i]);
1347  av_buffer_unref(&pic->ref_index_buf[i]);
1348  }
1350 
1351  av_frame_unref(pic->f);
1352 }
1353 
1354 static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
1355 {
1356  SVQ3Context *s = avctx->priv_data;
1357  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
1358  const int mb_array_size = s->mb_stride * s->mb_height;
1359  const int b4_stride = s->mb_width * 4 + 1;
1360  const int b4_array_size = b4_stride * s->mb_height * 4;
1361  int ret;
1362 
1363  if (!pic->motion_val_buf[0]) {
1364  int i;
1365 
1366  pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) * sizeof(uint32_t));
1367  if (!pic->mb_type_buf)
1368  return AVERROR(ENOMEM);
1369  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
1370 
1371  for (i = 0; i < 2; i++) {
1372  pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1373  pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1374  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1375  ret = AVERROR(ENOMEM);
1376  goto fail;
1377  }
1378 
1379  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1380  pic->ref_index[i] = pic->ref_index_buf[i]->data;
1381  }
1382  }
1383 
1384  ret = ff_get_buffer(avctx, pic->f,
1385  (s->pict_type != AV_PICTURE_TYPE_B) ?
1387  if (ret < 0)
1388  goto fail;
1389 
1390  if (!s->edge_emu_buffer) {
1391  s->edge_emu_buffer = av_mallocz_array(pic->f->linesize[0], 17);
1392  if (!s->edge_emu_buffer)
1393  return AVERROR(ENOMEM);
1394  }
1395 
1396  return 0;
1397 fail:
1398  free_picture(avctx, pic);
1399  return ret;
1400 }
1401 
/*
 * svq3_decode_frame(): decode one packet into a frame.  Handles the EOF
 * flush (empty packet), optional watermark descrambling of the input,
 * reference-frame bookkeeping, and the per-macroblock decode loop.
 * NOTE(review): several original source lines are missing from this
 * extraction (marked inline below) — compare against upstream before
 * relying on the exact statements in those regions.
 */
1402 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1403  int *got_frame, AVPacket *avpkt)
1404 {
1405  SVQ3Context *s = avctx->priv_data;
1406  int buf_size = avpkt->size;
1407  int left;
1408  uint8_t *buf;
1409  int ret, m, i;
1410 
1411  /* special case for last picture */
1412  if (buf_size == 0) {
/* flush: emit the buffered next_pic exactly once when draining */
1413  if (s->next_pic->f->data[0] && !s->low_delay && !s->last_frame_output) {
1414  ret = av_frame_ref(data, s->next_pic->f);
1415  if (ret < 0)
1416  return ret;
1417  s->last_frame_output = 1;
1418  *got_frame = 1;
1419  }
1420  return 0;
1421  }
1422 
1423  s->mb_x = s->mb_y = s->mb_xy = 0;
1424 
/* watermarked streams are descrambled in place, so work on a copy */
1425  if (s->watermark_key) {
1426  av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
1427  if (!s->buf)
1428  return AVERROR(ENOMEM);
1429  memcpy(s->buf, avpkt->data, buf_size);
1430  buf = s->buf;
1431  } else {
1432  buf = avpkt->data;
1433  }
1434 
1435  ret = init_get_bits(&s->gb, buf, 8 * buf_size);
1436  if (ret < 0)
1437  return ret;
1438 
1439  if (svq3_decode_slice_header(avctx))
1440  return -1;
1441 
1442  s->pict_type = s->slice_type;
1443 
/* rotate references: the previous "next" becomes "last" for P/I frames */
1444  if (s->pict_type != AV_PICTURE_TYPE_B)
1445  FFSWAP(SVQ3Frame*, s->next_pic, s->last_pic);
1446 
1447  av_frame_unref(s->cur_pic->f);
1448 
1449  /* for skipping the frame */
1450  s->cur_pic->f->pict_type = s->pict_type;
/* NOTE(review): original line 1451 (setting the frame's key_frame flag)
 * is missing from this extraction. */
1452 
1453  ret = get_buffer(avctx, s->cur_pic);
1454  if (ret < 0)
1455  return ret;
1456 
/* precompute per-4x4-block byte offsets for luma (index 0..15) and the
 * doubled-linesize variants at +48, in scan8 order */
1457  for (i = 0; i < 16; i++) {
1458  s->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1459  s->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1460  }
1461  for (i = 0; i < 16; i++) {
1462  s->block_offset[16 + i] =
1463  s->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1464  s->block_offset[48 + 16 + i] =
1465  s->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1466  }
1467 
/* synthesize grey reference frames when references are missing (e.g. after
 * a seek) instead of failing outright */
1468  if (s->pict_type != AV_PICTURE_TYPE_I) {
1469  if (!s->last_pic->f->data[0]) {
1470  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1471  av_frame_unref(s->last_pic->f);
1472  ret = get_buffer(avctx, s->last_pic);
1473  if (ret < 0)
1474  return ret;
1475  memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
1476  memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
1477  s->last_pic->f->linesize[1]);
1478  memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
1479  s->last_pic->f->linesize[2]);
1480  }
1481 
1482  if (s->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
1483  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1484  av_frame_unref(s->next_pic->f);
1485  ret = get_buffer(avctx, s->next_pic);
1486  if (ret < 0)
1487  return ret;
1488  memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
1489  memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
1490  s->next_pic->f->linesize[1]);
1491  memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
1492  s->next_pic->f->linesize[2]);
1493  }
1494  }
1495 
/* NOTE(review): original lines 1497 and 1499 (the av_log call opening this
 * debug message and its picture-type-char argument) are missing here. */
1496  if (avctx->debug & FF_DEBUG_PICT_INFO)
1498  "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1500  s->halfpel_flag, s->thirdpel_flag,
1501  s->adaptive_quant, s->qscale, s->slice_num);
1502 
/* NOTE(review): original line 1504 (one more skip_frame condition) is
 * missing from this extraction. */
1503  if (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B ||
1505  avctx->skip_frame >= AVDISCARD_ALL)
1506  return 0;
1507 
1508  if (s->next_p_frame_damaged) {
1509  if (s->pict_type == AV_PICTURE_TYPE_B)
1510  return 0;
1511  else
1512  s->next_p_frame_damaged = 0;
1513  }
1514 
/* frame-number bookkeeping; slice_num is an 8-bit counter, hence the
 * mod-256 wrap corrections below.  NOTE(review): original lines 1516,
 * 1521 and 1528 (the offset computations / condition continuation) are
 * missing from this extraction. */
1515  if (s->pict_type == AV_PICTURE_TYPE_B) {
1517 
1518  if (s->frame_num_offset < 0)
1519  s->frame_num_offset += 256;
1520  if (s->frame_num_offset == 0 ||
1522  av_log(s->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1523  return -1;
1524  }
1525  } else {
1526  s->prev_frame_num = s->frame_num;
1527  s->frame_num = s->slice_num;
1529 
1530  if (s->prev_frame_num_offset < 0)
1531  s->prev_frame_num_offset += 256;
1532  }
1533 
/* mark the column left of the picture as unavailable in the ref cache */
1534  for (m = 0; m < 2; m++) {
1535  int i;
1536  for (i = 0; i < 4; i++) {
1537  int j;
1538  for (j = -1; j < 4; j++)
1539  s->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1540  if (i < 3)
1541  s->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1542  }
1543  }
1544 
/* main macroblock decode loop */
1545  for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
1546  for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
1547  unsigned mb_type;
1548  s->mb_xy = s->mb_x + s->mb_y * s->mb_stride;
1549 
/* fewer than 8 bits left: either a byte-aligned end or zero padding
 * means a new slice header follows */
1550  if ((get_bits_left(&s->gb_slice)) <= 7) {
1551  if (((get_bits_count(&s->gb_slice) & 7) == 0 ||
1552  show_bits(&s->gb_slice, get_bits_left(&s->gb_slice) & 7) == 0)) {
1553 
1554  if (svq3_decode_slice_header(avctx))
1555  return -1;
1556  }
1557  if (s->slice_type != s->pict_type) {
1558  avpriv_request_sample(avctx, "non constant slice type");
1559  }
1560  /* TODO: support s->mb_skip_run */
1561  }
1562 
1563  mb_type = get_interleaved_ue_golomb(&s->gb_slice);
1564 
/* remap the coded type into the unified 0..33 mb_type space */
1565  if (s->pict_type == AV_PICTURE_TYPE_I)
1566  mb_type += 8;
1567  else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1568  mb_type += 4;
/* NOTE(review): original line 1570 (the av_log call opening this error
 * message) is missing from this extraction. */
1569  if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1571  "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
1572  return -1;
1573  }
1574 
1575  if (mb_type != 0 || s->cbp)
1576  hl_decode_mb(s);
1577 
1578  if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay)
1579  s->cur_pic->mb_type[s->mb_x + s->mb_y * s->mb_stride] =
1580  (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1581  }
1582 
1583  ff_draw_horiz_band(avctx, s->cur_pic->f,
1584  s->last_pic->f->data[0] ? s->last_pic->f : NULL,
1585  16 * s->mb_y, 16, PICT_FRAME, 0,
1586  s->low_delay);
1587  }
1588 
1589  left = buf_size*8 - get_bits_count(&s->gb_slice);
1590 
1591  if (s->mb_y != s->mb_height || s->mb_x != s->mb_width) {
1592  av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, s->mb_y, s->mb_x, left);
1593  //av_hex_dump(stderr, buf+buf_size-8, 8);
1594  }
1595 
/* negative remainder means we over-read the bitstream */
1596  if (left < 0) {
1597  av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
1598  return -1;
1599  }
1600 
/* output order: B frames and low-delay streams emit cur_pic directly,
 * otherwise the delayed last_pic is emitted */
1601  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
1602  ret = av_frame_ref(data, s->cur_pic->f);
1603  else if (s->last_pic->f->data[0])
1604  ret = av_frame_ref(data, s->last_pic->f);
1605  if (ret < 0)
1606  return ret;
1607 
1608  /* Do not output the last pic after seeking. */
1609  if (s->last_pic->f->data[0] || s->low_delay)
1610  *got_frame = 1;
1611 
1612  if (s->pict_type != AV_PICTURE_TYPE_B) {
1613  FFSWAP(SVQ3Frame*, s->cur_pic, s->next_pic);
1614  } else {
1615  av_frame_unref(s->cur_pic->f);
1616  }
1617 
1618  return buf_size;
1619 }
1620 
1622 {
1623  SVQ3Context *s = avctx->priv_data;
1624 
1625  free_picture(avctx, s->cur_pic);
1626  free_picture(avctx, s->next_pic);
1627  free_picture(avctx, s->last_pic);
1628  av_frame_free(&s->cur_pic->f);
1629  av_frame_free(&s->next_pic->f);
1630  av_frame_free(&s->last_pic->f);
1631  av_freep(&s->slice_buf);
1634  av_freep(&s->mb2br_xy);
1635 
1636 
1637  av_freep(&s->buf);
1638  s->buf_size = 0;
1639 
1640  return 0;
1641 }
1642 
/*
 * Public AVCodec registration for the SVQ3 decoder.
 * NOTE(review): this extraction is missing the declaration line
 * "AVCodec ff_svq3_decoder = {" (original line 1643) as well as the
 * .init and .decode callback lines (original lines 1649 and 1651) and
 * the continuation of the .capabilities flag list (lines 1653-1654).
 */
1644  .name = "svq3",
1645  .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1646  .type = AVMEDIA_TYPE_VIDEO,
1647  .id = AV_CODEC_ID_SVQ3,
1648  .priv_data_size = sizeof(SVQ3Context),
1650  .close = svq3_decode_end,
1652  .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND |
/* supported output formats: full-range 4:2:0 only */
1655  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1656  AV_PIX_FMT_NONE},
1657 };
uint8_t pred_mode
Definition: h264data.h:35
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
#define NULL
Definition: coverity.c:32
discard all frames except keyframes
Definition: avcodec.h:235
void(* h264_chroma_dc_dequant_idct)(int16_t *block, int qmul)
Definition: h264dsp.h:104
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
int cbp
Definition: svq3.c:113
static int shift(int a, int b)
Definition: sonic.c:82
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
int size
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
#define C
HpelDSPContext hdsp
Definition: svq3.c:89
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
static int svq3_decode_block(GetBitContext *gb, int16_t *block, int index, const int type)
Definition: svq3.c:302
else temp
Definition: vf_mcdeint.c:256
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
int prev_frame_num
Definition: svq3.c:117
static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n, int part_width, int list, int ref, int *const mx, int *const my)
Get the predicted MV.
Definition: svq3.c:383
#define avpriv_request_sample(...)
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1161
int size
Definition: packet.h:356
int mb_xy
Definition: svq3.c:124
const uint8_t * buffer
Definition: get_bits.h:62
#define av_bswap16
Definition: bswap.h:31
uint8_t * slice_buf
Definition: svq3.c:98
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
int v_edge_pos
Definition: svq3.c:109
const uint8_t ff_h264_quant_rem6[QP_MAX_NUM+1]
Definition: h264data.c:174
discard all
Definition: avcodec.h:236
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:76
uint8_t run
Definition: svq3.c:209
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1757
#define FULLPEL_MODE
Definition: svq3.c:153
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:51
AVCodec.
Definition: codec.h:190
#define MB_TYPE_INTRA4x4
Definition: mpegutils.h:51
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
#define AV_COPY32(d, s)
Definition: intreadwrite.h:601
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
int16_t mb[16 *48 *2]
Definition: svq3.c:145
Macro definitions for various function/variable attributes.
static int svq3_mc_dir(SVQ3Context *s, int size, int mode, int dir, int avg)
Definition: svq3.c:507
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:2004
static int16_t block[64]
Definition: dct.c:115
SVQ3Frame frames[3]
Definition: svq3.c:150
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:75
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
int has_watermark
Definition: svq3.c:102
int thirdpel_flag
Definition: svq3.c:101
#define MB_TYPE_INTRA16x16
Definition: mpegutils.h:52
int mb_num
Definition: svq3.c:126
const uint8_t ff_h264_dequant4_coeff_init[6][3]
Definition: h264data.c:152
static const uint8_t luma_dc_zigzag_scan[16]
Definition: svq3.c:174
uint8_t
static av_always_inline void hl_decode_mb_idct_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:620
#define av_cold
Definition: attributes.h:88
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
#define mb
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
#define DC_PRED8x8
Definition: h264pred.h:68
int block_offset[2 *(16 *3)]
Definition: svq3.c:149
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1612
static const struct @135 svq3_dct_tables[2][16]
#define AV_RB32
Definition: intreadwrite.h:130
static av_always_inline int svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C, int i, int list, int part_width)
Definition: svq3.c:362
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:444
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
#define MB_TYPE_16x16
Definition: mpegutils.h:54
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
const uint8_t ff_h264_chroma_dc_scan[4]
Definition: h264data.c:54
Context for storing H.264 prediction functions.
Definition: h264pred.h:92
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:97
Public header for CRC hash function implementation.
const char data[16]
Definition: mxf.c:91
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
Definition: mem.h:112
#define height
uint8_t * data
Definition: packet.h:355
thirdpel DSP context
Definition: tpeldsp.h:42
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
const IMbInfo ff_h264_i_mb_type_info[26]
Definition: h264data.c:66
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
AVBufferRef * ref_index_buf[2]
Definition: svq3.c:80
const uint8_t ff_h264_golomb_to_inter_cbp[48]
Definition: h264data.c:48
int ff_h264_check_intra4x4_pred_mode(int8_t *pred_mode_cache, void *logctx, int top_samples_available, int left_samples_available)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:131
thirdpel DSP functions
static const uint8_t header[24]
Definition: sdr2.c:67
enum AVPictureType slice_type
Definition: svq3.c:120
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
#define A(x)
Definition: vp56_arith.h:28
#define av_log(a,...)
int prev_frame_num_offset
Definition: svq3.c:116
int low_delay
Definition: svq3.c:121
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
Definition: svq3.c:722
#define U(x)
Definition: vp56_arith.h:37
#define src
Definition: vp8dsp.c:254
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:816
#define HALFPEL_MODE
Definition: svq3.c:154
AVCodecContext * avctx
Definition: svq3.c:85
int8_t * intra4x4_pred_mode
Definition: svq3.c:135
#define AVERROR(e)
Definition: error.h:43
uint8_t * edge_emu_buffer
Definition: svq3.c:141
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
#define B
Definition: huffyuvdsp.h:32
av_cold void ff_tpeldsp_init(TpelDSPContext *c)
Definition: tpeldsp.c:312
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:188
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
int frame_num
Definition: svq3.c:114
int mb_x
Definition: svq3.c:123
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:301
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
unsigned int left_samples_available
Definition: svq3.c:139
const char * name
Name of the codec implementation.
Definition: codec.h:197
#define IS_SKIP(a)
Definition: mpegutils.h:81
int chroma_pred_mode
Definition: svq3.c:131
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
#define PREDICT_MODE
Definition: svq3.c:156
static const uint8_t offset[127][2]
Definition: vf_spp.c:93
#define fail()
Definition: checkasm.h:123
unsigned int topright_samples_available
Definition: svq3.c:138
Sorenson Vector Quantizer #1 (SVQ1) video codec.
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:502
Definition: svq3.c:70
useful rectangle filling function
AVBufferRef * motion_val_buf[2]
Definition: svq3.c:73
tpel_mc_func avg_tpel_pixels_tab[11]
Definition: tpeldsp.h:54
Half-pel DSP context.
Definition: hpeldsp.h:45
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
SVQ3Frame * cur_pic
Definition: svq3.c:93
Context for storing H.264 DSP functions.
Definition: h264dsp.h:42
uint32_t dequant4_coeff[QP_MAX_NUM+1][16]
Definition: svq3.c:148
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:383
int16_t(*[2] motion_val)[2]
Definition: svq3.c:74
#define FFMIN(a, b)
Definition: common.h:96
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
#define width
int width
picture width / height.
Definition: avcodec.h:699
uint8_t w
Definition: llviddspenc.c:38
int32_t
GetBitContext gb_slice
Definition: svq3.c:97
uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length)
Calculate the CRC of a block.
Definition: crc.c:392
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
static av_cold int svq3_decode_init(AVCodecContext *avctx)
Definition: svq3.c:1129
#define s(width, name)
Definition: cbs_vp9.c:257
tpel_mc_func put_tpel_pixels_tab[11]
Thirdpel motion compensation with rounding (a + b + 1) >> 1.
Definition: tpeldsp.h:53
H.264 / AVC / MPEG-4 part10 codec.
int b_stride
Definition: svq3.c:127
#define AV_RL32
Definition: intreadwrite.h:146
H264PredContext hpc
Definition: svq3.c:88
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:831
int frames
Definition: movenc.c:65
int last_frame_output
Definition: svq3.c:110
int next_p_frame_damaged
Definition: svq3.c:107
#define av_log2
Definition: intmath.h:83
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:535
#define IS_INTRA16x16(a)
Definition: mpegutils.h:76
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
static const int8_t mv[256][2]
Definition: 4xm.c:77
H264DSPContext h264dsp
Definition: svq3.c:87
Half-pel DSP functions.
AVCodec ff_svq3_decoder
Definition: svq3.c:1643
GetBitContext gb
Definition: svq3.c:96
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:331
int debug
debug
Definition: avcodec.h:1611
int intra16x16_pred_mode
Definition: svq3.c:132
main external API structure.
Definition: avcodec.h:526
const uint8_t ff_h264_chroma_qp[7][QP_MAX_NUM+1]
Definition: h264data.c:203
uint8_t * data
The data buffer.
Definition: buffer.h:89
#define QP_MAX_NUM
Definition: h264.h:27
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1854
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
static const uint8_t scan8[16 *3+3]
Definition: h264dec.h:650
int extradata_size
Definition: avcodec.h:628
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
int qscale
Definition: svq3.c:112
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
AVBufferRef * mb_type_buf
Definition: svq3.c:76
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
const uint8_t ff_h264_golomb_to_intra4x4_cbp[48]
Definition: h264data.c:42
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:538
int mb_height
Definition: svq3.c:125
enum AVPictureType pict_type
Definition: svq3.c:119
const uint8_t ff_h264_quant_div6[QP_MAX_NUM+1]
Definition: h264data.c:182
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
int index
Definition: gxfenc.c:89
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264dec.h:666
static void svq3_mc_dir_part(SVQ3Context *s, int x, int y, int width, int height, int mx, int my, int dxy, int thirdpel, int dir, int avg)
Definition: svq3.c:432
uint32_t * mb_type
Definition: svq3.c:77
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
static void init_dequant4_coeff_table(SVQ3Context *s)
Definition: svq3.c:1115
cl_device_type type
static int svq3_decode_end(AVCodecContext *avctx)
Definition: svq3.c:1621
#define mid_pred
Definition: mathops.h:97
int8_t ref_cache[2][5 *8]
Definition: svq3.c:144
static const uint8_t svq3_pred_0[25][2]
Definition: svq3.c:181
int mb_y
Definition: svq3.c:123
AVPictureType
Definition: avutil.h:272
#define IS_INTER(a)
Definition: mpegutils.h:79
int slice_num
Definition: svq3.c:111
AVFrame * f
Definition: svq3.c:71
#define MB_TYPE_SKIP
Definition: mpegutils.h:62
uint8_t * buf
Definition: svq3.c:104
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
SVQ3Frame * last_pic
Definition: svq3.c:95
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> dc
VideoDSPContext vdsp
Definition: svq3.c:91
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:554
static void free_picture(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1342
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
uint32_t * mb2br_xy
Definition: svq3.c:129
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:314
uint8_t level
Definition: svq3.c:210
Definition: vp9.h:48
#define AV_ZERO128(d)
Definition: intreadwrite.h:637
const AVCRC * av_crc_get_table(AVCRCId crc_id)
Get an initialized standard CRC table.
Definition: crc.c:374
A reference to a data buffer.
Definition: buffer.h:81
#define avg(a, b, c, d)
discard all non reference
Definition: avcodec.h:232
int
static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:635
uint8_t non_zero_count_cache[15 *8]
Definition: svq3.c:147
uint8_t cbp
Definition: h264data.h:36
common internal api header.
if(ret< 0)
Definition: vf_mcdeint.c:279
static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1354
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
int mb_stride
Definition: svq3.c:126
int ff_h264_check_intra_pred_mode(void *logctx, int top_samples_available, int left_samples_available, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:179
int16_t mb_luma_dc[3][16 *2]
Definition: svq3.c:146
int h_edge_pos
Definition: svq3.c:108
Bi-dir predicted.
Definition: avutil.h:276
static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
Definition: svq3.c:227
#define stride
int frame_num_offset
Definition: svq3.c:115
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
#define IS_INTRA(x, y)
static const uint32_t svq3_dequant_coeff[32]
Definition: svq3.c:218
void * priv_data
Definition: avcodec.h:553
#define THIRDPEL_MODE
Definition: svq3.c:155
#define PICT_FRAME
Definition: mpegutils.h:39
unsigned int top_samples_available
Definition: svq3.c:137
#define IS_INTRA4x4(a)
Definition: mpegutils.h:75
#define av_free(p)
static void hl_decode_mb(SVQ3Context *s)
Definition: svq3.c:674
static int svq3_decode_slice_header(AVCodecContext *avctx)
Definition: svq3.c:1021
#define PART_NOT_AVAILABLE
Definition: h264dec.h:397
int slice_size
Definition: svq3.c:99
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:378
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
TpelDSPContext tdsp
Definition: svq3.c:90
static const uint8_t svq3_scan[16]
Definition: svq3.c:167
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: svq3.c:134
int mb_width
Definition: svq3.c:125
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:67
static const int8_t svq3_pred_1[6][6][5]
Definition: svq3.c:193
static void svq3_add_idct_c(uint8_t *dst, int16_t *block, int stride, int qp, int dc)
Definition: svq3.c:262
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1217
#define av_freep(p)
uint32_t watermark_key
Definition: svq3.c:103
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:854
#define av_always_inline
Definition: attributes.h:45
SVQ3Frame * next_pic
Definition: svq3.c:94
#define FFSWAP(type, a, b)
Definition: common.h:99
static unsigned get_interleaved_ue_golomb(GetBitContext *gb)
Definition: golomb.h:143
int buf_size
Definition: svq3.c:105
exp golomb vlc stuff
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
This structure stores compressed data.
Definition: packet.h:332
static int svq3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: svq3.c:1402
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:509
mode
Use these values in ebur128_init (or&#39;ed).
Definition: ebur128.h:83
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
for(j=16;j >0;--j)
Predicted.
Definition: avutil.h:275
int halfpel_flag
Definition: svq3.c:100
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
int adaptive_quant
Definition: svq3.c:106
int8_t * ref_index[2]
Definition: svq3.c:81
void * av_mallocz_array(size_t nmemb, size_t size)
Allocate a memory block for an array with av_mallocz().
Definition: mem.c:190
int16_t mv_cache[2][5 *8][2]
Definition: svq3.c:143