#define FREEZE_INTERVAL 128

/* adpcm_encode_init(): the trellis search keeps 2^avctx->trellis candidate states */
int frontier = 1 << avctx->trellis;
/* adpcm_encode_init(), ADPCM_MS extradata: samples per block, coefficient count,
 * then the seven predictor coefficient pairs */
bytestream_put_le16(&extradata, avctx->frame_size);
bytestream_put_le16(&extradata, 7);
for (i = 0; i < 7; i++) {
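/* For reference (assuming the standard MS ADPCM extradata layout this mirrors):
 *   bytes 0-1   samples per block (frame_size), little-endian
 *   bytes 2-3   number of coefficient pairs (7)
 *   bytes 4-31  seven (coeff1, coeff2) little-endian int16 pairs */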
/* adpcm_ima_qt_compress_sample(): bit 3 of the nibble carries the sign of delta */
int nibble = 8 * (delta < 0);

/* start the reconstructed magnitude at the decoder's step/8 base term */
diff = delta + (step >> 3);
/* adpcm_ms_compress_sample(): quantize the prediction error to a signed 4-bit code */
int predictor, nibble, bias;

nibble = sample - predictor;   /* prediction error; predictor built from coeff1/coeff2 above */

nibble = (nibble + bias) / c->idelta;       /* bias is +/- idelta/2, for rounding */
nibble = av_clip_intp2(nibble, 3) & 0x0F;   /* clip to [-8, 7], keep as an unsigned nibble */

/* reconstruct exactly as the decoder will, so the next prediction matches */
predictor += ((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;

c->sample1 = av_clip_int16(predictor);
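/* Worked example with hypothetical values: idelta = 16, predictor = 1000,
 * sample = 1100. The error is 100, (100 + 8) / 16 = 6, which already fits in
 * [-8, 7], so the stored code is 6 and the reconstruction becomes
 * 1000 + 6 * 16 = 1096, the same value the decoder will compute. */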
/* adpcm_yamaha_compress_sample(): 3-bit magnitude plus a sign bit */
nibble = FFMIN(7, abs(delta) * 4 / c->step) + (delta < 0) * 8;

/* keep the adaptive step inside the range the decoder expects */
c->step = av_clip(c->step, 127, 24576);
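/* The matching reconstruction, visible in LOOP_NODES further down, is roughly
 *   dec_sample = predictor + (step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
 * which is what the quantizer above approximates. */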
/* trellis (Viterbi-like) search over candidate encoder states */
static void adpcm_compress_trellis(AVCodecContext *avctx,
                                   const int16_t *samples, uint8_t *dst,
                                   ADPCMChannelStatus *c, int n, int stride)
{
    const int frontier = 1 << avctx->trellis;
    int pathn = 0, froze = -1, i, j, k, generation = 0;

    /* hash maps a decoded sample value to the generation that produced it,
     * so equivalent candidate states can be merged */
    memset(hash, 0xff, 65536 * sizeof(*hash));

    memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
    nodes[0] = node_buf + frontier;

    /* Yamaha: default initial step when the channel has no history yet */
    nodes[0]->step = 127;
    for (i = 0; i < n; i++) {
        memset(nodes_next, 0, frontier * sizeof(TrellisNode*));
        for (j = 0; j < frontier && nodes[j]; j++) {
            const int range = (j < frontier / 2) ? 1 : 0;
            const int step  = nodes[j]->step;
            /* ADPCM_MS branch: same predictor as adpcm_ms_compress_sample() */
            const int predictor = ((nodes[j]->sample1 * c->coeff1) +
                                   (nodes[j]->sample2 * c->coeff2)) / 64;
            const int div  = (sample - predictor) / step;
            const int nmin = av_clip(div - range, -8, 6);
            const int nmax = av_clip(div + range, -7, 7);
            for (nidx = nmin; nidx <= nmax; nidx++) {
                const int nibble = nidx & 0xf;
                int dec_sample   = predictor + nidx * step;
#define STORE_NODE(NAME, STEP_INDEX)\
    /* ... */\
    dec_sample = av_clip_int16(dec_sample);\
    d   = sample - dec_sample;\
    ssd = nodes[j]->ssd + d*(unsigned)d;\
    /* drop the candidate if the 32-bit cost counter wrapped around */\
    if (ssd < nodes[j]->ssd)\
        goto next_##NAME;\
    /* merge candidates that decode to the same sample value */\
    h = &hash[(uint16_t) dec_sample];\
    if (*h == generation)\
        goto next_##NAME;\
    if (heap_pos < frontier) {\
        pos = heap_pos++;\
    } else {\
        /* replace one of the heap leaves, cycling through the slots */\
        pos = (frontier >> 1) +\
              (heap_pos & ((frontier >> 1) - 1));\
        if (ssd > nodes_next[pos]->ssd)\
            goto next_##NAME;\
        /* ... */\
    }\
    u = nodes_next[pos];\
    if (!u) {\
        av_assert1(pathn < FREEZE_INTERVAL << avctx->trellis);\
        /* ... allocate a fresh node from node_buf ... */\
        nodes_next[pos] = u;\
    }\
    u->step    = STEP_INDEX;\
    u->sample2 = nodes[j]->sample1;\
    u->sample1 = dec_sample;\
    paths[u->path].nibble = nibble;\
    paths[u->path].prev   = nodes[j]->path;\
    /* sift the new node up to restore the min-heap ordering by ssd */\
    while (pos > 0) {\
        int parent = (pos - 1) >> 1;\
        if (nodes_next[parent]->ssd <= ssd)\
            break;\
        FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
        pos = parent;\
    }\
    next_##NAME:;

#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
    const int predictor = nodes[j]->sample1;\
    const int div       = (sample - predictor) * 4 / STEP_TABLE;\
    int nmin = av_clip(div - range, -7, 6);\
    int nmax = av_clip(div + range, -6, 7);\
    /* ... */\
    for (nidx = nmin; nidx <= nmax; nidx++) {\
        const int nibble = nidx < 0 ? 7 - nidx : nidx;\
        int dec_sample   = predictor +\
            (STEP_TABLE * ff_adpcm_yamaha_difflookup[nibble]) / 8;\
        STORE_NODE(NAME, STEP_INDEX);\
    }

        /* reset the state-merging hash when the generation counter wraps */
        if (generation == 255) {
            memset(hash, 0xff, 65536 * sizeof(*hash));
        /* periodically rebase path costs so the ssd accumulators stay small */
        if (nodes[0]->ssd > (1 << 28)) {
            for (j = 1; j < frontier && nodes[j]; j++)
                nodes[j]->ssd -= nodes[0]->ssd;
            /* every FREEZE_INTERVAL samples, commit the best path found so far */
            p = &paths[nodes[0]->path];
            for (k = i; k > froze; k--) {
            memset(nodes + 1, 0, (frontier - 1) * sizeof(TrellisNode*));
    /* walk the prev links backwards to emit the tail that was never frozen */
    p = &paths[nodes[0]->path];
    for (i = n - 1; i > froze; i--) {
    /* hand the winning node's decoder state back to the channel status */
    c->sample1 = nodes[0]->sample1;
    c->sample2 = nodes[0]->sample2;
    c->step    = nodes[0]->step;
    c->idelta  = nodes[0]->step;
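/* Each TrellisPath entry stores one emitted nibble plus the index of its
 * predecessor (see paths[u->path].nibble / .prev in STORE_NODE), so the best
 * encoding is recovered simply by following prev links backwards, as the two
 * loops above do. */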
static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                              const AVFrame *frame, int *got_packet_ptr)
{
    int n, i, ch, st, pkt_size, ret;
    const int16_t *samples;

    samples = (const int16_t *)frame->data[0];
        /* ADPCM_IMA_WAV: per-channel block headers (first sample + step index) */
        for (ch = 0; ch < avctx->channels; ch++) {

        /* trellis path: encode each channel into a temporary nibble buffer */
        for (ch = 0; ch < avctx->channels; ch++) {
            adpcm_compress_trellis(avctx, /* ... */,
                                   buf + ch * blocks * 8, &c->status[ch],
                                   /* ... */);
        }
        /* then interleave 4 bytes (8 nibbles) per channel, first sample in the low nibble */
        for (i = 0; i < blocks; i++) {
            for (ch = 0; ch < avctx->channels; ch++) {
                uint8_t *buf1 = buf + ch * blocks * 8 + i * 8;
                for (j = 0; j < 8; j += 2)
                    *dst++ = buf1[j] | (buf1[j + 1] << 4);
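                    /* Packing example with hypothetical codes: 0x3 for the first
                     * sample and 0xA for the second gives the byte 0xA3 here,
                     * whereas the high-nibble-first packings used further down,
                     * e.g. (buf[i] << 4) | buf[i + 1], would give 0x3A. */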
        /* non-trellis path: compress and pack two samples at a time directly */
        for (i = 0; i < blocks; i++) {
            for (ch = 0; ch < avctx->channels; ch++) {
                const int16_t *smp = &samples_p[ch][1 + i * 8];
                for (j = 0; j < 8; j += 2) {
        /* ADPCM_IMA_QT: 64 samples per channel per packet */
        for (ch = 0; ch < avctx->channels; ch++) {
            /* trellis path: emit the 64 precomputed nibbles */
            for (i = 0; i < 64; i++)
            /* direct path: two samples per output byte */
            for (i = 0; i < 64; i += 2) {
        for (ch = 0; ch < avctx->channels; ch++) {
        for (i = 0; i < avctx->channels; i++) {
            /* stereo: the second channel's codes are produced into buf + n */
            adpcm_compress_trellis(avctx, /* ... */, buf + n, &c->status[1], n,
                                   /* ... */);
            for (i = 0; i < n; i++) {

                put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1],
                                                           samples[2 * i + 1]));
    /* ADPCM_MS block header: per-channel predictor index, idelta, sample1, sample2 */
    for (i = 0; i < avctx->channels; i++) {

    for (i = 0; i < avctx->channels; i++) {

    for (i = 0; i < avctx->channels; i++)

    for (i = 0; i < avctx->channels; i++)
            /* mono: two consecutive codes per byte, first sample in the high nibble */
            for (i = 0; i < n; i += 2)
                *dst++ = (buf[i] << 4) | buf[i + 1];

            /* stereo: left channel in the high nibble, right channel in the low */
            for (i = 0; i < n; i++)
                *dst++ = (buf[i] << 4) | buf[n + i];
        /* the 7 * channels header bytes have already been written above */
        for (i = 7 * avctx->channels; i < avctx->block_align; i++) {
            /* ADPCM_YAMAHA packs the first code in the low nibble */
            for (i = 0; i < n; i += 2)
                *dst++ = buf[i] | (buf[i + 1] << 4);

            for (i = 0; i < n; i++)
                *dst++ = buf[i] | (buf[n + i] << 4);
        for (n *= avctx->channels; n > 0; n--) {
    avpkt->size = pkt_size;
#define ADPCM_ENCODER(id_, name_, sample_fmts_, capabilities_, long_name_) \
AVCodec ff_ ## name_ ## _encoder = {                                       \
    .name           = #name_,                                              \
    .long_name      = NULL_IF_CONFIG_SMALL(long_name_),                    \
    .type           = AVMEDIA_TYPE_AUDIO,                                  \
    .id             = id_,                                                 \
    .priv_data_size = sizeof(ADPCMEncodeContext),                          \
    .init           = adpcm_encode_init,                                   \
    .encode2        = adpcm_encode_frame,                                  \
    .close          = adpcm_encode_close,                                  \
    .sample_fmts    = sample_fmts_,                                        \
    .capabilities   = capabilities_,                                       \
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,                           \
}
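/* Illustrative use only (a sketch; the argument values are assumptions, not
 * copied from the file): each concrete encoder is then declared by expanding
 * the macro, along the lines of
 *
 *   ADPCM_ENCODER(AV_CODEC_ID_ADPCM_MS, adpcm_ms, sample_fmts, 0,
 *                 "ADPCM Microsoft");
 */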