    return (int)d & ~((1 << chroma_sub) - 1);
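    /* Editor's note (illustrative): the mask above clears the low chroma_sub
     * bits, snapping the overlay position down onto the chroma grid.  For
     * example, with chroma_sub = 1, normalize_xy(13.7, 1) yields 12:
     * (int)13.7 = 13, and 13 & ~1 = 12. */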
120 "Error when evaluating the expression '%s' for %s\n",
                           char *res, int res_len, int flags)
    if (!strcmp(cmd, "x"))
    else if (!strcmp(cmd, "y"))
static const enum AVPixelFormat overlay_pix_fmts_yuv420[] = {
static const enum AVPixelFormat overlay_pix_fmts_yuv422[] = {
static const enum AVPixelFormat overlay_pix_fmts_yuv444[] = {
        main_formats    = main_pix_fmts_yuv420;
        overlay_formats = overlay_pix_fmts_yuv420;
        main_formats    = main_pix_fmts_yuv422;
        overlay_formats = overlay_pix_fmts_yuv422;
        main_formats    = main_pix_fmts_yuv444;
        overlay_formats = overlay_pix_fmts_yuv444;
        main_formats    = main_pix_fmts_rgb;
        overlay_formats = overlay_pix_fmts_rgb;
        main_formats    = main_pix_fmts_gbrp;
        overlay_formats = overlay_pix_fmts_gbrp;
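        /* Editor's note: each branch of this switch (one per value of the
         * filter's "format" option) selects a matching pair of pixel-format
         * lists, one advertised on the main input and one on the overlay
         * input, so both inputs negotiate to formats the blend code below
         * can handle directly. */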
287 "main w:%d h:%d fmt:%s overlay w:%d h:%d fmt:%s\n",
#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)

#define UNPREMULTIPLY_ALPHA(x, y) ((((x) << 16) - ((x) << 9) + (x)) / ((((x) + (y)) << 8) - ((x) + (y)) - (y) * (x)))

                                                        int main_has_alpha, int x, int y,
                                                        int is_straight, int jobnr, int nb_jobs)
    int i, imax, j, jmax;
    const int src_w = src->width;
    const int src_h = src->height;
    const int dst_w = dst->width;
    const int dst_h = dst->height;
    imax = FFMIN3(-y + dst_h, FFMIN(src_h, dst_h), y + src_h);

    slice_start = i + (imax * jobnr) / nb_jobs;
    slice_end   = i + (imax * (jobnr+1)) / nb_jobs;
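    /* Editor's note (illustrative): imax is the number of overlay rows that
     * are actually visible in the destination, and each of the nb_jobs worker
     * threads takes an even share of them.  With imax = 100 and nb_jobs = 3
     * the jobs cover rows [0,33), [33,66) and [66,100), the last job
     * absorbing the remainder. */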
    dp = dst->data[0] + (y + slice_start) * dst->linesize[0];

    for (i = slice_start; i < slice_end; i++) {
        d = dp + (x+j) * dstep;

        for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) {
            if (main_has_alpha && alpha != 0 && alpha != 255) {

                d[dr] = is_straight ? FAST_DIV255(d[dr] * (255 - alpha) + S[sr] * alpha) :
                        FFMIN(FAST_DIV255(d[dr] * (255 - alpha)) + S[sr], 255);
                d[dg] = is_straight ? FAST_DIV255(d[dg] * (255 - alpha) + S[sg] * alpha) :
                        FFMIN(FAST_DIV255(d[dg] * (255 - alpha)) + S[sg], 255);
                d[db] = is_straight ? FAST_DIV255(d[db] * (255 - alpha) + S[sb] * alpha) :
                        FFMIN(FAST_DIV255(d[db] * (255 - alpha)) + S[sb], 255);
            if (main_has_alpha) {
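/* Editor's sketch (not part of the original file): a quick self-check of the
 * FAST_DIV255() approximation used throughout the blending code.  For any
 * product of two 8-bit values it matches division by 255 with rounding.
 * The helper name check_fast_div255 is hypothetical. */
#include "libavutil/avassert.h"

static void check_fast_div255(void)
{
    for (int a = 0; a <= 255; a++)
        for (int b = 0; b <= 255; b++) {
            int x = a * b;
            av_assert0(FAST_DIV255(x) == (x + 127) / 255);
        }
}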
                                            int src_w, int src_h,
                                            int dst_w, int dst_h,
    int jmax, j, k, kmax;
    jmax = FFMIN3(-yp + dst_hp, FFMIN(src_hp, dst_hp), yp + src_hp);

    slice_start = j + (jmax * jobnr) / nb_jobs;
    slice_end   = j + (jmax * (jobnr+1)) / nb_jobs;

    dp = dst->data[dst_plane]
         + (yp + slice_start) * dst->linesize[dst_plane]

            d = dp + (xp+k) * dst_step;
            da = dap + ((xp+k) << hsub);
            kmax = FFMIN(-xp + dst_wp, src_wp);
            if (((vsub && j+1 < src_hp) || !vsub) && octx->blend_row[i]) {

                da += (1 << hsub) * c;
                a  += (1 << hsub) * c;
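                /* Editor's note (hedged): blend_row[i] is an optional
                 * per-plane fast path (installed by ff_overlay_init_x86() on
                 * x86); it blends a run of pixels and the pointers are then
                 * advanced past the c pixels it handled, before the scalar
                 * loop below finishes the row. */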
            for (; k < kmax; k++) {
                int alpha_v, alpha_h, alpha;

                if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) {
                    alpha = (a[0] + a[src->linesize[3]] +
                             a[1] + a[src->linesize[3]+1]) >> 2;
                } else if (hsub || vsub) {
                    alpha_h = hsub && k+1 < src_wp ?
                        (a[0] + a[1]) >> 1 : a[0];
                    alpha_v = vsub && j+1 < src_hp ?
                        (a[0] + a[src->linesize[3]]) >> 1 : a[0];
                    alpha = (alpha_v + alpha_h) >> 1;
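                /* Editor's note: the alpha plane is stored at full (luma)
                 * resolution, so for subsampled chroma planes the relevant
                 * 2 or 4 overlay alpha samples are averaged before blending
                 * one chroma sample; the same averaging is applied to the
                 * main frame's alpha just below when it carries alpha. */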
                if (main_has_alpha && alpha != 0 && alpha != 255) {
                    if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) {
                        alpha_d = (da[0] + da[dst->linesize[3]] +
                                   da[1] + da[dst->linesize[3]+1]) >> 2;
                    } else if (hsub || vsub) {
                        alpha_h = hsub && k+1 < src_wp ?
                            (da[0] + da[1]) >> 1 : da[0];
                        alpha_v = vsub && j+1 < src_hp ?
                            (da[0] + da[dst->linesize[3]]) >> 1 : da[0];
                        alpha_d = (alpha_v + alpha_h) >> 1;
                    *d = av_clip(FAST_DIV255((*d - 128) * (255 - alpha)) + *s - 128, -128, 128) + 128;

        dap += (1 << vsub) * dst->linesize[3];
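    /* Editor's note: the av_clip() line above is the premultiplied-alpha path
     * for chroma-style planes: the destination sample is attenuated around the
     * 128 bias, the already premultiplied overlay sample is added, and the
     * result is clipped back to the valid range. */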
                            int src_w, int src_h,
                            int dst_w, int dst_h,
                            int jobnr, int nb_jobs)
    int i, imax, j, jmax;

    imax = FFMIN(-y + dst_h, src_h);
    slice_start = (imax * jobnr) / nb_jobs;
    slice_end   = ((imax * (jobnr+1)) / nb_jobs);
    sa = src->data[3] + (i + slice_start) * src->linesize[3];
    da = dst->data[3] + (y + i + slice_start) * dst->linesize[3];

    for (i = i + slice_start; i < slice_end; i++) {
        for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) {
            if (alpha != 0 && alpha != 255) {
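            /* Editor's note (hedged): alpha_composite() merges the two alpha
             * planes with the usual "over" relation,
             * out_a = main_a + (1 - main_a) * overlay_a; the branch above
             * handles partially transparent overlay samples, while 0 and 255
             * are the trivial cases. */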
                                                     int jobnr, int nb_jobs)
    const int src_w = src->width;
    const int src_h = src->height;
    const int dst_w = dst->width;
    const int dst_h = dst->height;

    blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0,    0,    x, y, main_has_alpha,
    blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, x, y, main_has_alpha,
    blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, x, y, main_has_alpha,

        alpha_composite(src, dst, src_w, src_h, dst_w, dst_h, x, y, jobnr, nb_jobs);
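    /* Editor's note: blend_slice_yuv() blends plane 0 (luma, never subsampled)
     * and planes 1 and 2 (chroma, using the hsub/vsub factors of the format),
     * then composites the alpha planes when the main input carries alpha. */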
    const int src_w = src->width;
    const int src_h = src->height;
    const int dst_w = dst->width;
    const int dst_h = dst->height;

    blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0,    0,    x, y, main_has_alpha,
    blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, x, y, main_has_alpha,
    blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, x, y, main_has_alpha,

        alpha_composite(src, dst, src_w, src_h, dst_w, dst_h, x, y, jobnr, nb_jobs);
    blend_slice_yuv(ctx, td->dst, td->src, 1, 1, 0, s->x, s->y, 1, jobnr, nb_jobs);

    blend_slice_yuv(ctx, td->dst, td->src, 1, 1, 1, s->x, s->y, 1, jobnr, nb_jobs);

    blend_slice_yuv(ctx, td->dst, td->src, 1, 0, 0, s->x, s->y, 1, jobnr, nb_jobs);

    blend_slice_yuv(ctx, td->dst, td->src, 1, 0, 1, s->x, s->y, 1, jobnr, nb_jobs);

    blend_slice_yuv(ctx, td->dst, td->src, 0, 0, 0, s->x, s->y, 1, jobnr, nb_jobs);

    blend_slice_yuv(ctx, td->dst, td->src, 0, 0, 1, s->x, s->y, 1, jobnr, nb_jobs);

    blend_slice_planar_rgb(ctx, td->dst, td->src, 0, 0, 0, s->x, s->y, 1, jobnr, nb_jobs);

    blend_slice_planar_rgb(ctx, td->dst, td->src, 0, 0, 1, s->x, s->y, 1, jobnr, nb_jobs);

    blend_slice_yuv(ctx, td->dst, td->src, 1, 1, 0, s->x, s->y, 0, jobnr, nb_jobs);

    blend_slice_yuv(ctx, td->dst, td->src, 1, 1, 1, s->x, s->y, 0, jobnr, nb_jobs);

    blend_slice_yuv(ctx, td->dst, td->src, 1, 0, 0, s->x, s->y, 0, jobnr, nb_jobs);

    blend_slice_yuv(ctx, td->dst, td->src, 1, 0, 1, s->x, s->y, 0, jobnr, nb_jobs);

    blend_slice_yuv(ctx, td->dst, td->src, 0, 0, 0, s->x, s->y, 0, jobnr, nb_jobs);

    blend_slice_yuv(ctx, td->dst, td->src, 0, 0, 1, s->x, s->y, 0, jobnr, nb_jobs);

    blend_slice_planar_rgb(ctx, td->dst, td->src, 0, 0, 0, s->x, s->y, 0, jobnr, nb_jobs);

    blend_slice_planar_rgb(ctx, td->dst, td->src, 0, 0, 1, s->x, s->y, 0, jobnr, nb_jobs);
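/* Editor's note: each blend_slice_* wrapper above fixes one combination of
 * (hsub, vsub, main_has_alpha, is_straight) for a pixel-format family:
 * 4:2:0 uses hsub = vsub = 1, 4:2:2 uses hsub = 1 and vsub = 0, 4:4:4 and
 * planar RGB use 0/0, the yuva and gbrap variants set main_has_alpha = 1, and
 * the _pm variants pass is_straight = 0 for premultiplied overlay alpha.
 * Below is a sketch of how the per-format dispatch plausibly looks in
 * config_input_main(); the OVERLAY_FORMAT_* names and s->main_has_alpha are
 * assumptions for illustration, not verbatim source: */
    switch (s->format) {
    case OVERLAY_FORMAT_YUV420:
        s->blend_slice = s->main_has_alpha ? blend_slice_yuva420 : blend_slice_yuv420;
        break;
    case OVERLAY_FORMAT_YUV422:
        s->blend_slice = s->main_has_alpha ? blend_slice_yuva422 : blend_slice_yuv422;
        break;
    /* ... and likewise for 4:4:4, RGB and the premultiplied variants. */
    }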
    if (s->x < mainpic->width  && s->x + second->width  >= 0 &&
        s->y < mainpic->height && s->y + second->height >= 0) {
#define OFFSET(x) offsetof(OverlayContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

    { "eof_action", "Action to take when encountering EOF from secondary input ",
    { "shortest", "force termination when the shortest input terminates",
        OFFSET(fs.opt_shortest), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
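/* Editor's note (illustrative usage, not from this file): from the ffmpeg
 * command line these options are set in the filtergraph string, e.g.
 *   ffmpeg -i main.mp4 -i logo.png \
 *          -filter_complex "[0:v][1:v]overlay=x=10:y=10:eof_action=pass:shortest=1" out.mp4
 * where x and y may also be expressions, handled by the expression
 * evaluation and the "x"/"y" command processing earlier in the file. */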
    .preinit       = overlay_framesync_preinit,
    .priv_class    = &overlay_class,
    .inputs        = avfilter_vf_overlay_inputs,
    .outputs       = avfilter_vf_overlay_outputs,