Paul B Mahol (12019-09-30):
> Also properly handle EOF timestamps.
> Fixes #6833.
>
> Signed-off-by: Paul B Mahol <[email protected]>
> ---
>  libavfilter/setpts.c | 74 ++++++++++++++++++++++++++++++++------------
>  1 file changed, 55 insertions(+), 19 deletions(-)
>
> diff --git a/libavfilter/setpts.c b/libavfilter/setpts.c
> index 800ba6a83f..076534c518 100644
> --- a/libavfilter/setpts.c
> +++ b/libavfilter/setpts.c
> @@ -33,6 +33,7 @@
>  #include "libavutil/time.h"
>  #include "audio.h"
>  #include "avfilter.h"
> +#include "filters.h"
>  #include "internal.h"
>  #include "video.h"
>
> @@ -154,6 +155,28 @@ static inline char *double2int64str(char *buf, double v)
>      return buf;
>  }
>
> +static double eval_pts(SetPTSContext *setpts, AVFilterLink *inlink, AVFrame *frame, int64_t pts)
> +{
> +    if (isnan(setpts->var_values[VAR_STARTPTS])) {
> +        setpts->var_values[VAR_STARTPTS] = TS2D(pts);
> +        setpts->var_values[VAR_STARTT  ] = TS2T(pts, inlink->time_base);
> +    }
> +    setpts->var_values[VAR_PTS       ] = TS2D(pts);
> +    setpts->var_values[VAR_T         ] = TS2T(pts, inlink->time_base);
> +    setpts->var_values[VAR_POS       ] = !frame || frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
> +    setpts->var_values[VAR_RTCTIME   ] = av_gettime();
> +
> +    if (frame) {
> +        if (inlink->type == AVMEDIA_TYPE_VIDEO) {
> +            setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
> +        } else if (inlink->type == AVMEDIA_TYPE_AUDIO) {
> +            setpts->var_values[VAR_S] = frame->nb_samples;
> +            setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
> +        }
> +    }
> +
> +    return av_expr_eval(setpts->expr, setpts->var_values, NULL);
> +}
>  #define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v)
>
>  static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
> @@ -162,23 +185,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
>      int64_t in_pts = frame->pts;
>      double d;
>
> -    if (isnan(setpts->var_values[VAR_STARTPTS])) {
> -        setpts->var_values[VAR_STARTPTS] = TS2D(frame->pts);
> -        setpts->var_values[VAR_STARTT  ] = TS2T(frame->pts, inlink->time_base);
> -    }
> -    setpts->var_values[VAR_PTS       ] = TS2D(frame->pts);
> -    setpts->var_values[VAR_T         ] = TS2T(frame->pts, inlink->time_base);
> -    setpts->var_values[VAR_POS       ] = frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
> -    setpts->var_values[VAR_RTCTIME   ] = av_gettime();
> -
> -    if (inlink->type == AVMEDIA_TYPE_VIDEO) {
> -        setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
> -    } else if (inlink->type == AVMEDIA_TYPE_AUDIO) {
> -        setpts->var_values[VAR_S] = frame->nb_samples;
> -        setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
> -    }
> -
> -    d = av_expr_eval(setpts->expr, setpts->var_values, NULL);
> +    d = eval_pts(setpts, inlink, frame, frame->pts);
>      frame->pts = D2TS(d);
>
>      av_log(inlink->dst, AV_LOG_TRACE,
I think there should be an AV_LOG_TRACE message for the output timestamp too.
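For illustration only, such a trace in the new EOF branch of activate() might look like the sketch below; the message wording, the out_pts temporary and the exact placement are my assumptions, not part of the patch:

    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        /* remap the EOF timestamp through the same expression as frames */
        int64_t out_pts = D2TS(eval_pts(setpts, inlink, NULL, pts));

        /* hypothetical trace of input -> output status timestamp */
        av_log(ctx, AV_LOG_TRACE, "EOF pts:%"PRId64" -> pts:%"PRId64"\n",
               pts, out_pts);
        ff_outlink_set_status(outlink, status, out_pts);
        return 0;
    }

That would keep the EOF path roughly symmetrical with the existing per-frame trace in filter_frame().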
> @@ -216,6 +223,35 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
>      return ff_filter_frame(inlink->dst->outputs[0], frame);
>  }
>
> +static int activate(AVFilterContext *ctx)
> +{
> +    SetPTSContext *setpts = ctx->priv;
> +    AVFilterLink *inlink = ctx->inputs[0];
> +    AVFilterLink *outlink = ctx->outputs[0];
> +    AVFrame *in;
> +    int status;
> +    int64_t pts;
> +    int ret;
> +
> +    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
> +
> +    ret = ff_inlink_consume_frame(inlink, &in);
> +    if (ret < 0)
> +        return ret;
> +    if (ret > 0)
> +        return filter_frame(inlink, in);
> +
> +    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
> +        pts = D2TS(eval_pts(setpts, inlink, NULL, pts));
> +        ff_outlink_set_status(outlink, status, pts);
> +        return 0;
> +    }
> +
> +    FF_FILTER_FORWARD_WANTED(outlink, inlink);
> +
> +    return FFERROR_NOT_READY;
> +}
> +
>  static av_cold void uninit(AVFilterContext *ctx)
>  {
>      SetPTSContext *setpts = ctx->priv;
> @@ -239,7 +275,6 @@ static const AVFilterPad avfilter_vf_setpts_inputs[] = {
>          .name         = "default",
>          .type         = AVMEDIA_TYPE_VIDEO,
>          .config_props = config_input,
> -        .filter_frame = filter_frame,
>      },
>      { NULL }
>  };
> @@ -256,6 +291,7 @@ AVFilter ff_vf_setpts = {
>      .name      = "setpts",
>      .description = NULL_IF_CONFIG_SMALL("Set PTS for the output video frame."),
>      .init      = init,
> +    .activate  = activate,
>      .uninit    = uninit,
>
>      .priv_size = sizeof(SetPTSContext),
> @@ -276,7 +312,6 @@ static const AVFilterPad asetpts_inputs[] = {
>          .name         = "default",
>          .type         = AVMEDIA_TYPE_AUDIO,
>          .config_props = config_input,
> -        .filter_frame = filter_frame,
>      },
>      { NULL }
>  };
> @@ -293,6 +328,7 @@ AVFilter ff_af_asetpts = {
>      .name        = "asetpts",
>      .description = NULL_IF_CONFIG_SMALL("Set PTS for the output audio frame."),
>      .init        = init,
> +    .activate    = activate,
>      .uninit      = uninit,
>      .priv_size   = sizeof(SetPTSContext),
>      .priv_class  = &asetpts_class,
Apart from that, LGTM. Thanks for taking care of it.
Regards,
--
Nicolas George
