github.com/FFmpeg/FFmpeg.git
author     Guo, Yejun <yejun.guo@intel.com>   2020-09-10 17:29:57 +0300
committer  Guo, Yejun <yejun.guo@intel.com>   2020-09-21 16:26:56 +0300
commit     fce3e3e137843d86411f8868f18e1c3f472de0e5 (patch)
tree       4ad669abc4ea7fc78220f4b24f0455fb17a8234b /libavfilter/vf_sr.c
parent     2003e32f62d94ba75b59d70632c9f2862b383591 (diff)
dnn: put DNNModel.set_input and DNNModule.execute_model together
Suppose we have a detect filter and a classify filter in the future: the detect filter generates bounding boxes (BBox) as AVFrame side data, and the classify filter executes a DNN model for each BBox. For each BBox we need to crop the AVFrame, copy its data into the DNN model input, and run the model. That means we would have to save the in_frame at DNNModel.set_input and use it later at DNNModule.execute_model, and such saving is not feasible once execute_model is made asynchronous. This patch passes the in_frame as an execute_model parameter instead, so all the information needed for one inference is gathered in a single function call. It also makes it easy to support async BBox inference.
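
For illustration, a minimal self-contained sketch of the resulting call pattern. The FrameStub type and the execute_model signature below are simplified stand-ins inferred from this diff, not the real definitions in libavfilter/dnn_interface.h:

#include <stdio.h>

/* Hypothetical stand-ins for AVFrame and the DNN return codes. */
typedef struct FrameStub { int w, h; } FrameStub;
typedef enum { DNN_SUCCESS, DNN_ERROR } DNNReturnType;

/* New-style execute_model: the input name and the input frame are passed
 * together with the output names, so the backend no longer has to cache
 * anything from a separate set_input() call between two invocations. */
static DNNReturnType execute_model(void *model, const char *input_name,
                                   FrameStub *in_frame,
                                   const char **output_names, int nb_output,
                                   FrameStub *out_frame)
{
    (void)model; (void)nb_output;   /* unused in this sketch */
    printf("inference: %s -> %s on a %dx%d frame\n",
           input_name, output_names[0], in_frame->w, in_frame->h);
    out_frame->w = in_frame->w;
    out_frame->h = in_frame->h;
    return DNN_SUCCESS;
}

int main(void)
{
    FrameStub in = { 1920, 1080 }, out = { 0, 0 };
    const char *model_output_name = "y";

    /* Everything one inference needs travels in a single call, which is
     * what makes per-BBox async execution straightforward later on. */
    if (execute_model(NULL, "x", &in, &model_output_name, 1, &out) != DNN_SUCCESS)
        return 1;
    return 0;
}

Compare this with the old flow visible in the hunks below, where set_input() and execute_model() were separate calls and the input frame had to be remembered in between.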
Diffstat (limited to 'libavfilter/vf_sr.c')
-rw-r--r--  libavfilter/vf_sr.c  25
1 file changed, 7 insertions(+), 18 deletions(-)
diff --git a/libavfilter/vf_sr.c b/libavfilter/vf_sr.c
index 2eda8c3219..72a3137262 100644
--- a/libavfilter/vf_sr.c
+++ b/libavfilter/vf_sr.c
@@ -114,16 +114,11 @@ static int config_output(AVFilterLink *outlink)
     AVFrame *out = NULL;
     const char *model_output_name = "y";
 
-    AVFrame *fake_in = ff_get_video_buffer(inlink, inlink->w, inlink->h);
-    result = (ctx->model->set_input)(ctx->model->model, fake_in, "x");
-    if (result != DNN_SUCCESS) {
-        av_log(context, AV_LOG_ERROR, "could not set input for the model\n");
-        return AVERROR(EIO);
-    }
-
     // have a try run in case that the dnn model resize the frame
+    AVFrame *fake_in = ff_get_video_buffer(inlink, inlink->w, inlink->h);
     out = ff_get_video_buffer(inlink, inlink->w, inlink->h);
-    result = (ctx->dnn_module->execute_model)(ctx->model, (const char **)&model_output_name, 1, out);
+    result = (ctx->dnn_module->execute_model)(ctx->model, "x", fake_in,
+                                              (const char **)&model_output_name, 1, out);
     if (result != DNN_SUCCESS){
         av_log(context, AV_LOG_ERROR, "failed to execute loaded model\n");
         return AVERROR(EIO);
@@ -178,19 +173,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
         sws_scale(ctx->sws_pre_scale,
                   (const uint8_t **)in->data, in->linesize, 0, in->height,
                   out->data, out->linesize);
-        dnn_result = (ctx->model->set_input)(ctx->model->model, out, "x");
+        dnn_result = (ctx->dnn_module->execute_model)(ctx->model, "x", out,
+                                                      (const char **)&model_output_name, 1, out);
     } else {
-        dnn_result = (ctx->model->set_input)(ctx->model->model, in, "x");
-    }
-
-    if (dnn_result != DNN_SUCCESS) {
-        av_frame_free(&in);
-        av_frame_free(&out);
-        av_log(context, AV_LOG_ERROR, "could not set input for the model\n");
-        return AVERROR(EIO);
+        dnn_result = (ctx->dnn_module->execute_model)(ctx->model, "x", in,
+                                                      (const char **)&model_output_name, 1, out);
     }
-    dnn_result = (ctx->dnn_module->execute_model)(ctx->model, (const char **)&model_output_name, 1, out);
     if (dnn_result != DNN_SUCCESS){
         av_log(ctx, AV_LOG_ERROR, "failed to execute loaded model\n");
         av_frame_free(&in);