This is an automated email from the ASF dual-hosted git repository.

tlopex pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new ec0daad082 [Relax][ONNX] Support Resize dynamic ROI via TOPI (#18963)
ec0daad082 is described below

commit ec0daad082aa1c35c6d8e4bcc6cd450038cc994c
Author: Dayuxiaoshui <[email protected]>
AuthorDate: Thu Apr 2 11:25:40 2026 +0800

    [Relax][ONNX] Support Resize dynamic ROI via TOPI (#18963)
    
    The ONNX Resize converter previously rejected non-constant ROI inputs,
    which blocked models where ROI is provided at runtime. This change adds
    a dynamic-ROI path lowered through TOPI resize kernels while preserving
    the existing relax.image.resize* path for static ROI.
    
    Specifically:
    - add reusable helper to convert ONNX full ROI ([starts..., ends...])
    into spatial ROI vector
    - add reusable helper to emit topi.image.resize1d/2d/3d for dynamic ROI
    - keep static ROI fast path for relax.image.resize2d/resize3d
    - normalize dynamic ROI expr before emit_te to ensure struct_info is
    populated
    - handle optional Resize inputs (roi/scales/sizes) more defensively
    - add frontend test coverage with graph-input ROI:
    test_resize_dynamic_roi_tf_crop_and_resize
    
    Ref: apache/tvm#18945
---
 python/tvm/relax/frontend/onnx/onnx_frontend.py | 164 ++++++++++++++++++++----
 tests/python/relax/test_frontend_onnx.py        |  58 +++++++++
 2 files changed, 199 insertions(+), 23 deletions(-)

diff --git a/python/tvm/relax/frontend/onnx/onnx_frontend.py b/python/tvm/relax/frontend/onnx/onnx_frontend.py
index 21b8f22a0c..fd883e3d4a 100644
--- a/python/tvm/relax/frontend/onnx/onnx_frontend.py
+++ b/python/tvm/relax/frontend/onnx/onnx_frontend.py
@@ -2632,6 +2632,105 @@ class Identity(OnnxOpConverter):
         return inputs[0]
 
 
+def _onnx_resize_spatial_roi_vector(roi_full: relax.Expr, rank: int) -> relax.Expr:
+    """Map ONNX ROI [starts..., ends...] to TOPI spatial ROI (drop N/C axes)."""
+    return relax.op.concat(
+        [
+            relax.op.strided_slice(roi_full, axes=[0], begin=[2], end=[rank]),
+            relax.op.strided_slice(roi_full, axes=[0], begin=[rank + 2], end=[2 * rank]),
+        ],
+        axis=0,
+    )
+
+
+def _topi_resize3d_roi_from_onnx_ncdhw_spatial(roi_spatial: list[float]) -> list[float]:
+    """Reorder spatial ROI for NCDHW ONNX layout to TOPI resize3d convention.
+
+    ONNX spatial slice after dropping N/C is ordered (D, H, W) for starts then ends.
+    TOPI ``resize3d`` with layout NCDHW expects
+    ``(start_w, start_h, start_d, end_w, end_h, end_d)`` (see topi/image/resize.py).
+    """
+    if len(roi_spatial) != 6:
+        return roi_spatial
+    d0, h0, w0, d1, h1, w1 = roi_spatial
+    return [w0, h0, d0, w1, h1, d1]
+
+
+def _emit_resize_topi_dynamic_roi(
+    bb: relax.BlockBuilder,
+    data: relax.Expr,
+    roi_spatial_vec: relax.Expr,
+    sizes_spatial: list,
+    rank: int,
+    topi_mode: str,
+    coord_mode: str,
+    rounding_method: str,
+    cubic_coeff_a: float,
+    exclude_outside: int,
+    extrapolation_value: float,
+) -> relax.Expr:
+    """Lower Resize with runtime ROI via TOPI, which supports Expr ROI."""
+    if rank == 3:
+
+        def resize1d_dyn(d, r, s0):
+            return topi.image.resize1d(
+                d,
+                (r[0], r[1]),
+                [s0],
+                "NCW",
+                topi_mode,
+                coord_mode,
+                rounding_method,
+                cubic_coeff_a,
+                exclude_outside,
+                extrapolation_value,
+            )
+
+        return bb.emit_te(resize1d_dyn, data, roi_spatial_vec, sizes_spatial[0])
+
+    if rank == 4:
+
+        def resize2d_dyn(d, r, s0, s1):
+            return topi.image.resize2d(
+                d,
+                (r[0], r[1], r[2], r[3]),
+                (s0, s1),
+                layout="NCHW",
+                method=topi_mode,
+                coordinate_transformation_mode=coord_mode,
+                rounding_method=rounding_method,
+                bicubic_alpha=cubic_coeff_a,
+                bicubic_exclude=exclude_outside,
+                extrapolation_value=extrapolation_value,
+            )
+
+        return bb.emit_te(resize2d_dyn, data, roi_spatial_vec, sizes_spatial[0], sizes_spatial[1])
+
+    def resize3d_dyn(d, r, s0, s1, s2):
+        # r is ONNX order (D,H,W) x2; TOPI expects (W,H,D) x2.
+        return topi.image.resize3d(
+            d,
+            (r[2], r[1], r[0], r[5], r[4], r[3]),
+            (s0, s1, s2),
+            layout="NCDHW",
+            method=topi_mode,
+            coordinate_transformation_mode=coord_mode,
+            rounding_method=rounding_method,
+            bicubic_alpha=cubic_coeff_a,
+            bicubic_exclude=exclude_outside,
+            extrapolation_value=extrapolation_value,
+        )
+
+    return bb.emit_te(
+        resize3d_dyn,
+        data,
+        roi_spatial_vec,
+        sizes_spatial[0],
+        sizes_spatial[1],
+        sizes_spatial[2],
+    )
+
+
 class Resize(OnnxOpConverter):
     """Converts an onnx Resize node into an equivalent Relax expression."""
 
@@ -2654,9 +2753,9 @@ class Resize(OnnxOpConverter):
 
         # Unpack inputs.
         x = inputs[0]
-        roi = get_constant(inputs[1], params)
-        scales = get_constant(inputs[2], params)
-        sizes = get_constant(inputs[3], params)
+        roi = get_constant(inputs[1], params) if len(inputs) > 1 and inputs[1] is not None else None
+        scales = get_constant(inputs[2], params) if len(inputs) > 2 else None
+        sizes = get_constant(inputs[3], params) if len(inputs) > 3 else None
         ndims = len(x.struct_info.shape)
         assert ndims in (3, 4, 5), "Only resize1d/resize2d/resize3d are supported."
 
@@ -2664,26 +2763,29 @@ class Resize(OnnxOpConverter):
             "Only one of scales and sizes can be provided in Resize."
         )
 
-        # Define relax implementation.
+        # ROI can be a static list (for relax.image.resize*) or dynamic tensor (TOPI path).
+        roi_static: list[float] | None = None
+        roi_dynamic_vec: relax.Expr | None = None
         if roi is not None:
             if isinstance(roi, relax.Constant):
-                roi = roi.data.numpy().tolist()
-                if len(roi) == 2 * ndims:
-                    roi = roi[2:ndims] + roi[ndims + 2 : 2 * ndims]
-                elif len(roi) == 0:
-                    roi = [0.0] * (2 * (ndims - 2))
+                roi_np = roi.data.numpy().tolist()
+                if len(roi_np) == 2 * ndims:
+                    roi_static = roi_np[2:ndims] + roi_np[ndims + 2 : 2 * ndims]
+                elif len(roi_np) == 0:
+                    roi_static = [0.0] * (2 * (ndims - 2))
+                elif len(roi_np) == 2 * (ndims - 2):
+                    # Some exporters already provide spatial-only ROI.
+                    roi_static = roi_np
+                else:
+                    roi_static = roi_np
             else:
-                roi = relax.op.concat(
-                    [
-                        relax.op.strided_slice(roi, axes=[0], begin=[2], end=[ndims]),
-                        relax.op.strided_slice(roi, axes=[0], begin=[ndims + 2], end=[2 * ndims]),
-                    ],
-                    axis=0,
+                roi_dynamic_vec = bb.normalize(
+                    _onnx_resize_spatial_roi_vector(roi, ndims)
                 )
-                # TODO The backend C++ func resize2d does not support dynamic ROI for now.
-                raise NotImplementedError("Dynamic ROI is not supported in resize for now.")
         else:
-            roi = [0.0] * (2 * (ndims - 2))
+            roi_static = [0.0] * (2 * (ndims - 2))
+
+        use_dynamic_roi = roi_dynamic_vec is not None
 
         # Convert scales to sizes if needed.
         if scales is not None:
@@ -2692,7 +2794,7 @@ class Resize(OnnxOpConverter):
             elif isinstance(scales, relax.expr.ShapeExpr):
                 scales = [int(val.value) for val in scales.values]
             else:
-                assert f"Type {type(scales)} for scale is currently unsupported."
+                raise ValueError(f"Type {type(scales)} for scale is currently unsupported.")
             sizes = []
 
             for i, dim in enumerate(x.struct_info.shape):
@@ -2704,13 +2806,28 @@ class Resize(OnnxOpConverter):
             elif isinstance(sizes, relax.expr.ShapeExpr):
                 sizes = [int(val.value) for val in sizes.values][2:]
             else:
-                assert f"Type {type(sizes)} for size is currently unsupported."
+                raise ValueError(f"Type {type(sizes)} for size is currently unsupported.")
+
+        if use_dynamic_roi:
+            return _emit_resize_topi_dynamic_roi(
+                bb,
+                x,
+                roi_dynamic_vec,
+                sizes,
+                ndims,
+                topi_mode,
+                coord_mode,
+                rounding_method,
+                cubic_coeff_a,
+                exclude_outside,
+                extrapolation_value,
+            )
 
         if ndims == 3:
             return bb.emit_te(
                 topi.image.resize1d,
                 x,
-                roi,
+                roi_static,
                 sizes,
                 "NCW",
                 topi_mode,
@@ -2724,7 +2841,7 @@ class Resize(OnnxOpConverter):
             return relax.op.image.resize2d(
                 x,
                 size=relax.ShapeExpr(sizes),
-                roi=roi,
+                roi=roi_static,
                 layout="NCHW",
                 method=relax_mode,
                 coordinate_transformation_mode=coord_mode,
@@ -2734,10 +2851,11 @@ class Resize(OnnxOpConverter):
                 extrapolation_value=extrapolation_value,
             )
         else:  # ndims == 5
+            roi3d = _topi_resize3d_roi_from_onnx_ncdhw_spatial(roi_static)
             return relax.op.image.resize3d(
                 x,
                 size=relax.ShapeExpr(sizes),
-                roi=roi,
+                roi=roi3d,
                 layout="NCDHW",
                 method=relax_mode,
                 coordinate_transformation_mode=coord_mode,
diff --git a/tests/python/relax/test_frontend_onnx.py b/tests/python/relax/test_frontend_onnx.py
index 621ce43379..ab3a5c5148 100644
--- a/tests/python/relax/test_frontend_onnx.py
+++ b/tests/python/relax/test_frontend_onnx.py
@@ -3268,6 +3268,64 @@ def test_resize(with_roi, roi_list, with_constant):
     check_correctness(model)
 
 
+def test_resize_dynamic_roi_tf_crop_and_resize():
+    """ROI is a graph input (not initializer), lowered through TOPI dynamic-ROI path."""
+    resize_node = helper.make_node(
+        "Resize",
+        ["X", "roi", "scales"],
+        ["Y"],
+        mode="linear",
+        coordinate_transformation_mode="tf_crop_and_resize",
+    )
+    graph = helper.make_graph(
+        [resize_node],
+        "resize_dynamic_roi",
+        inputs=[
+            helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 3, 32, 32]),
+            helper.make_tensor_value_info("roi", TensorProto.FLOAT, [8]),
+        ],
+        initializer=[
+            helper.make_tensor("scales", TensorProto.FLOAT, [4], [1.0, 1.0, 2.0, 2.0]),
+        ],
+        outputs=[
+            helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 3, 64, 64]),
+        ],
+    )
+    model = helper.make_model(graph, producer_name="resize_dynamic_roi")
+    check_correctness(model, atol=1e-5)
+
+
+def test_resize_dynamic_roi_3d_tf_crop_and_resize():
+    """5-D NCDHW: ROI is a graph input; covers dynamic-ROI TOPI resize3d path."""
+    resize_node = helper.make_node(
+        "Resize",
+        ["X", "roi", "scales"],
+        ["Y"],
+        mode="linear",
+        coordinate_transformation_mode="tf_crop_and_resize",
+    )
+    graph = helper.make_graph(
+        [resize_node],
+        "resize_dynamic_roi_3d",
+        inputs=[
+            helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 1, 3, 4, 5]),
+            helper.make_tensor_value_info("roi", TensorProto.FLOAT, [10]),
+        ],
+        initializer=[
+            helper.make_tensor("scales", TensorProto.FLOAT, [5], [1.0, 1.0, 2.0, 2.0, 2.0]),
+        ],
+        outputs=[
+            helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 1, 6, 8, 10]),
+        ],
+    )
+    model = helper.make_model(graph, producer_name="resize_dynamic_roi_3d")
+    # Use a valid full-tensor ROI so ORT and TOPI agree on tf_crop_and_resize (random ROI
+    # can hit extrapolation / numerical differences across runtimes).
+    x_np = rg.standard_normal((1, 1, 3, 4, 5)).astype(np.float32)
+    roi_np = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1], dtype=np.float32)
+    check_correctness(model, opset=18, atol=1e-5, inputs={"X": x_np, "roi": roi_np})
+
+
 def test_resize_nd_sizes():
     cases = [
         ("resize1d", [1, 1, 4], [1, 1, 7]),

Reply via email to