gemini-code-assist[bot] commented on code in PR #18668:
URL: https://github.com/apache/tvm/pull/18668#discussion_r2700674778
##########
python/tvm/relax/frontend/onnx/onnx_frontend.py:
##########
@@ -2399,6 +2399,63 @@ def _impl_v9(cls, bb, inputs, attr, params):
        return (data - data_mean) / relax.op.sqrt(data_squared_mean - data_mean_squared)
+class LocalResponseNormalization(OnnxOpConverter):
+    """Converts an onnx LocalResponseNormalization node into an equivalent Relax expression."""
+
+ @classmethod
+ def _impl_v13(cls, bb, inputs, attr, params):
+ data = inputs[0]
+ size = attr["size"]
+ alpha = attr.get("alpha", 0.0001)
+ beta = attr.get("beta", 0.75)
+ bias = attr.get("bias", 1.0)
+
+ if hasattr(data.struct_info, "ndim"):
+ ndim = data.struct_info.ndim
+ else:
+ ndim = len(data.struct_info.shape)
+
+ if ndim not in [3, 4]:
+ raise ValueError(f"LRN only supports 3D or 4D input, got {ndim}D.")
Review Comment:

The current logic does not handle the case where the input tensor's rank is
unknown (`ndim == -1`): a dynamic-rank tensor would fall through to the 3D/4D
check and fail with a confusing `got -1D` message. It's better to check for an
unknown rank explicitly and raise a clear error. The `hasattr` fallback is
also unnecessary; `data.struct_info.ndim` can be read directly.
```suggestion
        ndim = data.struct_info.ndim
        if ndim == -1:
            raise ValueError("The input tensor to LRN must have a known rank.")
        if ndim not in [3, 4]:
            raise ValueError(f"LRN only supports 3D or 4D input, got {ndim}D.")
```
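For context, here is a minimal sketch (not part of the patch, assuming a standard TVM build) of how an unknown rank surfaces in Relax struct info. Without the explicit guard, such an input reaches the 3D/4D check and fails with the confusing `got -1D` message:
```python
# Sketch: TensorStructInfo reports ndim == -1 when the rank is unknown.
from tvm import relax

known = relax.TensorStructInfo([1, 3, 32, 32], "float32")
unknown = relax.TensorStructInfo(dtype="float32")  # no shape, no ndim given

print(known.ndim)    # 4
print(unknown.ndim)  # -1: the suggested guard turns this into a clear error
```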
##########
python/tvm/relax/frontend/onnx/onnx_frontend.py:
##########
@@ -2399,6 +2399,63 @@ def _impl_v9(cls, bb, inputs, attr, params):
        return (data - data_mean) / relax.op.sqrt(data_squared_mean - data_mean_squared)
+class LocalResponseNormalization(OnnxOpConverter):
+    """Converts an onnx LocalResponseNormalization node into an equivalent Relax expression."""
+
+ @classmethod
+ def _impl_v13(cls, bb, inputs, attr, params):
+ data = inputs[0]
+ size = attr["size"]
+ alpha = attr.get("alpha", 0.0001)
+ beta = attr.get("beta", 0.75)
+ bias = attr.get("bias", 1.0)
+
+ if hasattr(data.struct_info, "ndim"):
+ ndim = data.struct_info.ndim
+ else:
+ ndim = len(data.struct_info.shape)
+
+ if ndim not in [3, 4]:
+ raise ValueError(f"LRN only supports 3D or 4D input, got {ndim}D.")
+
+ data_squared = relax.op.multiply(data, data)
+ data_expanded = relax.op.expand_dims(data_squared, axis=1)
+ pad_len = size // 2
+ if ndim == 3:
+ pool_padding = [pad_len, 0, pad_len, 0]
+ pool_op = relax.op.nn.avg_pool2d
+ pool_size = (size, 1)
+ layout = "NCHW"
+ strides = (1, 1)
+ else:
+ pool_padding = [pad_len, 0, 0, pad_len, 0, 0]
+ pool_op = relax.op.nn.avg_pool3d
+ pool_size = (size, 1, 1)
+ layout = "NCDHW"
+ strides = (1, 1, 1)
+
+ data_avgpool = pool_op(
+ data_expanded,
+ pool_size=pool_size,
+ strides=strides,
+ padding=pool_padding,
+ layout=layout,
+ ceil_mode=False,
+ count_include_pad=True,
+ )
+ data_squeezed = relax.op.squeeze(data_avgpool, axis=1)
+
+ const_alpha = relax.const(alpha, dtype="float32")
+ const_bias = relax.const(bias, dtype="float32")
+ const_beta = relax.const(beta, dtype="float32")
Review Comment:

The data type for the `alpha`, `bias`, and `beta` constants is hardcoded to
`float32`. The ONNX LRN operator supports various floating-point types (e.g.,
`float16`, `float64`). Hardcoding the dtype can cause type mismatches or
precision loss if the input tensor has a different float dtype. These constants
should use the same data type as the input tensor `data`.
```suggestion
        const_alpha = relax.const(alpha, dtype=data.struct_info.dtype)
        const_bias = relax.const(bias, dtype=data.struct_info.dtype)
        const_beta = relax.const(beta, dtype=data.struct_info.dtype)
```
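As a hypothetical repro (reusing the `helper` and `check_correctness` utilities from this PR's test file), an LRN model with a `float16` input exercises the mismatch, since the hardcoded `float32` constants end up multiplied against `float16` tensors:
```python
# Sketch of a float16 LRN graph; with the constants pinned to float32,
# the converter mixes float16 tensors with float32 scalars.
from onnx import TensorProto, helper

lrn_node = helper.make_node("LRN", inputs=["input"], outputs=["output"], size=3)
graph = helper.make_graph(
    [lrn_node],
    "lrn_fp16_test",
    inputs=[helper.make_tensor_value_info("input", TensorProto.FLOAT16, [1, 3, 32, 32])],
    outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT16, [1, 3, 32, 32])],
)
model = helper.make_model(graph, producer_name="lrn_fp16_test")
# check_correctness(model)  # expected to surface a dtype mismatch until
#                           # the constants follow data.struct_info.dtype
```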
##########
tests/python/relax/test_frontend_onnx.py:
##########
@@ -1673,6 +1673,33 @@ def verify_embedlayernormalization(
)
+def test_local_response_norm():
+ lrn_node = helper.make_node(
+ op_type="LRN",
+ inputs=["input"],
+ outputs=["output"],
+ name="LRN_Node",
+ alpha=0.0001,
+ beta=0.75,
+ bias=1.0,
+ size=3,
+ )
+
+ graph = helper.make_graph(
+ [lrn_node],
+ "local_response_norm_test",
+ inputs=[
+            helper.make_tensor_value_info("input", TensorProto.FLOAT, [1, 3, 32, 32]),
+ ],
+ outputs=[
+            helper.make_tensor_value_info("output", TensorProto.FLOAT, [1, 3, 32, 32]),
+ ],
+ )
+
+ model = helper.make_model(graph, producer_name="local_response_norm_test")
+ check_correctness(model)
+
Review Comment:

The current test for `LocalResponseNormalization` only covers a single case
with a 4D input and default attributes. To ensure the implementation is robust,
it would be beneficial to expand the test coverage. I suggest parameterizing
the test to include 3D inputs and variations in the LRN attributes (`size`,
`alpha`, `beta`, `bias`).
```python
@pytest.mark.parametrize(
    "shape, size, alpha, beta, bias",
    [
        ([1, 3, 32, 32], 3, 0.0001, 0.75, 1.0),  # 4D case
        ([1, 5, 16], 5, 0.0002, 0.5, 2.0),  # 3D case
    ],
)
def test_local_response_norm(shape, size, alpha, beta, bias):
    lrn_node = helper.make_node(
        op_type="LRN",
        inputs=["input"],
        outputs=["output"],
        name="LRN_Node",
        alpha=alpha,
        beta=beta,
        bias=bias,
        size=size,
    )
    graph = helper.make_graph(
        [lrn_node],
        "local_response_norm_test",
        inputs=[
            helper.make_tensor_value_info("input", TensorProto.FLOAT, shape),
        ],
        outputs=[
            helper.make_tensor_value_info("output", TensorProto.FLOAT, shape),
        ],
    )
    model = helper.make_model(graph, producer_name="local_response_norm_test")
    check_correctness(model)
```
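If a reference independent of the runtime behind `check_correctness` is ever useful, the ONNX LRN formula is small enough to transcribe directly in NumPy (a sketch; `lrn_reference` is a hypothetical helper, not part of this PR):
```python
import numpy as np

def lrn_reference(x, size, alpha=0.0001, beta=0.75, bias=1.0):
    """NumPy transcription of the ONNX LRN spec; channel axis is 1."""
    channels = x.shape[1]
    square_sum = np.zeros_like(x)
    half_lo = (size - 1) // 2     # floor((size - 1) / 2)
    half_hi = size - 1 - half_lo  # ceil((size - 1) / 2)
    for c in range(channels):
        lo = max(0, c - half_lo)
        hi = min(channels - 1, c + half_hi)
        square_sum[:, c] = np.sum(x[:, lo : hi + 1] ** 2, axis=1)
    # Y = X / (bias + alpha / size * square_sum) ^ beta
    return x / (bias + (alpha / size) * square_sum) ** beta
```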
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]