dutZ1855 opened a new issue, #18580:
URL: https://github.com/apache/tvm/issues/18580

   Hello, I found that the output of TVM's Acos operator is inconsistent with 
ONNX Runtime (ort) and PyTorch. Here is my code. Could you tell me whether this 
discrepancy is caused by incorrect API usage in my script, or by something else?
   ```
   from pathlib import Path
   import sys
   import argparse
   
   import numpy as np
   import onnx
   from onnx import helper, TensorProto
   
   # Prefer the TVM checkout that sits next to this script over any installed
   # copy, so the reproduction runs against local sources when available.
   REPO_ROOT = Path(__file__).resolve().parents[1]
   TVM_PY = REPO_ROOT / "tvm" / "python"
   if TVM_PY.exists():
       # Prepend so the in-repo package shadows a system-installed tvm.
       sys.path.insert(0, str(TVM_PY))
   
   import tvm  # noqa: E402
   from tvm import relax  # noqa: E402
   from tvm.relax.frontend import onnx as rx_onnx  # noqa: E402
   
   def build_model_proto():
       """Construct a minimal ONNX model holding a single Acos node.

       The graph maps one float32 input "x" of shape [1, 1] through Acos
       to one float32 output "y" of the same shape, under default opset 14.
       """
       in_info = helper.make_tensor_value_info("x", TensorProto.FLOAT, [1, 1])
       out_info = helper.make_tensor_value_info("y", TensorProto.FLOAT, [1, 1])
       acos_node = helper.make_node("Acos", ["x"], ["y"])
       acos_graph = helper.make_graph([acos_node], "acos_single", [in_info], [out_info])
       opset = helper.make_opsetid("", 14)
       return helper.make_model(acos_graph, opset_imports=[opset])
   
   
   def run_tvm(model_proto, inputs, target="llvm"):
       """Import *model_proto* into TVM Relax, compile it, and execute on *inputs*.

       Parameters
       ----------
       model_proto : onnx.ModelProto
           The ONNX model to import and run.
       inputs : dict
           Mapping from graph input names to numpy arrays; only "x" is fed.
       target : str
           TVM target string (default "llvm").

       Returns
       -------
       numpy.ndarray
           The numpy value of the model's (first) output.
       """
       shapes = {name: arr.shape for name, arr in inputs.items()}
       imported = rx_onnx.from_onnx(model_proto, shape_dict=shapes)
       # from_onnx may hand back a bare module or a (module, params) tuple.
       if isinstance(imported, (list, tuple)) and len(imported) >= 1:
           mod = imported[0]
           params = imported[1] if len(imported) >= 2 else None
       else:
           mod, params = imported, None
       mod = relax.transform.DecomposeOpsForInference()(mod)
       mod = relax.transform.LegalizeOps()(mod)
       mod, detached = relax.frontend.detach_params(mod)
       if params is None:
           params = detached

       dev = tvm.device(str(target), 0)
       tgt = tvm.target.Target(str(target))
       pipeline = relax.pipeline.get_default_pipeline(tgt)
       with tvm.transform.PassContext(opt_level=3):
           ex = relax.build(mod, target=tgt, params=params, relax_pipeline=pipeline)
       vm = relax.VirtualMachine(ex, dev)

       vm.set_input("main", tvm.runtime.tensor(inputs["x"], device=dev))
       vm.invoke_stateful("main")
       out = vm.get_outputs("main")
       # A single output tensor exposes .numpy(); otherwise unwrap element 0.
       return out.numpy() if hasattr(out, "numpy") else out[0].numpy()
   
   
   def main():
       """Parse the CLI, run Acos through TVM, and compare against numpy."""
       parser = argparse.ArgumentParser(description="Acos check.")
       parser.add_argument("--value", type=float, default=1.0, help="Input scalar value for Acos.")
       args = parser.parse_args()

       x_val = np.array([[args.value]], dtype=np.float32)
       # Acos is only defined for |x| <= 1; report whether the input is valid.
       in_domain = np.all(np.abs(x_val) <= 1.0)

       tvm_out = run_tvm(build_model_proto(), {"x": x_val})
       expected = np.arccos(x_val, dtype=np.float32)
       abs_err = np.abs(tvm_out - expected)
       max_abs = abs_err.max()
       # Guard the denominator so a zero expected value cannot divide by zero.
       max_rel = (abs_err / np.maximum(np.abs(expected), 1e-12)).max()
       print(f"Input x: {x_val} (domain_ok={in_domain})")
       print("TVM y:", tvm_out)
       print("Expected (np.arccos):", expected)
       print(f"max_abs={max_abs:.6f} max_rel={max_rel:.6f}")


   if __name__ == "__main__":
       main()
   ```
   
   
   ### behavior
   
   For the input 1, Acos should return 0, but TVM produces the incorrect value 
0.354874. Other inputs also show abnormal results:
   
   ```
   python acos_tvm.py --value 1
   Input x: [[1.]] (domain_ok=True)
   TVM y: [[0.354874]]
   Expected (np.arccos): [[0.]]
   max_abs=0.354874 max_rel=354874032128.000000
   ```
   ```
   python run_acos_tvm_only.py --value -1
   Input x: [[-1.]] (domain_ok=True)
   TVM y: [[2.7867188]]
   Expected (np.arccos): [[3.1415927]]
   max_abs=0.354874 max_rel=0.112960
   ```
   ### Environment
   
   Operating System: Ubuntu 22.04.4 LTS
   TVM version: 0.23.0dev
   pytorch version: 2.9.1
   ort version: 1.23.2
   onnx version: 1.20.0
   openvino: 2025.4.0
   python: 3.11.14
   ### Steps to reproduce
   
   [model.zip](https://github.com/user-attachments/files/24093771/model.zip)
   
   ### Triage
   
   Please refer to the list of label tags 
[here](https://github.com/apache/tvm/wiki/Issue-Triage-Labels) to find the 
relevant tags and add them below in a bullet format (example below).
   
   * needs-triage
   


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to