gemini-code-assist[bot] commented on code in PR #67:
URL: https://github.com/apache/tvm-ffi/pull/67#discussion_r2384335911


##########
include/tvm/ffi/container/shape.h:
##########
@@ -93,21 +156,41 @@ TVM_FFI_INLINE ObjectPtr<ShapeObj> 
MakeInplaceShape(IterType begin, IterType end
   return p;
 }
 
-TVM_FFI_INLINE ObjectPtr<ShapeObj> MakeStridesFromShape(const int64_t* data, 
int64_t ndim) {
-  int64_t* strides_data;
-  ObjectPtr<ShapeObj> strides = details::MakeEmptyShape(ndim, &strides_data);
+/*!
+ * \brief Get the product of a shape.
+ * \param shape The input shape.
+ * \param out_strides The output strides.
+ * \return The product of the shape.
+ */
+TVM_FFI_INLINE void FillStridesFromShape(ShapeView shape, int64_t* 
out_strides) {
   int64_t stride = 1;
-  for (int i = ndim - 1; i >= 0; --i) {
-    strides_data[i] = stride;
-    stride *= data[i];
+  for (int i = static_cast<int>(shape.size()) - 1; i >= 0; --i) {

Review Comment:
   ![medium](https://www.gstatic.com/codereviewagent/medium-priority.svg)
   
   The loop variable `i` is initialized by casting `shape.size()` (a `size_t`) 
to `int`. On a 64-bit system, `size_t` can be larger than `int`, which could 
lead to an integer overflow if a tensor has more than `INT_MAX` dimensions. 
While highly unlikely, it's safer to use `int64_t` for the loop variable, which 
is also consistent with the type used for shape dimensions elsewhere.
   
   ```cpp
     for (int64_t i = static_cast<int64_t>(shape.size()) - 1; i >= 0; --i) {
   ```



##########
include/tvm/ffi/container/tensor.h:
##########
@@ -393,7 +367,15 @@ class Tensor : public ObjectRef {
       throw ffi::Error(error_context.kind, error_context.message,
                        TVMFFIBacktrace(__FILE__, __LINE__, __func__, 0));
     }
-    return 
Tensor(make_object<details::TensorObjFromDLPack<DLManagedTensorVersioned>>(tensor));
+    if (tensor->dl_tensor.strides != nullptr) {
+      return 
Tensor(make_object<details::TensorObjFromDLPack<DLManagedTensorVersioned>>(
+          tensor, /*extra_strides_at_tail=*/false));
+    } else {
+      return Tensor(
+          
make_inplace_array_object<details::TensorObjFromDLPack<DLManagedTensorVersioned>,
+                                    int64_t>(tensor->dl_tensor.ndim, tensor,
+                                             /*extra_strides_at_tail=*/true));
+    }

Review Comment:
   ![medium](https://www.gstatic.com/codereviewagent/medium-priority.svg)
   
   This logic to handle DLPack tensors with and without strides is duplicated 
here and in `FromDLPack` (lines 397-405) and `FromDLPackVersioned` (lines 
426-434). To improve maintainability and reduce code duplication, consider 
extracting this logic into a private static template helper function.
   
   For example, you could add a helper like this:
   
   ```cpp
   private:
   template <typename TDLManagedTensor>
   static Tensor FromDLPackImpl(TDLManagedTensor* tensor) {
     if (tensor->dl_tensor.strides != nullptr) {
       return 
Tensor(make_object<details::TensorObjFromDLPack<TDLManagedTensor>>(
           tensor, /*extra_strides_at_tail=*/false));
     } else {
       return Tensor(
           
make_inplace_array_object<details::TensorObjFromDLPack<TDLManagedTensor>,
                                     int64_t>(tensor->dl_tensor.ndim, tensor,
                                              /*extra_strides_at_tail=*/true));
     }
   }
   ```
   
   Then, `FromDLPack`, `FromDLPackVersioned`, and `FromDLPackAlloc` can call 
this helper after performing their specific checks.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to