https://github.com/aartbik created https://github.com/llvm/llvm-project/pull/68951
From 64011a22c3e9b97515e34139d0fbb03e0b1fe330 Mon Sep 17 00:00:00 2001
From: Aart Bik <ajc...@google.com>
Date: Thu, 12 Oct 2023 21:24:05 -0700
Subject: [PATCH] [mlir][sparse] remove unused sparse tensor iterator

---
 .../mlir/Dialect/SparseTensor/IR/Enums.h      |   3 -
 .../ExecutionEngine/SparseTensorRuntime.h     |  22 +---
 .../ExecutionEngine/SparseTensorRuntime.cpp   | 109 +-----------------
 3 files changed, 7 insertions(+), 127 deletions(-)

diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
index f1643d66c26a195..1434c649acd29b4 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -146,11 +146,8 @@ enum class Action : uint32_t {
   kEmptyForward = 1,
   kFromCOO = 2,
   kSparseToSparse = 3,
-  kFuture = 4, // not used
   kToCOO = 5,
-  kToIterator = 6,
   kPack = 7,
-  // Sort an unordered COO in place.
   kSortCOOInPlace = 8,
 };

diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
index f9312c866f36317..e8dd50d6730c784 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
@@ -39,9 +39,9 @@ extern "C" {

 /// This is the "swiss army knife" method for materializing sparse
 /// tensors into the computation. The types of the `ptr` argument and
-/// the result depend on the action, as explained in the following table
-/// (where "STS" means a sparse-tensor-storage object, "COO" means
-/// a coordinate-scheme object, and "Iterator" means an iterator object).
+/// the result depend on the action, as explained in the following table,
+/// where "STS" means a sparse-tensor-storage object and "COO" means
+/// a coordinate-scheme object.
 ///
 /// Action:         `ptr`:      Returns:
 /// kEmpty          -           STS, empty
@@ -49,8 +49,8 @@ extern "C" {
 /// kFromCOO        COO         STS, copied from the COO source
 /// kSparseToSparse STS         STS, copied from the STS source
 /// kToCOO          STS         COO, copied from the STS source
-/// kToIterator     STS         Iterator (@getNext/@delSparseTensorIterator)
 /// kPack           buffers     STS, from level buffers
+/// kSortCOOInPlace STS         STS, sorted in place
 MLIR_CRUNNERUTILS_EXPORT void *_mlir_ciface_newSparseTensor( // NOLINT
     StridedMemRefType<index_type, 1> *dimSizesRef,
     StridedMemRefType<index_type, 1> *lvlSizesRef,
@@ -90,14 +90,6 @@ MLIR_SPARSETENSOR_FOREVERY_O(DECL_SPARSECOORDINATES)
 MLIR_SPARSETENSOR_FOREVERY_V(DECL_FORWARDINGINSERT)
 #undef DECL_FORWARDINGINSERT

-/// Coordinate-scheme method for getting the next element while iterating.
-#define DECL_GETNEXT(VNAME, V)                                                 \
-  MLIR_CRUNNERUTILS_EXPORT bool _mlir_ciface_getNext##VNAME(                   \
-      void *iter, StridedMemRefType<index_type, 1> *cref,                      \
-      StridedMemRefType<V, 0> *vref);
-MLIR_SPARSETENSOR_FOREVERY_V(DECL_GETNEXT)
-#undef DECL_GETNEXT
-
 /// Tensor-storage method to insert elements in lexicographical
 /// level-coordinate order.
 #define DECL_LEXINSERT(VNAME, V)                                               \
@@ -201,12 +193,6 @@ MLIR_CRUNNERUTILS_EXPORT void delSparseTensor(void *tensor);
 MLIR_SPARSETENSOR_FOREVERY_V(DECL_DELCOO)
 #undef DECL_DELCOO

-/// Releases the memory for an iterator object.
-#define DECL_DELITER(VNAME, V)                                                 \
-  MLIR_CRUNNERUTILS_EXPORT void delSparseTensorIterator##VNAME(void *iter);
-MLIR_SPARSETENSOR_FOREVERY_V(DECL_DELITER)
-#undef DECL_DELITER
-
 /// Helper function to read a sparse tensor filename from the environment,
 /// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
 MLIR_CRUNNERUTILS_EXPORT char *getTensorFilename(index_type id);
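As background for the action table above (not part of the patch): `_mlir_ciface_newSparseTensor` is a single entry point whose `ptr` argument and return value are reinterpreted per action. The following minimal, self-contained C++ sketch illustrates that dispatch style only; `ToyCOO`, `ToySTS`, and `newToyTensor` are hypothetical stand-ins, not the real MLIR runtime types.

#include <cassert>
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

// Mirrors a subset of the Action enum in Enums.h.
enum class Action : uint32_t { kEmpty = 0, kFromCOO = 2, kToCOO = 5 };

// Toy coordinate-scheme object ("COO" in the table above).
struct ToyCOO {
  std::vector<std::pair<std::vector<uint64_t>, double>> elements;
};

// Toy sparse-tensor-storage object ("STS" in the table above).
struct ToySTS {
  std::map<std::vector<uint64_t>, double> data;
};

// One entry point; the action decides what `ptr` is and what comes back.
void *newToyTensor(Action action, void *ptr) {
  switch (action) {
  case Action::kEmpty:
    return new ToySTS(); // `ptr` unused, returns an empty STS.
  case Action::kFromCOO: {
    assert(ptr && "expected a COO source");
    auto &coo = *static_cast<ToyCOO *>(ptr);
    auto *sts = new ToySTS();
    for (const auto &[coords, val] : coo.elements)
      sts->data[coords] = val; // copy COO into STS
    return sts;
  }
  case Action::kToCOO: {
    assert(ptr && "expected an STS source");
    auto &sts = *static_cast<ToySTS *>(ptr);
    auto *coo = new ToyCOO();
    for (const auto &[coords, val] : sts.data)
      coo->elements.push_back({coords, val});
    return coo;
  }
  }
  return nullptr;
}

With kToIterator gone, kToCOO is the remaining way to pull elements back out of storage, which is why the table above loses its "Iterator" column entry and gains the kSortCOOInPlace row.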
diff --git a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
index cd1b663578a48ce..ae33a869497a01c 100644
--- a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
@@ -63,71 +63,18 @@ using namespace mlir::sparse_tensor;

 //===----------------------------------------------------------------------===//
 //
-// Implementation details for public functions, which don't have a good
-// place to live in the C++ library this file is wrapping.
+// Utilities for manipulating `StridedMemRefType`.
 //
 //===----------------------------------------------------------------------===//

 namespace {

-/// Wrapper class to avoid memory leakage issues. The `SparseTensorCOO<V>`
-/// class provides a standard C++ iterator interface, where the iterator
-/// is implemented as per `std::vector`'s iterator. However, for MLIR's
-/// usage we need to have an iterator which also holds onto the underlying
-/// `SparseTensorCOO<V>` so that it can be freed whenever the iterator
-/// is freed.
-//
-// We name this `SparseTensorIterator` rather than `SparseTensorCOOIterator`
-// for future-proofing, since the use of `SparseTensorCOO` is an
-// implementation detail that we eventually want to change (e.g., to
-// use `SparseTensorEnumerator` directly, rather than constructing the
-// intermediate `SparseTensorCOO` at all).
-template <typename V>
-class SparseTensorIterator final {
-public:
-  /// This ctor requires `coo` to be a non-null pointer to a dynamically
-  /// allocated object, and takes ownership of that object. Therefore,
-  /// callers must not free the underlying COO object, since the iterator's
-  /// dtor will do so.
-  explicit SparseTensorIterator(const SparseTensorCOO<V> *coo)
-      : coo(coo), it(coo->begin()), end(coo->end()) {}
-
-  ~SparseTensorIterator() { delete coo; }
-
-  // Disable copy-ctor and copy-assignment, to prevent double-free.
-  SparseTensorIterator(const SparseTensorIterator<V> &) = delete;
-  SparseTensorIterator<V> &operator=(const SparseTensorIterator<V> &) = delete;
-
-  /// Gets the next element. If there are no remaining elements, then
-  /// returns nullptr.
-  const Element<V> *getNext() { return it < end ? &*it++ : nullptr; }
-
-private:
-  const SparseTensorCOO<V> *const coo; // Owning pointer.
-  typename SparseTensorCOO<V>::const_iterator it;
-  const typename SparseTensorCOO<V>::const_iterator end;
-};
-
-//===----------------------------------------------------------------------===//
-//
-// Utilities for manipulating `StridedMemRefType`.
-//
-//===----------------------------------------------------------------------===//
-
-// We shouldn't need to use `detail::safelyEQ` here since the `1` is a literal.
 #define ASSERT_NO_STRIDE(MEMREF)                                               \
   do {                                                                         \
     assert((MEMREF) && "Memref is nullptr");                                   \
     assert(((MEMREF)->strides[0] == 1) && "Memref has non-trivial stride");    \
   } while (false)

-// All our functions use `uint64_t` for ranks, but `StridedMemRefType::sizes`
-// uses `int64_t` on some platforms. So we explicitly cast this lookup to
-// ensure we get a consistent type, and we use `checkOverflowCast` rather
-// than `static_cast` just to be extremely sure that the casting can't
-// go awry. (The cast should aways be safe since (1) sizes should never
-// be negative, and (2) the maximum `int64_t` is smaller than the maximum
-// `uint64_t`. But it's better to be safe than sorry.)
 #define MEMREF_GET_USIZE(MEMREF)                                               \
   detail::checkOverflowCast<uint64_t>((MEMREF)->sizes[0])
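The class deleted above is an instance of a general owning-iterator pattern: the iterator keeps the container it walks alive and frees it in its destructor, with copying disabled to prevent a double-free. A minimal, self-contained sketch of the same pattern over a plain std::vector (the `OwningIterator` name is hypothetical, for illustration only):

#include <vector>

// Hypothetical stand-in mirroring the deleted SparseTensorIterator, but
// walking a plain std::vector instead of a SparseTensorCOO.
template <typename V>
class OwningIterator final {
public:
  // Takes ownership of a heap-allocated vector; the dtor frees it, so the
  // caller must not delete the vector itself.
  explicit OwningIterator(const std::vector<V> *data)
      : data(data), it(data->begin()), end(data->end()) {}

  ~OwningIterator() { delete data; }

  // Copying would lead to a double-free of `data`, hence deleted,
  // exactly as in the removed class.
  OwningIterator(const OwningIterator &) = delete;
  OwningIterator &operator=(const OwningIterator &) = delete;

  // Returns the next element, or nullptr once exhausted.
  const V *getNext() { return it != end ? &*it++ : nullptr; }

private:
  const std::vector<V> *const data; // Owning pointer.
  typename std::vector<V>::const_iterator it;
  const typename std::vector<V>::const_iterator end;
};

int main() {
  auto *iter = new OwningIterator<int>(new std::vector<int>{1, 2, 3});
  int sum = 0;
  while (const int *v = iter->getNext())
    sum += *v; // consume elements one at a time
  delete iter; // frees the underlying vector too
  return sum == 6 ? 0 : 1;
}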
@@ -137,22 +84,13 @@ class SparseTensorIterator final {

 #define MEMREF_GET_PAYLOAD(MEMREF) ((MEMREF)->data + (MEMREF)->offset)

-/// Initializes the memref with the provided size and data pointer.  This
+/// Initializes the memref with the provided size and data pointer. This
 /// is designed for functions which want to "return" a memref that aliases
 /// into memory owned by some other object (e.g., `SparseTensorStorage`),
 /// without doing any actual copying. (The "return" is in scarequotes
 /// because the `_mlir_ciface_` calling convention migrates any returned
 /// memrefs into an out-parameter passed before all the other function
 /// parameters.)
-///
-/// We make this a function rather than a macro mainly for type safety
-/// reasons. This function does not modify the data pointer, but it
-/// cannot be marked `const` because it is stored into the (necessarily)
-/// non-`const` memref. This function is templated over the `DataSizeT`
-/// to work around signedness warnings due to many data types having
-/// varying signedness across different platforms. The templating allows
-/// this function to ensure that it does the right thing and never
-/// introduces errors due to implicit conversions.
 template <typename DataSizeT, typename T>
 static inline void aliasIntoMemref(DataSizeT size, T *data,
                                    StridedMemRefType<T, 1> &ref) {
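For readers unfamiliar with the memref ABI touched in the hunk above: `aliasIntoMemref` only fills in a descriptor; no data is copied. A simplified, self-contained sketch of the idea, where `Rank1MemRef` and `aliasIntoToyMemref` are hypothetical stand-ins for `StridedMemRefType<T, 1>` and the real helper:

#include <cstdint>
#include <vector>

// Hypothetical stand-in for StridedMemRefType<T, 1> (the real descriptor
// lives in mlir/ExecutionEngine/CRunnerUtils.h).
template <typename T>
struct Rank1MemRef {
  T *basePtr;
  T *data;
  int64_t offset;
  int64_t sizes[1];
  int64_t strides[1];
};

// Point the descriptor at memory owned elsewhere; nothing is copied, so the
// memref is only valid while the owning object keeps the data alive.
template <typename T>
static void aliasIntoToyMemref(uint64_t size, T *data, Rank1MemRef<T> &ref) {
  ref.basePtr = ref.data = data;
  ref.offset = 0;
  ref.sizes[0] = static_cast<int64_t>(size);
  ref.strides[0] = 1;
}

int main() {
  std::vector<double> owned = {1.0, 2.0, 3.0}; // e.g., a storage buffer
  Rank1MemRef<double> ref;
  aliasIntoToyMemref(owned.size(), owned.data(), ref);
  // `ref` now aliases `owned`; resizing or destroying `owned` invalidates it.
  return ref.sizes[0] == 3 ? 0 : 1;
}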
@@ -200,20 +138,11 @@ extern "C" {
           dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes, dim2lvl, lvl2dim,    \
           dimRank, tensor);                                                    \
     }                                                                          \
-    case Action::kFuture: {                                                    \
-      break;                                                                   \
-    }                                                                          \
     case Action::kToCOO: {                                                     \
       assert(ptr && "Received nullptr for SparseTensorStorage object");        \
       auto &tensor = *static_cast<SparseTensorStorage<P, C, V> *>(ptr);        \
       return tensor.toCOO(lvlRank, lvlSizes, dimRank, dim2lvl, lvl2dim);       \
     }                                                                          \
-    case Action::kToIterator: {                                                \
-      assert(ptr && "Received nullptr for SparseTensorStorage object");        \
-      auto &tensor = *static_cast<SparseTensorStorage<P, C, V> *>(ptr);        \
-      auto *coo = tensor.toCOO(lvlRank, lvlSizes, dimRank, dim2lvl, lvl2dim);  \
-      return new SparseTensorIterator<V>(coo);                                 \
-    }                                                                          \
     case Action::kPack: {                                                      \
       assert(ptr && "Received nullptr for SparseTensorStorage object");        \
       intptr_t *buffers = static_cast<intptr_t *>(ptr);                        \
@@ -372,7 +301,6 @@ void *_mlir_ciface_newSparseTensor( // NOLINT
   CASE_SECSAME(OverheadType::kU64, PrimaryType::kC32, uint64_t, complex32);

   // Unsupported case (add above if needed).
-  // TODO: better pretty-printing of enum values!
   MLIR_SPARSETENSOR_FATAL(
       "unsupported combination of types: <P=%d, C=%d, V=%d>\n",
       static_cast<int>(posTp), static_cast<int>(crdTp),
@@ -428,29 +356,6 @@ MLIR_SPARSETENSOR_FOREVERY_O(IMPL_SPARSECOORDINATES)
 MLIR_SPARSETENSOR_FOREVERY_V(IMPL_FORWARDINGINSERT)
 #undef IMPL_FORWARDINGINSERT

-// NOTE: the `cref` argument uses the same coordinate-space as the `iter`
-// (which can be either dim- or lvl-coords, depending on context).
-#define IMPL_GETNEXT(VNAME, V)                                                 \
-  bool _mlir_ciface_getNext##VNAME(void *iter,                                 \
-                                   StridedMemRefType<index_type, 1> *cref,     \
-                                   StridedMemRefType<V, 0> *vref) {            \
-    assert(iter && vref);                                                      \
-    ASSERT_NO_STRIDE(cref);                                                    \
-    index_type *coords = MEMREF_GET_PAYLOAD(cref);                             \
-    V *value = MEMREF_GET_PAYLOAD(vref);                                       \
-    const uint64_t rank = MEMREF_GET_USIZE(cref);                              \
-    const Element<V> *elem =                                                   \
-        static_cast<SparseTensorIterator<V> *>(iter)->getNext();               \
-    if (elem == nullptr)                                                       \
-      return false;                                                            \
-    for (uint64_t d = 0; d < rank; d++)                                        \
-      coords[d] = elem->coords[d];                                             \
-    *value = elem->value;                                                      \
-    return true;                                                               \
-  }
-MLIR_SPARSETENSOR_FOREVERY_V(IMPL_GETNEXT)
-#undef IMPL_GETNEXT
-
 #define IMPL_LEXINSERT(VNAME, V)                                               \
   void _mlir_ciface_lexInsert##VNAME(                                          \
       void *t, StridedMemRefType<index_type, 1> *lvlCoordsRef,                 \
@@ -636,7 +541,6 @@ void *_mlir_ciface_newSparseTensorFromReader(
   CASE_SECSAME(kU64, kC32, uint64_t, complex32);

   // Unsupported case (add above if needed).
-  // TODO: better pretty-printing of enum values!
   MLIR_SPARSETENSOR_FATAL(
       "unsupported combination of types: <P=%d, C=%d, V=%d>\n",
       static_cast<int>(posTp), static_cast<int>(crdTp),
@@ -701,7 +605,7 @@ void endLexInsert(void *tensor) {

 #define IMPL_OUTSPARSETENSOR(VNAME, V)                                         \
   void outSparseTensor##VNAME(void *coo, void *dest, bool sort) {              \
-    assert(coo && "Got nullptr for COO object");                               \
+    assert(coo);                                                               \
     auto &coo_ = *static_cast<SparseTensorCOO<V> *>(coo);                      \
     if (sort)                                                                  \
       coo_.sort();                                                             \
@@ -721,13 +625,6 @@ void delSparseTensor(void *tensor) {
 MLIR_SPARSETENSOR_FOREVERY_V(IMPL_DELCOO)
 #undef IMPL_DELCOO

-#define IMPL_DELITER(VNAME, V)                                                 \
-  void delSparseTensorIterator##VNAME(void *iter) {                            \
-    delete static_cast<SparseTensorIterator<V> *>(iter);                       \
-  }
-MLIR_SPARSETENSOR_FOREVERY_V(IMPL_DELITER)
-#undef IMPL_DELITER
-
 char *getTensorFilename(index_type id) {
   constexpr size_t BUF_SIZE = 80;
   char var[BUF_SIZE];
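For reference, the deleted `getNext`/`delSparseTensorIterator` pair implemented a C-style pull protocol: fill caller-provided coordinate/value buffers, return false on exhaustion, then free the iterator with a separate call. A toy, self-contained sketch of that protocol (all names here are hypothetical stand-ins, not the removed runtime symbols, which were stamped out per value type by MLIR_SPARSETENSOR_FOREVERY_V):

#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

// Hypothetical iterator state standing in for the removed wrapper.
struct ToyIter {
  std::vector<std::pair<std::vector<uint64_t>, double>> elems;
  size_t pos = 0;
};

// Fill caller-provided buffers with the next (coords, value) pair;
// return false once the iterator is exhausted.
static bool toyGetNext(void *iter, uint64_t *coords, double *value) {
  auto *it = static_cast<ToyIter *>(iter);
  if (it->pos == it->elems.size())
    return false;
  const auto &[c, v] = it->elems[it->pos++];
  for (size_t d = 0; d < c.size(); d++)
    coords[d] = c[d];
  *value = v;
  return true;
}

// The iterator had to be freed explicitly by a separate call.
static void toyDelIter(void *iter) { delete static_cast<ToyIter *>(iter); }

int main() {
  void *iter = new ToyIter{{{{0, 1}, 3.0}, {{2, 2}, 5.0}}};
  uint64_t coords[2];
  double value;
  while (toyGetNext(iter, coords, &value)) // drain all elements
    std::printf("(%llu, %llu) = %g\n", (unsigned long long)coords[0],
                (unsigned long long)coords[1], value);
  toyDelIter(iter); // forgetting this leaked the underlying COO
  return 0;
}

Since nothing upstream generated calls to this protocol anymore, the patch drops it entirely in favor of the remaining kToCOO path.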