nicolasvasilache created this revision.
Herald added subscribers: bviyer, hanchung, Moerafaat, bzcheeseman, sdasgup3, 
wenzhicui, wrengr, cota, teijeong, rdzhabarov, tatianashp, msifontes, jurahul, 
Kayjukh, grosul1, Joonsoo, liufengdb, aartbik, mgester, arpith-jacob, 
antiagainst, shauheen, rriddle, mehdi_amini, thopre, bixia.
Herald added a project: All.
nicolasvasilache requested review of this revision.
Herald added subscribers: cfe-commits, stephenneuendorffer.
Herald added projects: clang, MLIR.

These old patterns are not in use in MLIR or in downstream projects, except
for one test.
Additionally, these patterns are redundant with the logic in the tensor.pad
tiling implementation.

Drop SplitPaddingPatterns to reduce entropy.


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D148207

Files:
  clang/docs/tools/clang-formatted-files.txt
  mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h
  mlir/lib/Dialect/Tensor/Transforms/CMakeLists.txt
  mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp
  mlir/test/Dialect/Tensor/split-padding.mlir
  mlir/test/lib/Dialect/Tensor/TestTensorTransforms.cpp

Index: mlir/test/lib/Dialect/Tensor/TestTensorTransforms.cpp
===================================================================
--- mlir/test/lib/Dialect/Tensor/TestTensorTransforms.cpp
+++ mlir/test/lib/Dialect/Tensor/TestTensorTransforms.cpp
@@ -43,11 +43,6 @@
 
   void runOnOperation() override;
 
-  Option<bool> testSplitPaddingPatterns{
-      *this, "test-split-padding-patterns",
-      llvm::cl::desc("Test patterns to split tensor.pad ops"),
-      llvm::cl::init(false)};
-
   Option<bool> testFoldConstantExtractSlice{
       *this, "test-fold-constant-extract-slice",
       llvm::cl::desc("Test folding arith.constant and tensor.extract_slice"),
@@ -111,12 +106,6 @@
   (void)applyPatternsAndFoldGreedily(rootOp, std::move(patterns));
 }
 
-static void applySplitPaddingPatterns(Operation *rootOp) {
-  RewritePatternSet patterns(rootOp->getContext());
-  tensor::populateSplitPaddingPatterns(patterns);
-  (void)applyPatternsAndFoldGreedily(rootOp, std::move(patterns));
-}
-
 static void applyFoldConstantExtractSlicePatterns(Operation *rootOp) {
   RewritePatternSet patterns(rootOp->getContext());
   tensor::ControlConstantExtractSliceFusionFn controlFn =
@@ -291,8 +280,6 @@
   Operation *rootOp = getOperation();
   if (testSimplifyPackPatterns)
     applySimplifyPackPatterns(rootOp);
-  if (testSplitPaddingPatterns)
-    applySplitPaddingPatterns(rootOp);
   if (testFoldConstantExtractSlice)
     applyFoldConstantExtractSlicePatterns(rootOp);
   if (testFoldConsecutiveInsertExtractSlice)
Index: mlir/test/Dialect/Tensor/split-padding.mlir
===================================================================
--- mlir/test/Dialect/Tensor/split-padding.mlir
+++ /dev/null
@@ -1,44 +0,0 @@
-// RUN: mlir-opt -split-input-file -test-tensor-transform-patterns=test-split-padding-patterns %s | FileCheck %s
-
-// CHECK-LABEL: func @pad_all_zero_sizes
-func.func @pad_all_zero_sizes(%input: tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
-  %f0 = arith.constant 0.0 : f32
-  %c0 = arith.constant 0 : index
-  %0 = tensor.pad %input low[0, %c0, 0] high[%c0, 0, 0] {
-  ^bb0(%dim0: index, %dim1: index, %dim2: index):
-    tensor.yield %f0 : f32
-  } : tensor<?x?x?xf32> to tensor<?x?x?xf32>
-  return %0 : tensor<?x?x?xf32>
-}
-
-// CHECK-NOT: scf.if
-//     CHECK: tensor.pad
-
-// -----
-
-// CHECK-LABEL: func @pad_non_zero_sizes
-//  CHECK-SAME: (%[[INPUT:.+]]: tensor<?x?x8xf32>, %[[LOW0:.+]]: index, %[[HIGH1:.+]]: index)
-func.func @pad_non_zero_sizes(%input: tensor<?x?x8xf32>, %low0: index, %high1: index) -> tensor<?x?x8xf32> {
-  %f0 = arith.constant 0.0 : f32
-  %0 = tensor.pad %input low[%low0, 0, 0] high[0, %high1, 0] {
-  ^bb0(%dim0: index, %dim1: index, %dim2: index):
-    tensor.yield %f0 : f32
-  } : tensor<?x?x8xf32> to tensor<?x?x8xf32>
-  return %0 : tensor<?x?x8xf32>
-}
-
-// CHECK-DAG: %[[F0:.+]] = arith.constant 0.000000e+00 : f32
-// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
-// CHECK: %[[EQ0:.+]] = arith.cmpi eq, %[[LOW0]], %[[C0]] : index
-// CHECK: %[[EQ1:.+]] = arith.cmpi eq, %[[HIGH1]], %[[C0]] : index
-// CHECK: %[[AND:.+]] = arith.andi %[[EQ0]], %[[EQ1]] : i1
-// CHECK: %[[IF:.+]] = scf.if %[[AND]] -> (tensor<?x?x8xf32>) {
-// CHECK:   scf.yield %[[INPUT]] : tensor<?x?x8xf32>
-// CHECK: } else {
-// CHECK:   %[[PAD:.+]] = tensor.pad %[[INPUT]] low[%[[LOW0]], 0, 0] high[0, %[[HIGH1]], 0]  {
-// CHECK:   ^bb0(%{{.+}}: index, %{{.+}}: index, %{{.+}}: index):
-// CHECK:     tensor.yield %[[F0]] : f32
-// CHECK:   } : tensor<?x?x8xf32> to tensor<?x?x8xf32>
-// CHECK:   scf.yield %[[PAD]] : tensor<?x?x8xf32>
-// CHECK: }
-// CHECK: return %[[IF]] : tensor<?x?x8xf32>
Index: mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp
===================================================================
--- mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp
+++ /dev/null
@@ -1,95 +0,0 @@
-//===- SplitPaddingPatterns.cpp - Splitting tensor.pad Op -----------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements patterns to wrap a tensor.pad op with an scf.if op
-/// to separate the cases where we don't need padding (all pad sizes are
-/// actually zeros) and where we indeed need padding.
-//
-//===----------------------------------------------------------------------===//
-
-#include "mlir/Dialect/Arith/IR/Arith.h"
-#include "mlir/Dialect/SCF/IR/SCF.h"
-#include "mlir/Dialect/Tensor/IR/Tensor.h"
-#include "mlir/Dialect/Tensor/Transforms/Transforms.h"
-#include "mlir/Dialect/Utils/StaticValueUtils.h"
-#include "mlir/IR/PatternMatch.h"
-#include "llvm/Support/Debug.h"
-
-#define DEBUG_TYPE "mlir-tensor-split-padding"
-
-using namespace mlir;
-
-/// Returns true if the the given `attrOrValue` is a constant zero.
-static bool isZero(OpFoldResult attrOrValue) {
-  if (std::optional<int64_t> val = getConstantIntValue(attrOrValue))
-    return *val == 0;
-  return false;
-}
-
-/// Gets the given `attrOrValue` as a Value by creating constant ops for
-/// attributes.
-static Value getAsValue(OpFoldResult attrOrValue, OpBuilder &builder,
-                        Location loc) {
-  if (Value val = attrOrValue.dyn_cast<Value>())
-    return val;
-  auto attr = attrOrValue.get<Attribute>().cast<IntegerAttr>();
-  return builder.create<arith::ConstantIndexOp>(loc, attr.getInt());
-}
-
-namespace {
-
-struct SplitPadding final : public OpRewritePattern<tensor::PadOp> {
-  using OpRewritePattern::OpRewritePattern;
-
-  LogicalResult matchAndRewrite(tensor::PadOp padOp,
-                                PatternRewriter &rewriter) const override {
-    // Avoid infinitely applying this pattern.
-    if (padOp->getParentOfType<scf::IfOp>())
-      return failure();
-
-    // If all padding sizes are zero, we don't need to do anything.
-    SmallVector<OpFoldResult> lowPads = padOp.getMixedLowPad();
-    SmallVector<OpFoldResult> highPads = padOp.getMixedHighPad();
-    if (llvm::all_of(lowPads, isZero) && llvm::all_of(highPads, isZero))
-      return failure();
-
-    // Build the condition for the scf.if op: all pad sizes are zero.
-    Location loc = padOp.getLoc();
-    Value cstZero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
-    SmallVector<Value> eqZeroCmpVals;
-    for (OpFoldResult pad : llvm::concat<OpFoldResult>(lowPads, highPads)) {
-      if (!isZero(pad))
-        eqZeroCmpVals.push_back(rewriter.create<arith::CmpIOp>(
-            loc, arith::CmpIPredicate::eq, getAsValue(pad, rewriter, loc),
-            cstZero));
-    }
-    Value ifCond = eqZeroCmpVals.front();
-    for (Value cmp : llvm::ArrayRef(eqZeroCmpVals).drop_front())
-      ifCond = rewriter.create<arith::AndIOp>(loc, ifCond, cmp);
-
-    // Build the scf.if op itself. For the "then" branch, we can elide the
-    // padding. For the "else" branch, we retain the clone op.
-    auto thenBuilder = [&padOp](OpBuilder &builder, Location loc) {
-      builder.create<scf::YieldOp>(loc, padOp.getSource());
-    };
-    auto elseBuilder = [&padOp](OpBuilder &builder, Location loc) {
-      Operation *newOp = builder.clone(*padOp);
-      builder.create<scf::YieldOp>(loc, newOp->getResults());
-    };
-    rewriter.replaceOpWithNewOp<scf::IfOp>(padOp, ifCond, thenBuilder,
-                                           elseBuilder);
-    return success();
-  }
-};
-
-} // namespace
-
-void tensor::populateSplitPaddingPatterns(RewritePatternSet &patterns,
-                                          PatternBenefit baseBenefit) {
-  patterns.add<SplitPadding>(patterns.getContext(), baseBenefit);
-}
Index: mlir/lib/Dialect/Tensor/Transforms/CMakeLists.txt
===================================================================
--- mlir/lib/Dialect/Tensor/Transforms/CMakeLists.txt
+++ mlir/lib/Dialect/Tensor/Transforms/CMakeLists.txt
@@ -7,7 +7,6 @@
   FoldTensorSubsetOps.cpp
   MergeConsecutiveInsertExtractSlicePatterns.cpp
   ReshapePatterns.cpp
-  SplitPaddingPatterns.cpp
   SwapExtractSliceWithProducerPatterns.cpp
 
   ADDITIONAL_HEADER_DIRS
Index: mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h
===================================================================
--- mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h
+++ mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h
@@ -41,12 +41,6 @@
 /// ops into `patterns`.
 void populateFoldTensorSubsetOpPatterns(RewritePatternSet &patterns);
 
-/// Populates `patterns` with patterns to wrap a tensor.pad op with an scf.if op
-/// to separate the cases where we don't need padding (all pad sizes are
-/// actually zeros) and where we indeed need padding.
-void populateSplitPaddingPatterns(RewritePatternSet &patterns,
-                                  PatternBenefit baseBenefit = 1);
-
 /// Collects patterns to merge consecutive tensor.insert_slice/extract_slice
 /// into one. These patterns are in in this separate entry point because the
 /// bufferization is sensitive over IR structure, particularly those
Index: clang/docs/tools/clang-formatted-files.txt
===================================================================
--- clang/docs/tools/clang-formatted-files.txt
+++ clang/docs/tools/clang-formatted-files.txt
@@ -8250,7 +8250,6 @@
 mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
 mlir/lib/Dialect/Tensor/Transforms/Bufferize.cpp
 mlir/lib/Dialect/Tensor/Transforms/PassDetail.h
-mlir/lib/Dialect/Tensor/Transforms/SplitPadding.cpp
 mlir/lib/Dialect/Tensor/Utils/Utils.cpp
 mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
 mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
  • [PATCH] D148207: [mlir][... Nicolas Vasilache via Phabricator via cfe-commits

Reply via email to