Author: Nikita Popov
Date: 2025-02-11T14:15:45-08:00
New Revision: 5777d5df62a659e165b4df74aefae29ae01d2509
URL: https://github.com/llvm/llvm-project/commit/5777d5df62a659e165b4df74aefae29ae01d2509
DIFF: https://github.com/llvm/llvm-project/commit/5777d5df62a659e165b4df74aefae29ae01d2509.diff

LOG: [ValueTracking] Fix bit width handling in computeKnownBits() for GEPs (#125532)

For GEPs, we have three bit widths involved: The pointer bit width, the
index bit width, and the bit width of the GEP operands.

The correct behavior here is:
* We need to sextOrTrunc the GEP operand to the index width *before*
  multiplying by the scale.
* If the index width and pointer width differ, GEP only ever modifies
  the low bits. Adds should not overflow into the high bits.

I'm testing this via unit tests because it's a bit tricky to test in IR
with InstCombine canonicalization getting in the way.

(cherry picked from commit 3bd11b502c1846afa5e1257c94b7a70566e34686)

Added: 


Modified: 
    llvm/lib/Analysis/ValueTracking.cpp
    llvm/unittests/Analysis/ValueTrackingTest.cpp

Removed: 



################################################################################
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index b63a0a07f7de2..8a674914641a8 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1445,7 +1445,22 @@ static void computeKnownBitsFromOperator(const Operator *I,
     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
     // Accumulate the constant indices in a separate variable
     // to minimize the number of calls to computeForAddSub.
-    APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);
+    unsigned IndexWidth = Q.DL.getIndexTypeSizeInBits(I->getType());
+    APInt AccConstIndices(IndexWidth, 0);
+
+    auto AddIndexToKnown = [&](KnownBits IndexBits) {
+      if (IndexWidth == BitWidth) {
+        // Note that inbounds does *not* guarantee nsw for the addition, as only
+        // the offset is signed, while the base address is unsigned.
+        Known = KnownBits::add(Known, IndexBits);
+      } else {
+        // If the index width is smaller than the pointer width, only add the
+        // value to the low bits.
+        assert(IndexWidth < BitWidth &&
+               "Index width can't be larger than pointer width");
+        Known.insertBits(KnownBits::add(Known.trunc(IndexWidth), IndexBits), 0);
+      }
+    };
 
     gep_type_iterator GTI = gep_type_begin(I);
     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
@@ -1483,43 +1498,34 @@ static void computeKnownBitsFromOperator(const Operator *I,
         break;
       }
 
-      unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
-      KnownBits IndexBits(IndexBitWidth);
-      computeKnownBits(Index, IndexBits, Depth + 1, Q);
-      TypeSize IndexTypeSize = GTI.getSequentialElementStride(Q.DL);
-      uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinValue();
-      KnownBits ScalingFactor(IndexBitWidth);
+      TypeSize Stride = GTI.getSequentialElementStride(Q.DL);
+      uint64_t StrideInBytes = Stride.getKnownMinValue();
+      if (!Stride.isScalable()) {
+        // Fast path for constant offset.
+        if (auto *CI = dyn_cast<ConstantInt>(Index)) {
+          AccConstIndices +=
+              CI->getValue().sextOrTrunc(IndexWidth) * StrideInBytes;
+          continue;
+        }
+      }
+
+      KnownBits IndexBits =
+          computeKnownBits(Index, Depth + 1, Q).sextOrTrunc(IndexWidth);
+      KnownBits ScalingFactor(IndexWidth);
       // Multiply by current sizeof type.
       // &A[i] == A + i * sizeof(*A[i]).
-      if (IndexTypeSize.isScalable()) {
+      if (Stride.isScalable()) {
         // For scalable types the only thing we know about sizeof is
         // that this is a multiple of the minimum size.
-        ScalingFactor.Zero.setLowBits(llvm::countr_zero(TypeSizeInBytes));
-      } else if (IndexBits.isConstant()) {
-        APInt IndexConst = IndexBits.getConstant();
-        APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
-        IndexConst *= ScalingFactor;
-        AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
-        continue;
+        ScalingFactor.Zero.setLowBits(llvm::countr_zero(StrideInBytes));
       } else {
         ScalingFactor =
-            KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
+            KnownBits::makeConstant(APInt(IndexWidth, StrideInBytes));
       }
-      IndexBits = KnownBits::mul(IndexBits, ScalingFactor);
-
-      // If the offsets have a different width from the pointer, according
-      // to the language reference we need to sign-extend or truncate them
-      // to the width of the pointer.
-      IndexBits = IndexBits.sextOrTrunc(BitWidth);
-
-      // Note that inbounds does *not* guarantee nsw for the addition, as only
-      // the offset is signed, while the base address is unsigned.
-      Known = KnownBits::add(Known, IndexBits);
-    }
-    if (!Known.isUnknown() && !AccConstIndices.isZero()) {
-      KnownBits Index = KnownBits::makeConstant(AccConstIndices);
-      Known = KnownBits::add(Known, Index);
+      AddIndexToKnown(KnownBits::mul(IndexBits, ScalingFactor));
     }
+    if (!Known.isUnknown() && !AccConstIndices.isZero())
+      AddIndexToKnown(KnownBits::makeConstant(AccConstIndices));
     break;
   }
   case Instruction::PHI: {
diff --git a/llvm/unittests/Analysis/ValueTrackingTest.cpp b/llvm/unittests/Analysis/ValueTrackingTest.cpp
index 39865fa195cf7..50e5e0e6b2ff5 100644
--- a/llvm/unittests/Analysis/ValueTrackingTest.cpp
+++ b/llvm/unittests/Analysis/ValueTrackingTest.cpp
@@ -2680,7 +2680,7 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsAbsoluteSymbol) {
 }
 
 TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPExtendBeforeMul) {
-  // FIXME: The index should be extended before multiplying with the scale.
+  // The index should be extended before multiplying with the scale.
   parseAssembly(R"(
     target datalayout = "p:16:16:16"
 
@@ -2692,12 +2692,12 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPExtendBeforeMul) {
     }
   )");
   KnownBits Known = computeKnownBits(A, M->getDataLayout());
-  EXPECT_EQ(~64 & 0x7fff, Known.Zero);
-  EXPECT_EQ(64, Known.One);
+  EXPECT_EQ(~320 & 0x7fff, Known.Zero);
+  EXPECT_EQ(320, Known.One);
 }
 
 TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPOnlyIndexBits) {
-  // FIXME: GEP should only affect the index width.
+  // GEP should only affect the index width.
   parseAssembly(R"(
     target datalayout = "p:16:16:16:8"
 
@@ -2710,8 +2710,8 @@
     }
   )");
   KnownBits Known = computeKnownBits(A, M->getDataLayout());
-  EXPECT_EQ(0x7eff, Known.Zero);
-  EXPECT_EQ(0x100, Known.One);
+  EXPECT_EQ(0x7fff, Known.Zero);
+  EXPECT_EQ(0, Known.One);
 }
 
 TEST_F(ValueTrackingTest, HaveNoCommonBitsSet) {

_______________________________________________
llvm-branch-commits mailing list
llvm-branch-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits
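[Editor's sketch, not part of the commit or the email thread.] For readers who want to poke at the "only modify the low bits" behavior described in the second bullet of the log, the following standalone program mirrors what the new AddIndexToKnown lambda does when the index width is smaller than the pointer width, using LLVM's KnownBits API. The 24-bit pointer width, 16-bit index width, and the constants 0xAB0010 and 0xFFF0 are made up purely for illustration.

// Minimal sketch: add a known (already scaled) index into only the low
// IndexWidth bits of a known pointer, leaving the high bits untouched.
// Widths and values below are illustrative, not taken from the patch.
#include "llvm/ADT/APInt.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  const unsigned BitWidth = 24;   // assumed pointer width
  const unsigned IndexWidth = 16; // assumed index width from the datalayout

  // Fully known base pointer and a fully known, already-scaled index.
  KnownBits Known = KnownBits::makeConstant(APInt(BitWidth, 0xAB0010));
  KnownBits IndexBits = KnownBits::makeConstant(APInt(IndexWidth, 0xFFF0));

  // Same shape as the patch's IndexWidth < BitWidth path: truncate the
  // pointer to the index width, add, and insert the result back at bit 0.
  Known.insertBits(KnownBits::add(Known.trunc(IndexWidth), IndexBits), 0);

  outs() << "One:  " << format_hex(Known.One.getZExtValue(), 8) << "\n";
  outs() << "Zero: " << format_hex(Known.Zero.getZExtValue(), 8) << "\n";
  return 0;
}

With these inputs the 16-bit addition wraps (0x0010 + 0xFFF0 == 0x0000 in 16 bits), so One prints as 0xab0000: the high byte of the base pointer is preserved rather than picking up a carry, which is exactly the overflow-into-high-bits behavior the patch rules out.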