Commit 5a56a25

[CodeGenPrepare] Make TargetPassConfig required
The code paths in the absence of TargetMachine, TargetLowering or
TargetRegisterInfo are poorly tested. As rL285987 said, requiring
TargetPassConfig allows us to delete many (untested) checks littered
everywhere.

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D73754
1 parent 8ebe001 commit 5a56a25

13 files changed: 59 additions, 80 deletions
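
The pattern this commit adopts is the standard legacy-pass-manager idiom: declare the dependency in getAnalysisUsage(), then fetch it unconditionally with getAnalysis<>(). The earlier code used getAnalysisIfAvailable<>(), which returns null when no TargetMachine is in play and so forced null checks on TM/TLI/TRI throughout the pass. A minimal sketch of the idiom follows; SketchPass is hypothetical, while TargetPassConfig, addRequired, getAnalysis and getTM are the real legacy-PM APIs this commit uses.

    // Minimal sketch of the required-analysis idiom, assuming a
    // hypothetical SketchPass. Not the committed code.
    #include "llvm/CodeGen/TargetPassConfig.h"
    #include "llvm/Pass.h"
    #include "llvm/Target/TargetMachine.h"
    using namespace llvm;

    namespace {
    struct SketchPass : public FunctionPass {
      static char ID;
      SketchPass() : FunctionPass(ID) {}

      void getAnalysisUsage(AnalysisUsage &AU) const override {
        // Declaring the dependency means the pass cannot be scheduled
        // without a TargetMachine behind it...
        AU.addRequired<TargetPassConfig>();
      }

      bool runOnFunction(Function &F) override {
        // ...so this lookup cannot fail, unlike
        // getAnalysisIfAvailable<TargetPassConfig>(), which may return null.
        TargetMachine &TM =
            getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
        const auto *STI = TM.getSubtargetImpl(F);
        (void)STI; // a real pass would query STI->getTargetLowering() etc.
        return false;
      }
    };
    } // end anonymous namespace

    char SketchPass::ID = 0;

The diff below applies exactly this shift to CodeGenPrepare: the addRequired<> line in the first hunk, the unconditional getAnalysis<>() in runOnFunction, and then the deletion of every "if (TLI)"-style guard.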

llvm/lib/CodeGen/CodeGenPrepare.cpp

Lines changed: 44 additions & 65 deletions
@@ -327,6 +327,7 @@ class TypePromotionTransaction;
     // FIXME: When we can selectively preserve passes, preserve the domtree.
     AU.addRequired<ProfileSummaryInfoWrapperPass>();
     AU.addRequired<TargetLibraryInfoWrapperPass>();
+    AU.addRequired<TargetPassConfig>();
     AU.addRequired<TargetTransformInfoWrapperPass>();
     AU.addRequired<LoopInfoWrapperPass>();
   }
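
Not visible in this excerpt: under the legacy pass manager, an addRequired<> edge is normally paired with an INITIALIZE_PASS_DEPENDENCY entry in the pass's registration block so the dependency is initialized before the pass. A hedged sketch of what CodeGenPrepare's registration block typically looks like; whether this commit also touched that block cannot be confirmed from the lines shown here, so treat the TargetPassConfig dependency line as an assumption.

    // Hedged sketch of a legacy-PM registration block, modeled on
    // CodeGenPrepare's real one ("codegenprepare" is its DEBUG_TYPE).
    // The TargetPassConfig dependency line is an assumption.
    INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE,
                          "Optimize for code generation", false, false)
    INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
    INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
    INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE,
                        "Optimize for code generation", false, false)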
@@ -428,12 +429,10 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
   InsertedInsts.clear();
   PromotedInsts.clear();

-  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
-    TM = &TPC->getTM<TargetMachine>();
-    SubtargetInfo = TM->getSubtargetImpl(F);
-    TLI = SubtargetInfo->getTargetLowering();
-    TRI = SubtargetInfo->getRegisterInfo();
-  }
+  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
+  SubtargetInfo = TM->getSubtargetImpl(F);
+  TLI = SubtargetInfo->getTargetLowering();
+  TRI = SubtargetInfo->getRegisterInfo();
   TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
   TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
   LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
@@ -450,10 +449,9 @@ bool CodeGenPrepare::runOnFunction(Function &F) {

   /// This optimization identifies DIV instructions that can be
   /// profitably bypassed and carried out with a shorter, faster divide.
-  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI &&
-      TLI->isSlowDivBypassed()) {
+  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
     const DenseMap<unsigned int, unsigned int> &BypassWidths =
-       TLI->getBypassSlowDivWidths();
+        TLI->getBypassSlowDivWidths();
     BasicBlock* BB = &*F.begin();
     while (BB != nullptr) {
       // bypassSlowDivision may create new BBs, but we don't want to reapply the
@@ -1813,7 +1811,7 @@ static bool despeculateCountZeros(IntrinsicInst *CountZeros,
                                   const TargetLowering *TLI,
                                   const DataLayout *DL,
                                   bool &ModifiedDT) {
-  if (!TLI || !DL)
+  if (!DL)
     return false;

   // If a zero input is undefined, it doesn't make sense to despeculate that.
@@ -1877,7 +1875,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
   // Lower inline assembly if we can.
   // If we found an inline asm expession, and if the target knows how to
   //   lower it to normal LLVM code, do so now.
-  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
+  if (isa<InlineAsm>(CI->getCalledValue())) {
     if (TLI->ExpandInlineAsm(CI)) {
       // Avoid invalidating the iterator.
       CurInstIterator = BB->begin();
@@ -1894,7 +1892,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
   // Align the pointer arguments to this call if the target thinks it's a good
   // idea
   unsigned MinSize, PrefAlign;
-  if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
+  if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
     for (auto &Arg : CI->arg_operands()) {
       // We want to align both objects whose address is used directly and
       // objects whose address is used in casts and GEPs, though it only makes
@@ -2028,17 +2026,15 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
       }
     }

-    if (TLI) {
-      SmallVector<Value*, 2> PtrOps;
-      Type *AccessTy;
-      if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
-        while (!PtrOps.empty()) {
-          Value *PtrVal = PtrOps.pop_back_val();
-          unsigned AS = PtrVal->getType()->getPointerAddressSpace();
-          if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
-            return true;
-        }
-    }
+    SmallVector<Value *, 2> PtrOps;
+    Type *AccessTy;
+    if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
+      while (!PtrOps.empty()) {
+        Value *PtrVal = PtrOps.pop_back_val();
+        unsigned AS = PtrVal->getType()->getPointerAddressSpace();
+        if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
+          return true;
+      }
   }

   // From here on out we're working with named functions.
@@ -2089,9 +2085,6 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
 /// ret i32 %tmp2
 /// @endcode
 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT) {
-  if (!TLI)
-    return false;
-
   ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
   if (!RetI)
     return false;
@@ -4907,7 +4900,7 @@ bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
     if (SunkAddr->getType() != Addr->getType())
       SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
   } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
-                                   TM && SubtargetInfo->addrSinkUsingGEPs())) {
+                                   SubtargetInfo->addrSinkUsingGEPs())) {
     // By default, we use the GEP-based method when AA is used later. This
     // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
     LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
@@ -5258,7 +5251,7 @@ bool CodeGenPrepare::tryToPromoteExts(
   // this check inside the for loop is to catch the case where an extension
   // is directly fed by a load because in such case the extension can be moved
   // up without any promotion on its operands.
-  if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion)
+  if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion)
     return false;

   // Get the action to perform the promotion.
@@ -5583,11 +5576,6 @@ bool CodeGenPrepare::canFormExtLd(
 /// \p Inst[in/out] the extension may be modified during the process if some
 /// promotions apply.
 bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
-  // ExtLoad formation and address type promotion infrastructure requires TLI to
-  // be effective.
-  if (!TLI)
-    return false;
-
   bool AllowPromotionWithoutCommonHeader = false;
   /// See if it is an interesting sext operations for the address type
   /// promotion before trying to promote it, e.g., the ones with the right
@@ -5717,7 +5705,7 @@ bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
     return false;

   // Only do this xform if truncating is free.
-  if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
+  if (!TLI->isTruncateFree(I->getType(), Src->getType()))
     return false;

   // Only safe to perform the optimization if the source is also defined in
@@ -6064,9 +6052,8 @@ bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) {
 /// turn it into a branch.
 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
   // If branch conversion isn't desirable, exit early.
-  if (DisableSelectToBranch ||
-      OptSize || llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get()) ||
-      !TLI)
+  if (DisableSelectToBranch || OptSize ||
+      llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get()))
     return false;

   // Find all consecutive select instructions that share the same condition.
@@ -6252,7 +6239,7 @@ bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
   BasicBlock *DefBB = SVI->getParent();

   // Only do this xform if variable vector shifts are particularly expensive.
-  if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType()))
+  if (!TLI->isVectorShiftByScalarCheap(SVI->getType()))
     return false;

   // We only expect better codegen by sinking a shuffle if we can recognise a
@@ -6304,7 +6291,7 @@ bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
   // If the operands of I can be folded into a target instruction together with
   // I, duplicate and sink them.
   SmallVector<Use *, 4> OpsToSink;
-  if (!TLI || !TLI->shouldSinkOperands(I, OpsToSink))
+  if (!TLI->shouldSinkOperands(I, OpsToSink))
     return false;

   // OpsToSink can contain multiple uses in a use chain (e.g.
@@ -6357,7 +6344,7 @@ bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
 }

 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
-  if (!TLI || !DL)
+  if (!DL)
     return false;

   Value *Cond = SI->getCondition();
@@ -6723,7 +6710,7 @@ void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
 /// has this feature and this is profitable.
 bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
   unsigned CombineCost = std::numeric_limits<unsigned>::max();
-  if (DisableStoreExtract || !TLI ||
+  if (DisableStoreExtract ||
       (!StressStoreExtract &&
        !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
                                        Inst->getOperand(1), CombineCost)))
@@ -7092,16 +7079,15 @@ bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
     if (isa<Constant>(CI->getOperand(0)))
       return false;

-    if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL))
+    if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
       return true;

     if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
       /// Sink a zext or sext into its user blocks if the target type doesn't
       /// fit in one register
-      if (TLI &&
-          TLI->getTypeAction(CI->getContext(),
+      if (TLI->getTypeAction(CI->getContext(),
                              TLI->getValueType(*DL, CI->getType())) ==
-              TargetLowering::TypeExpandInteger) {
+          TargetLowering::TypeExpandInteger) {
         return SinkCast(CI);
       } else {
         bool MadeChange = optimizeExt(I);
@@ -7112,30 +7098,24 @@ bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
     }

   if (auto *Cmp = dyn_cast<CmpInst>(I))
-    if (TLI && optimizeCmp(Cmp, ModifiedDT))
+    if (optimizeCmp(Cmp, ModifiedDT))
       return true;

   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
     LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
-    if (TLI) {
-      bool Modified = optimizeLoadExt(LI);
-      unsigned AS = LI->getPointerAddressSpace();
-      Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
-      return Modified;
-    }
-    return false;
+    bool Modified = optimizeLoadExt(LI);
+    unsigned AS = LI->getPointerAddressSpace();
+    Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
+    return Modified;
   }

   if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
-    if (TLI && splitMergedValStore(*SI, *DL, *TLI))
+    if (splitMergedValStore(*SI, *DL, *TLI))
       return true;
     SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
-    if (TLI) {
-      unsigned AS = SI->getPointerAddressSpace();
-      return optimizeMemoryInst(I, SI->getOperand(1),
-                                SI->getOperand(0)->getType(), AS);
-    }
-    return false;
+    unsigned AS = SI->getPointerAddressSpace();
+    return optimizeMemoryInst(I, SI->getOperand(1),
+                              SI->getOperand(0)->getType(), AS);
   }

   if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
@@ -7152,15 +7132,14 @@ bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {

   BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);

-  if (BinOp && (BinOp->getOpcode() == Instruction::And) &&
-      EnableAndCmpSinking && TLI)
+  if (BinOp && (BinOp->getOpcode() == Instruction::And) && EnableAndCmpSinking)
     return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);

   // TODO: Move this into the switch on opcode - it handles shifts already.
   if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                 BinOp->getOpcode() == Instruction::LShr)) {
     ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
-    if (TLI && CI && TLI->hasExtractBitsInsn())
+    if (CI && TLI->hasExtractBitsInsn())
       if (OptimizeExtractBits(BinOp, CI, *TLI, *DL))
         return true;
   }
@@ -7239,7 +7218,7 @@ bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) {
   }

   bool MadeBitReverse = true;
-  while (TLI && MadeBitReverse) {
+  while (MadeBitReverse) {
     MadeBitReverse = false;
     for (auto &I : reverse(BB)) {
       if (makeBitReverse(I, *DL, *TLI)) {
@@ -7351,7 +7330,7 @@ static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
 /// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
 ///
 bool CodeGenPrepare::splitBranchCondition(Function &F, bool &ModifiedDT) {
-  if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive())
+  if (!TM->Options.EnableFastISel || TLI->isJumpExpensive())
     return false;

   bool MadeChange = false;
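
Every test update below follows from the same constraint: once TargetPassConfig is required, opt can only schedule -codegenprepare when it can construct a TargetMachine, so RUN lines gain an explicit -mtriple and several tests move from Generic directories into target-specific ones. A minimal sketch of the new-style test; the file contents here are invented for illustration and are not part of this commit.

    ; Hypothetical minimal test, not from this commit. Without -mtriple
    ; (and with no triple in the module), -codegenprepare can no longer
    ; be scheduled, because TargetPassConfig needs a TargetMachine.
    ; RUN: opt -mtriple=x86_64 -codegenprepare -S < %s | FileCheck %s

    define i32 @id(i32 %x) {
    ; CHECK-LABEL: @id(
    entry:
      ret i32 %x
    }

The RUN-line diffs that follow are the commit's actual changes; the sketch above is only context.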

llvm/test/CodeGen/AArch64/sve-vscale.ll

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 ; RUN: llc -mtriple aarch64 -mattr=+sve -asm-verbose=0 < %s | FileCheck %s
-; RUN: opt -codegenprepare -S < %s | llc -mtriple aarch64 -mattr=+sve -asm-verbose=0 | FileCheck %s
+; RUN: opt -mtriple=aarch64 -codegenprepare -S < %s | llc -mtriple=aarch64 -mattr=+sve -asm-verbose=0 | FileCheck %s

 ;
 ; RDVL

llvm/test/CodeGen/PowerPC/splitstore-check-volatile.ll

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 ; Test that CodeGenPrepare respect the volatile flag when splitting a store.
 ;
-; RUN: opt -S -codegenprepare -force-split-store < %s | FileCheck %s
+; RUN: opt -S -mtriple=powerpc64le -codegenprepare -force-split-store < %s | FileCheck %s

 define void @fun(i16* %Src, i16* %Dst) {
 ; CHECK: store volatile i16 %8, i16* %Dst

llvm/test/CodeGen/Generic/dont-remove-empty-preheader.ll renamed to llvm/test/CodeGen/X86/dont-remove-empty-preheader.ll

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-; RUN: opt -codegenprepare -S < %s | FileCheck %s
+; RUN: opt -mtriple=x86_64 -codegenprepare -S < %s | FileCheck %s
 ; CHECK: for.body.preheader

 @N = common global i32 0, align 4

llvm/test/DebugInfo/Generic/codegenprep-value.ll renamed to llvm/test/DebugInfo/X86/codegenprep-value.ll

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-; RUN: opt -codegenprepare -S %s | FileCheck %s
+; RUN: opt -S -mtriple=x86_64 -codegenprepare %s | FileCheck %s
 ;
 ; Generated from the following source with:
 ; clang -O2 -g -S -emit-llvm -mllvm -stop-after=indirectbr-expand test.cpp

llvm/test/DebugInfo/Generic/sunk-compare.ll renamed to llvm/test/DebugInfo/X86/sunk-compare.ll

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-; RUN: opt -S -codegenprepare < %s | FileCheck %s
+; RUN: opt -S -mtriple=x86_64 -codegenprepare < %s | FileCheck %s
 ;
 ; This test case has been generated by hand but is inspired by the
 ; observation that compares that are sunk into the basic blocks where

llvm/test/Other/2007-04-24-eliminate-mostly-empty-blocks.ll

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-;RUN: opt < %s -codegenprepare -S | FileCheck %s
+;RUN: opt < %s -codegenprepare -S -mtriple=x86_64 | FileCheck %s

 ;CHECK: define void @foo()
 ;CHECK-NEXT: entry:

llvm/test/Transforms/CodeGenPrepare/bitreverse-hang.ll renamed to llvm/test/Transforms/CodeGenPrepare/X86/bitreverse-hang.ll

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-unroll -codegenprepare -S | FileCheck %s
+; RUN: opt < %s -loop-unroll -codegenprepare -S -mtriple=x86_64 | FileCheck %s

 ; This test is a worst-case scenario for bitreversal/byteswap detection.
 ; After loop unrolling (the unrolled loop is unreadably large so it has been kept

llvm/test/Transforms/CodeGenPrepare/gep-unmerging.ll renamed to llvm/test/Transforms/CodeGenPrepare/X86/gep-unmerging.ll

Lines changed: 4 additions & 4 deletions
@@ -1,4 +1,4 @@
-; RUN: opt -codegenprepare -S < %s | FileCheck %s
+; RUN: opt -codegenprepare -S -mtriple=x86_64 < %s | FileCheck %s

 @exit_addr = constant i8* blockaddress(@gep_unmerging, %exit)
 @op1_addr = constant i8* blockaddress(@gep_unmerging, %op1)
@@ -25,8 +25,8 @@ entry:

 op1:
 ; CHECK-LABEL: op1:
-; CHECK-NEXT: %p1_inc2 = getelementptr i8, i8* %p_postinc, i64 2
-; CHECK-NEXT: %p1_inc1 = getelementptr i8, i8* %p_postinc, i64 1
+; CHECK-NEXT: %p1_inc2 = getelementptr i8, i8* %p_preinc, i64 3
+; CHECK-NEXT: %p1_inc1 = getelementptr i8, i8* %p_preinc, i64 2
   %p1_inc2 = getelementptr i8, i8* %p_preinc, i64 3
   %p1_inc1 = getelementptr i8, i8* %p_preinc, i64 2
   %a10 = load i8, i8* %p_postinc
@@ -37,7 +37,7 @@ op1:

 op2:
 ; CHECK-LABEL: op2:
-; CHECK-NEXT: %p2_inc = getelementptr i8, i8* %p_postinc, i64 1
+; CHECK-NEXT: %p2_inc = getelementptr i8, i8* %p_preinc, i64 2
   %p2_inc = getelementptr i8, i8* %p_preinc, i64 2
   %a2 = load i8, i8* %p_postinc
   store i8 %a2, i8* @dummy

llvm/test/Transforms/CodeGenPrepare/invariant.group.ll renamed to llvm/test/Transforms/CodeGenPrepare/X86/invariant.group.ll

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-; RUN: opt -codegenprepare -S < %s | FileCheck %s
+; RUN: opt -codegenprepare -S -mtriple=x86_64 < %s | FileCheck %s

 @tmp = global i8 0
