@@ -327,6 +327,7 @@ class TypePromotionTransaction;
       // FIXME: When we can selectively preserve passes, preserve the domtree.
       AU.addRequired<ProfileSummaryInfoWrapperPass>();
       AU.addRequired<TargetLibraryInfoWrapperPass>();
+      AU.addRequired<TargetPassConfig>();
       AU.addRequired<TargetTransformInfoWrapperPass>();
       AU.addRequired<LoopInfoWrapperPass>();
     }
@@ -428,12 +429,10 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
   InsertedInsts.clear();
   PromotedInsts.clear();
 
-  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
-    TM = &TPC->getTM<TargetMachine>();
-    SubtargetInfo = TM->getSubtargetImpl(F);
-    TLI = SubtargetInfo->getTargetLowering();
-    TRI = SubtargetInfo->getRegisterInfo();
-  }
+  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
+  SubtargetInfo = TM->getSubtargetImpl(F);
+  TLI = SubtargetInfo->getTargetLowering();
+  TRI = SubtargetInfo->getRegisterInfo();
   TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
   TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
   LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
@@ -450,10 +449,9 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
 
   /// This optimization identifies DIV instructions that can be
   /// profitably bypassed and carried out with a shorter, faster divide.
-  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI &&
-      TLI->isSlowDivBypassed()) {
+  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
     const DenseMap<unsigned int, unsigned int> &BypassWidths =
-       TLI->getBypassSlowDivWidths();
+        TLI->getBypassSlowDivWidths();
     BasicBlock* BB = &*F.begin();
     while (BB != nullptr) {
       // bypassSlowDivision may create new BBs, but we don't want to reapply the
@@ -1813,7 +1811,7 @@ static bool despeculateCountZeros(IntrinsicInst *CountZeros,
                                   const TargetLowering *TLI,
                                   const DataLayout *DL,
                                   bool &ModifiedDT) {
-  if (!TLI || !DL)
+  if (!DL)
     return false;
 
   // If a zero input is undefined, it doesn't make sense to despeculate that.
@@ -1877,7 +1875,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
   // Lower inline assembly if we can.
   // If we found an inline asm expession, and if the target knows how to
   // lower it to normal LLVM code, do so now.
-  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
+  if (isa<InlineAsm>(CI->getCalledValue())) {
     if (TLI->ExpandInlineAsm(CI)) {
       // Avoid invalidating the iterator.
       CurInstIterator = BB->begin();
@@ -1894,7 +1892,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
   // Align the pointer arguments to this call if the target thinks it's a good
   // idea
   unsigned MinSize, PrefAlign;
-  if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
+  if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
     for (auto &Arg : CI->arg_operands()) {
       // We want to align both objects whose address is used directly and
       // objects whose address is used in casts and GEPs, though it only makes
@@ -2028,17 +2026,15 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
       }
     }
 
-    if (TLI) {
-      SmallVector<Value*, 2> PtrOps;
-      Type *AccessTy;
-      if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
-        while (!PtrOps.empty()) {
-          Value *PtrVal = PtrOps.pop_back_val();
-          unsigned AS = PtrVal->getType()->getPointerAddressSpace();
-          if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
-            return true;
-        }
-    }
+    SmallVector<Value *, 2> PtrOps;
+    Type *AccessTy;
+    if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
+      while (!PtrOps.empty()) {
+        Value *PtrVal = PtrOps.pop_back_val();
+        unsigned AS = PtrVal->getType()->getPointerAddressSpace();
+        if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
+          return true;
+      }
   }
 
   // From here on out we're working with named functions.
@@ -2089,9 +2085,6 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
 ///   ret i32 %tmp2
 /// @endcode
 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT) {
-  if (!TLI)
-    return false;
-
   ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
   if (!RetI)
     return false;
@@ -4907,7 +4900,7 @@ bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
     if (SunkAddr->getType() != Addr->getType())
       SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
   } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
-                                   TM && SubtargetInfo->addrSinkUsingGEPs())) {
+                                   SubtargetInfo->addrSinkUsingGEPs())) {
     // By default, we use the GEP-based method when AA is used later. This
     // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
     LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
@@ -5258,7 +5251,7 @@ bool CodeGenPrepare::tryToPromoteExts(
   // this check inside the for loop is to catch the case where an extension
   // is directly fed by a load because in such case the extension can be moved
   // up without any promotion on its operands.
-  if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion)
+  if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion)
     return false;
 
   // Get the action to perform the promotion.
@@ -5583,11 +5576,6 @@ bool CodeGenPrepare::canFormExtLd(
 /// \p Inst[in/out] the extension may be modified during the process if some
 /// promotions apply.
 bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
-  // ExtLoad formation and address type promotion infrastructure requires TLI to
-  // be effective.
-  if (!TLI)
-    return false;
-
   bool AllowPromotionWithoutCommonHeader = false;
   /// See if it is an interesting sext operations for the address type
   /// promotion before trying to promote it, e.g., the ones with the right
@@ -5717,7 +5705,7 @@ bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
     return false;
 
   // Only do this xform if truncating is free.
-  if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
+  if (!TLI->isTruncateFree(I->getType(), Src->getType()))
     return false;
 
   // Only safe to perform the optimization if the source is also defined in
// Only safe to perform the optimization if the source is also defined in
@@ -6064,9 +6052,8 @@ bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) {
6064
6052
// / turn it into a branch.
6065
6053
bool CodeGenPrepare::optimizeSelectInst (SelectInst *SI) {
6066
6054
// If branch conversion isn't desirable, exit early.
6067
- if (DisableSelectToBranch ||
6068
- OptSize || llvm::shouldOptimizeForSize (SI->getParent (), PSI, BFI.get ()) ||
6069
- !TLI)
6055
+ if (DisableSelectToBranch || OptSize ||
6056
+ llvm::shouldOptimizeForSize (SI->getParent (), PSI, BFI.get ()))
6070
6057
return false ;
6071
6058
6072
6059
// Find all consecutive select instructions that share the same condition.
@@ -6252,7 +6239,7 @@ bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
   BasicBlock *DefBB = SVI->getParent();
 
   // Only do this xform if variable vector shifts are particularly expensive.
-  if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType()))
+  if (!TLI->isVectorShiftByScalarCheap(SVI->getType()))
     return false;
 
   // We only expect better codegen by sinking a shuffle if we can recognise a
@@ -6304,7 +6291,7 @@ bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
   // If the operands of I can be folded into a target instruction together with
   // I, duplicate and sink them.
   SmallVector<Use *, 4> OpsToSink;
-  if (!TLI || !TLI->shouldSinkOperands(I, OpsToSink))
+  if (!TLI->shouldSinkOperands(I, OpsToSink))
     return false;
 
   // OpsToSink can contain multiple uses in a use chain (e.g.
@@ -6357,7 +6344,7 @@ bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
 }
 
 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
-  if (!TLI || !DL)
+  if (!DL)
     return false;
 
   Value *Cond = SI->getCondition();
@@ -6723,7 +6710,7 @@ void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
 /// has this feature and this is profitable.
 bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
   unsigned CombineCost = std::numeric_limits<unsigned>::max();
-  if (DisableStoreExtract || !TLI ||
+  if (DisableStoreExtract ||
       (!StressStoreExtract &&
        !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
                                        Inst->getOperand(1), CombineCost)))
@@ -7092,16 +7079,15 @@ bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
     if (isa<Constant>(CI->getOperand(0)))
      return false;
 
-    if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL))
+    if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
      return true;
 
    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      /// Sink a zext or sext into its user blocks if the target type doesn't
      /// fit in one register
-      if (TLI &&
-          TLI->getTypeAction(CI->getContext(),
+      if (TLI->getTypeAction(CI->getContext(),
                             TLI->getValueType(*DL, CI->getType())) ==
-              TargetLowering::TypeExpandInteger) {
+          TargetLowering::TypeExpandInteger) {
        return SinkCast(CI);
      } else {
        bool MadeChange = optimizeExt(I);
@@ -7112,30 +7098,24 @@ bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
   }
 
   if (auto *Cmp = dyn_cast<CmpInst>(I))
-    if (TLI && optimizeCmp(Cmp, ModifiedDT))
+    if (optimizeCmp(Cmp, ModifiedDT))
      return true;
 
   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
     LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
-    if (TLI) {
-      bool Modified = optimizeLoadExt(LI);
-      unsigned AS = LI->getPointerAddressSpace();
-      Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
-      return Modified;
-    }
-    return false;
+    bool Modified = optimizeLoadExt(LI);
+    unsigned AS = LI->getPointerAddressSpace();
+    Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
+    return Modified;
   }
 
   if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
-    if (TLI && splitMergedValStore(*SI, *DL, *TLI))
+    if (splitMergedValStore(*SI, *DL, *TLI))
      return true;
     SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
-    if (TLI) {
-      unsigned AS = SI->getPointerAddressSpace();
-      return optimizeMemoryInst(I, SI->getOperand(1),
-                                SI->getOperand(0)->getType(), AS);
-    }
-    return false;
+    unsigned AS = SI->getPointerAddressSpace();
+    return optimizeMemoryInst(I, SI->getOperand(1),
+                              SI->getOperand(0)->getType(), AS);
   }
 
   if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
@@ -7152,15 +7132,14 @@ bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
 
   BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);
 
-  if (BinOp && (BinOp->getOpcode() == Instruction::And) &&
-      EnableAndCmpSinking && TLI)
+  if (BinOp && (BinOp->getOpcode() == Instruction::And) && EnableAndCmpSinking)
     return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);
 
   // TODO: Move this into the switch on opcode - it handles shifts already.
   if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                 BinOp->getOpcode() == Instruction::LShr)) {
     ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
-    if (TLI && CI && TLI->hasExtractBitsInsn())
+    if (CI && TLI->hasExtractBitsInsn())
      if (OptimizeExtractBits(BinOp, CI, *TLI, *DL))
        return true;
   }
@@ -7239,7 +7218,7 @@ bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) {
   }
 
   bool MadeBitReverse = true;
-  while (TLI && MadeBitReverse) {
+  while (MadeBitReverse) {
     MadeBitReverse = false;
     for (auto &I : reverse(BB)) {
      if (makeBitReverse(I, *DL, *TLI)) {
@@ -7351,7 +7330,7 @@ static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
 /// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
 ///
 bool CodeGenPrepare::splitBranchCondition(Function &F, bool &ModifiedDT) {
-  if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive())
+  if (!TM->Options.EnableFastISel || TLI->isJumpExpensive())
    return false;
 
   bool MadeChange = false;