@@ -164,7 +164,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   // If we don't have cmpxchg8b(meaing this is a 386/486), limit atomic size to
   // 32 bits so the AtomicExpandPass will expand it so we don't need cmpxchg8b.
-  // FIXME: Should we be limitting the atomic size on other configs? Default is
+  // FIXME: Should we be limiting the atomic size on other configs? Default is
   // 1024.
   if (!Subtarget.hasCmpxchg8b())
     setMaxAtomicSizeInBitsSupported(32);
@@ -4335,7 +4335,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
 // (within module) calls are supported at the moment.
 // To keep the stack aligned according to platform abi the function
 // GetAlignedArgumentStackSize ensures that argument delta is always multiples
-// of stack alignment. (Dynamic linkers need this - darwin 's dyld for example)
+// of stack alignment. (Dynamic linkers need this - Darwin 's dyld for example)
 // If a tail called function callee has more arguments than the caller the
 // caller needs to make sure that there is room to move the RETADDR to. This is
 // achieved by reserving an area the size of the argument delta right after the
@@ -5723,7 +5723,7 @@ static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
   return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
 }
 
-// Helper function to collect subvector ops that are concated together,
+// Helper function to collect subvector ops that are concatenated together,
 // either by ISD::CONCAT_VECTORS or a ISD::INSERT_SUBVECTOR series.
 // The subvectors in Ops are guaranteed to be the same type.
 static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops) {
@@ -5898,7 +5898,7 @@ static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
                          DAG.getTargetConstant(IdxVal, dl, MVT::i8));
     if (SubVecNumElems * 2 == NumElems) {
       // Special case, use legal zero extending insert_subvector. This allows
-      // isel to opimitize when bits are known zero.
+      // isel to optimize when bits are known zero.
       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
                         DAG.getConstant(0, dl, WideOpVT),
@@ -7581,7 +7581,7 @@ static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
                                 KnownZero, DAG, Depth, ResolveKnownElts);
 }
 
-/// Returns the scalar element that will make up the ith
+/// Returns the scalar element that will make up the i'th
 /// element of the result of the vector shuffle.
 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
                                    unsigned Depth) {
@@ -8437,7 +8437,7 @@ static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
   SDValue Ld = BVOp->getSplatValue(&UndefElements);
 
   // Attempt to use VBROADCASTM
-  // From this paterrn :
+  // From this pattern :
   // a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
   // b. t1 = (build_vector t0 t0)
   //
@@ -36636,7 +36636,7 @@ static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
   if (SDValue V = combineBitcastvxi1(DAG, VT, N0, dl, Subtarget))
     return V;
 
-  // Recognize the IR pattern for the movmsk intrinsic under SSE1 befoer type
+  // Recognize the IR pattern for the movmsk intrinsic under SSE1 before type
   // legalization destroys the v4i32 type.
   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && SrcVT == MVT::v4i1 &&
       VT.isScalarInteger() && N0.getOpcode() == ISD::SETCC &&