@@ -164,7 +164,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
 
 // If we don't have cmpxchg8b(meaing this is a 386/486), limit atomic size to
 // 32 bits so the AtomicExpandPass will expand it so we don't need cmpxchg8b.
-// FIXME: Should we be limitting the atomic size on other configs? Default is
+// FIXME: Should we be limiting the atomic size on other configs? Default is
 // 1024.
 if (!Subtarget.hasCmpxchg8b())
 setMaxAtomicSizeInBitsSupported(32);
@@ -4335,7 +4335,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
 // (within module) calls are supported at the moment.
 // To keep the stack aligned according to platform abi the function
 // GetAlignedArgumentStackSize ensures that argument delta is always multiples
-// of stack alignment. (Dynamic linkers need this - darwin's dyld for example)
+// of stack alignment. (Dynamic linkers need this - Darwin's dyld for example)
 // If a tail called function callee has more arguments than the caller the
 // caller needs to make sure that there is room to move the RETADDR to. This is
 // achieved by reserving an area the size of the argument delta right after the
@@ -5723,7 +5723,7 @@ static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
 return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
 }
 
-// Helper function to collect subvector ops that are concated together,
+// Helper function to collect subvector ops that are concatenated together,
 // either by ISD::CONCAT_VECTORS or a ISD::INSERT_SUBVECTOR series.
 // The subvectors in Ops are guaranteed to be the same type.
 static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops) {
@@ -5898,7 +5898,7 @@ static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
 if (SubVecNumElems * 2 == NumElems) {
 // Special case, use legal zero extending insert_subvector. This allows
-// isel to opimitize when bits are known zero.
+// isel to optimize when bits are known zero.
 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
 DAG.getConstant(0, dl, WideOpVT),
@@ -7581,7 +7581,7 @@ static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
 KnownZero, DAG, Depth, ResolveKnownElts);
 }
 
-/// Returns the scalar element that will make up the ith
+/// Returns the scalar element that will make up the i'th
 /// element of the result of the vector shuffle.
 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
 unsigned Depth) {
@@ -8437,7 +8437,7 @@ static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
 SDValue Ld = BVOp->getSplatValue(&UndefElements);
 
 // Attempt to use VBROADCASTM
-// From this paterrn:
+// From this pattern:
 // a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
 // b. t1 = (build_vector t0 t0)
 //
@@ -36636,7 +36636,7 @@ static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
 if (SDValue V = combineBitcastvxi1(DAG, VT, N0, dl, Subtarget))
 return V;
 
-// Recognize the IR pattern for the movmsk intrinsic under SSE1 befoer type
+// Recognize the IR pattern for the movmsk intrinsic under SSE1 before type
 // legalization destroys the v4i32 type.
 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && SrcVT == MVT::v4i1 &&
 VT.isScalarInteger() && N0.getOpcode() == ISD::SETCC &&