@@ -9376,10 +9376,9 @@ bool AArch64TargetLowering::lowerInterleavedLoad(
 
   // A pointer vector can not be the return type of the ldN intrinsics. Need to
   // load integer vectors first and then convert to pointer vectors.
-  Type *EltTy = VecTy->getVectorElementType();
+  Type *EltTy = VecTy->getElementType();
   if (EltTy->isPointerTy())
-    VecTy =
-        VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements());
+    VecTy = VectorType::get(DL.getIntPtrType(EltTy), VecTy->getNumElements());
 
   IRBuilder<> Builder(LI);
 
@@ -9389,15 +9388,15 @@ bool AArch64TargetLowering::lowerInterleavedLoad(
   if (NumLoads > 1) {
     // If we're going to generate more than one load, reset the sub-vector type
     // to something legal.
-    VecTy = VectorType::get(VecTy->getVectorElementType(),
-                            VecTy->getVectorNumElements() / NumLoads);
+    VecTy = VectorType::get(VecTy->getElementType(),
+                            VecTy->getNumElements() / NumLoads);
 
     // We will compute the pointer operand of each load from the original base
     // address using GEPs. Cast the base address to a pointer to the scalar
     // element type.
     BaseAddr = Builder.CreateBitCast(
-        BaseAddr, VecTy->getVectorElementType()->getPointerTo(
-                      LI->getPointerAddressSpace()));
+        BaseAddr,
+        VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace()));
   }
 
   Type *PtrTy = VecTy->getPointerTo(LI->getPointerAddressSpace());
@@ -9418,9 +9417,8 @@ bool AArch64TargetLowering::lowerInterleavedLoad(
     // If we're generating more than one load, compute the base address of
     // subsequent loads as an offset from the previous.
     if (LoadCount > 0)
-      BaseAddr =
-          Builder.CreateConstGEP1_32(VecTy->getVectorElementType(), BaseAddr,
-                                     VecTy->getVectorNumElements() * Factor);
+      BaseAddr = Builder.CreateConstGEP1_32(VecTy->getElementType(), BaseAddr,
+                                            VecTy->getNumElements() * Factor);
 
     CallInst *LdN = Builder.CreateCall(
         LdNFunc, Builder.CreateBitCast(BaseAddr, PtrTy), "ldN");
@@ -9435,8 +9433,8 @@ bool AArch64TargetLowering::lowerInterleavedLoad(
       // Convert the integer vector to pointer vector if the element is pointer.
       if (EltTy->isPointerTy())
         SubVec = Builder.CreateIntToPtr(
-            SubVec, VectorType::get(SVI->getType()->getVectorElementType(),
-                                    VecTy->getVectorNumElements()));
+            SubVec, VectorType::get(SVI->getType()->getElementType(),
+                                    VecTy->getNumElements()));
       SubVecs[SVI].push_back(SubVec);
     }
   }
@@ -9488,11 +9486,10 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
          "Invalid interleave factor");
 
   VectorType *VecTy = SVI->getType();
-  assert(VecTy->getVectorNumElements() % Factor == 0 &&
-         "Invalid interleaved store");
+  assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store");
 
-  unsigned LaneLen = VecTy->getVectorNumElements() / Factor;
-  Type *EltTy = VecTy->getVectorElementType();
+  unsigned LaneLen = VecTy->getNumElements() / Factor;
+  Type *EltTy = VecTy->getElementType();
   VectorType *SubVecTy = VectorType::get(EltTy, LaneLen);
 
   const DataLayout &DL = SI->getModule()->getDataLayout();
@@ -9513,7 +9510,7 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
   // vectors to integer vectors.
   if (EltTy->isPointerTy()) {
     Type *IntTy = DL.getIntPtrType(EltTy);
-    unsigned NumOpElts = Op0->getType()->getVectorNumElements();
+    unsigned NumOpElts = cast<VectorType>(Op0->getType())->getNumElements();
 
     // Convert to the corresponding integer vector.
     Type *IntVecTy = VectorType::get(IntTy, NumOpElts);
@@ -9530,14 +9527,14 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
     // If we're going to generate more than one store, reset the lane length
     // and sub-vector type to something legal.
     LaneLen /= NumStores;
-    SubVecTy = VectorType::get(SubVecTy->getVectorElementType(), LaneLen);
+    SubVecTy = VectorType::get(SubVecTy->getElementType(), LaneLen);
 
     // We will compute the pointer operand of each store from the original base
     // address using GEPs. Cast the base address to a pointer to the scalar
     // element type.
     BaseAddr = Builder.CreateBitCast(
-        BaseAddr, SubVecTy->getVectorElementType()->getPointerTo(
-                      SI->getPointerAddressSpace()));
+        BaseAddr,
+        SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace()));
   }
 
   auto Mask = SVI->getShuffleMask();
@@ -9582,7 +9579,7 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
     // If we generating more than one store, we compute the base address of
     // subsequent stores as an offset from the previous.
     if (StoreCount > 0)
-      BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getVectorElementType(),
+      BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(),
                                             BaseAddr, LaneLen * Factor);
 
     Ops.push_back(Builder.CreateBitCast(BaseAddr, PtrTy));
@@ -9697,7 +9694,7 @@ bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,
     return false;
 
   // FIXME: Update this method to support scalable addressing modes.
-  if (Ty->isVectorTy() && Ty->getVectorIsScalable())
+  if (Ty->isVectorTy() && cast<VectorType>(Ty)->isScalable())
     return AM.HasBaseReg && !AM.BaseOffs && !AM.Scale;
 
   // check reg + imm case: