@@ -338,13 +338,15 @@ static RegisterPass<MemCpyOpt> X("memcpyopt",
 bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator& BBI) {
   if (SI->isVolatile()) return false;
 
+  LLVMContext &Context = SI->getContext();
+
   // There are two cases that are interesting for this code to handle: memcpy
   // and memset.  Right now we only handle memset.
 
   // Ensure that the value being stored is something that can be memset'able a
   // byte at a time like "0" or "-1" or any width, as well as things like
   // 0xA0A0A0A0 and 0.0.
-  Value *ByteVal = isBytewiseValue(SI->getOperand(0), SI->getContext());
+  Value *ByteVal = isBytewiseValue(SI->getOperand(0), Context);
   if (!ByteVal)
     return false;
 
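The comments in this hunk describe what counts as a "byte-splattable" value: one whose in-memory representation is a single byte repeated, such as 0, -1, or 0xA0A0A0A0. As a rough standalone sketch of that idea (illustrative only; isByteSplat32 is a made-up helper, not the isBytewiseValue used above, which handles LLVM constants of arbitrary width):

#include <stdint.h>

// True if all four bytes of V are identical, i.e. V is a splat of its low
// byte (0x00000000, 0xFFFFFFFF, 0xA0A0A0A0, ...).  Sketch only.
static bool isByteSplat32(uint32_t V, uint8_t &Byte) {
  Byte = static_cast<uint8_t>(V & 0xFF);
  return V == 0x01010101u * Byte;  // repeat the low byte into every position
}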
@@ -385,8 +387,7 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator& BBI) {
     if (NextStore->isVolatile()) break;
 
     // Check to see if this stored value is of the same byte-splattable value.
-    if (ByteVal != isBytewiseValue(NextStore->getOperand(0),
-                                   NextStore->getContext()))
+    if (ByteVal != isBytewiseValue(NextStore->getOperand(0), Context))
       break;
 
     // Check to see if this store is to a constant offset from the start ptr.
@@ -406,7 +407,6 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator& BBI) {
   // store as well.  We try to avoid this unless there is at least something
   // interesting as a small compile-time optimization.
   Ranges.addStore(0, SI);
-
 
   Function *MemSetF = 0;
 
@@ -430,28 +430,25 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator& BBI) {
     BasicBlock::iterator InsertPt = BI;
 
     if (MemSetF == 0) {
-      const Type *Tys[] = {Type::getInt64Ty(SI->getContext())};
-      MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset,
-                                          Tys, 1);
-    }
+      const Type *Ty = Type::getInt64Ty(Context);
+      MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, &Ty, 1);
+    }
 
     // Get the starting pointer of the block.
     StartPtr = Range.StartPtr;
 
     // Cast the start ptr to be i8* as memset requires.
-    const Type *i8Ptr =
-      PointerType::getUnqual(Type::getInt8Ty(SI->getContext()));
+    const Type *i8Ptr = PointerType::getUnqual(Type::getInt8Ty(Context));
     if (StartPtr->getType() != i8Ptr)
       StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getName(),
                                  InsertPt);
 
     Value *Ops[] = {
       StartPtr, ByteVal,  // Start, value
       // size
-      ConstantInt::get(Type::getInt64Ty(SI->getContext()),
-                       Range.End-Range.Start),
+      ConstantInt::get(Type::getInt64Ty(Context), Range.End-Range.Start),
       // align
-      ConstantInt::get(Type::getInt32Ty(SI->getContext()), Range.Alignment)
+      ConstantInt::get(Type::getInt32Ty(Context), Range.Alignment)
     };
     Value *C = CallInst::Create(MemSetF, Ops, Ops+4, "", InsertPt);
     DEBUG(cerr << "Replace stores:\n";
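A side note on the cleanup in the hunk above: the one-element Tys array is replaced by the address of a single Ty variable, and both forms hand Intrinsic::getDeclaration the same pointer/length pair of length one. A hedged illustration with an ordinary function (firstOf and the values are made up for the example):

#include <cassert>
#include <cstddef>

// A (pointer, count) interface cannot tell a one-element array apart from
// the address of a single object.
static int firstOf(const int *Vals, std::size_t N) {
  assert(N >= 1);
  return Vals[0];
}

int main() {
  int Tys[] = {42};  // old style: one-element array, passed as (Tys, 1)
  int Ty = 42;       // new style: single variable, passed as (&Ty, 1)
  assert(firstOf(Tys, 1) == firstOf(&Ty, 1));
  return 0;
}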
@@ -463,7 +460,8 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator& BBI) {
     BBI = BI;
 
     // Zap all the stores.
-    for (SmallVector<StoreInst*, 16>::const_iterator SI = Range.TheStores.begin(),
+    for (SmallVector<StoreInst*, 16>::const_iterator
+         SI = Range.TheStores.begin(),
          SE = Range.TheStores.end(); SI != SE; ++SI)
       (*SI)->eraseFromParent();
     ++NumMemSetInfer;
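For context on what the surrounding code does: a run of adjacent stores of the same splattable byte is collapsed into one call to the memset intrinsic covering Range.End - Range.Start bytes, and the original stores are then erased. A rough source-level analogy of the before/after (hand-written illustration, not output of the pass):

#include <stdint.h>
#include <cstring>

// Before: four adjacent stores of the same byte-splattable value.
void storesExpanded(uint8_t *P) {
  P[0] = 0xA0;
  P[1] = 0xA0;
  P[2] = 0xA0;
  P[3] = 0xA0;
}

// After: the equivalent single memset over the whole 4-byte range.
void storesCollapsed(uint8_t *P) {
  std::memset(P, 0xA0, 4);
}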