
Commit e71fb46

[TargetLowering] SimplifyDemandedVectorElts - add DemandedElts mask to ISD::BITCAST SimplifyDemandedBits call.
This fixes most of the regressions introduced in the rG4bc6f6332028 bugfix. The vector-trunc.ll issue should be fixed by D66004.
Parent: 6b37c47

14 files changed: +212 / -289 lines

llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp

Lines changed: 2 additions & 1 deletion
@@ -2310,7 +2310,8 @@ bool TargetLowering::SimplifyDemandedVectorElts(
         }

       KnownBits Known;
-      if (SimplifyDemandedBits(Src, SrcDemandedBits, Known, TLO, Depth + 1))
+      if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known,
+                               TLO, Depth + 1))
         return true;
     }
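
For context, the patched call sits in the ISD::BITCAST handling of SimplifyDemandedVectorElts, where the demanded lanes of the narrow destination vector are translated into a per-source-element bit mask (SrcDemandedBits) and a source lane mask (SrcDemandedElts); this commit forwards the lane mask as well. The standalone C++ sketch below only models that translation. It is not LLVM source, and the vector shapes, demanded lanes, and printout are illustrative assumptions.

// Standalone model (not LLVM source) of the mask translation done around the
// patched call: a bitcast from a wide-element source vector to a vector with
// more, narrower elements, where NumElts = SrcNumElts * Scale.
#include <cstdint>
#include <cstdio>

int main() {
  // Assumed example: bitcast <2 x i64> -> <8 x i16>, destination lanes 0 and 5 demanded.
  const unsigned SrcNumElts = 2;
  const unsigned NumElts = 8, EltBits = 16;
  const unsigned Scale = NumElts / SrcNumElts;   // 4 narrow lanes per wide lane
  const uint64_t DemandedElts = 0b00100001;      // destination lanes 0 and 5

  uint64_t SrcDemandedElts = 0;  // which wide source lanes are demanded at all
  uint64_t SrcDemandedBits = 0;  // demanded bit ranges within a wide source lane
  for (unsigned i = 0; i != NumElts; ++i) {
    if (!((DemandedElts >> i) & 1))
      continue;
    SrcDemandedElts |= 1ULL << (i / Scale);             // wide lane covering narrow lane i
    const unsigned Ofs = (i % Scale) * EltBits;         // sub-lane bit offset
    SrcDemandedBits |= ((1ULL << EltBits) - 1) << Ofs;  // EltBits bits at that offset
  }

  std::printf("SrcDemandedElts = 0x%llx\n",
              static_cast<unsigned long long>(SrcDemandedElts));
  std::printf("SrcDemandedBits = 0x%016llx\n",
              static_cast<unsigned long long>(SrcDemandedBits));
  return 0;
}

Passing the lane mask alongside the bit mask lets the bit-level simplification ignore source lanes that no demanded destination lane maps to, which is what recovers the folds shown in the test updates below.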

llvm/test/CodeGen/PowerPC/pre-inc-disable.ll

Lines changed: 3 additions & 3 deletions
@@ -367,10 +367,10 @@ define void @test16(i16* nocapture readonly %sums, i32 signext %delta, i32 signe
 ; CHECK-NEXT: vsplth v2, v2, 3
 ; CHECK-NEXT: addis r3, r2, .LCPI3_0@toc@ha
 ; CHECK-NEXT: addi r3, r3, .LCPI3_0@toc@l
-; CHECK-NEXT: vmrglh v4, v3, v4
 ; CHECK-NEXT: vmrglh v2, v3, v2
-; CHECK-NEXT: vsplth v3, v3, 7
-; CHECK-NEXT: vmrglw v3, v4, v3
+; CHECK-NEXT: vmrglh v3, v3, v4
+; CHECK-NEXT: xxlxor v4, v4, v4
+; CHECK-NEXT: vmrglw v3, v3, v4
 ; CHECK-NEXT: lxvx v4, 0, r3
 ; CHECK-NEXT: li r3, 0
 ; CHECK-NEXT: vperm v2, v2, v3, v4

llvm/test/CodeGen/PowerPC/srem-vector-lkk.ll

Lines changed: 44 additions & 52 deletions
@@ -1071,6 +1071,7 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) {
 ; P9LE-NEXT: extsw r4, r4
 ; P9LE-NEXT: mulld r5, r4, r5
 ; P9LE-NEXT: rldicl r5, r5, 32, 32
+; P9LE-NEXT: xxlxor v4, v4, v4
 ; P9LE-NEXT: add r4, r5, r4
 ; P9LE-NEXT: srwi r5, r4, 31
 ; P9LE-NEXT: srawi r4, r4, 9
@@ -1079,9 +1080,6 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) {
 ; P9LE-NEXT: mulli r4, r4, 654
 ; P9LE-NEXT: subf r3, r4, r3
 ; P9LE-NEXT: mtvsrd f0, r3
-; P9LE-NEXT: li r3, 0
-; P9LE-NEXT: xxswapd v3, vs0
-; P9LE-NEXT: mtvsrd f0, r3
 ; P9LE-NEXT: li r3, 4
 ; P9LE-NEXT: vextuhrx r3, r3, v2
 ; P9LE-NEXT: extsh r4, r3
@@ -1096,7 +1094,7 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) {
 ; P9LE-NEXT: lis r5, 24749
 ; P9LE-NEXT: mulli r4, r4, 23
 ; P9LE-NEXT: subf r3, r4, r3
-; P9LE-NEXT: xxswapd v4, vs0
+; P9LE-NEXT: xxswapd v3, vs0
 ; P9LE-NEXT: mtvsrd f0, r3
 ; P9LE-NEXT: li r3, 6
 ; P9LE-NEXT: vextuhrx r3, r3, v2
@@ -1181,6 +1179,7 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) {
 ; P8LE-NEXT: lis r3, 24749
 ; P8LE-NEXT: lis r8, -19946
 ; P8LE-NEXT: lis r10, -14230
+; P8LE-NEXT: xxlxor v5, v5, v5
 ; P8LE-NEXT: ori r3, r3, 47143
 ; P8LE-NEXT: ori r8, r8, 17097
 ; P8LE-NEXT: mfvsrd r4, f0
@@ -1213,21 +1212,18 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) {
 ; P8LE-NEXT: mulli r3, r3, 5423
 ; P8LE-NEXT: add r7, r7, r9
 ; P8LE-NEXT: mulli r8, r8, 23
-; P8LE-NEXT: li r9, 0
 ; P8LE-NEXT: mulli r7, r7, 654
-; P8LE-NEXT: mtvsrd f0, r9
 ; P8LE-NEXT: subf r3, r3, r5
-; P8LE-NEXT: xxswapd v4, vs0
-; P8LE-NEXT: subf r5, r8, r6
+; P8LE-NEXT: mtvsrd f0, r3
+; P8LE-NEXT: subf r3, r8, r6
+; P8LE-NEXT: subf r4, r7, r4
 ; P8LE-NEXT: mtvsrd f1, r3
-; P8LE-NEXT: subf r3, r7, r4
-; P8LE-NEXT: mtvsrd f2, r5
-; P8LE-NEXT: mtvsrd f3, r3
-; P8LE-NEXT: xxswapd v2, vs1
-; P8LE-NEXT: xxswapd v3, vs2
-; P8LE-NEXT: xxswapd v5, vs3
+; P8LE-NEXT: mtvsrd f2, r4
+; P8LE-NEXT: xxswapd v2, vs0
+; P8LE-NEXT: xxswapd v3, vs1
+; P8LE-NEXT: xxswapd v4, vs2
 ; P8LE-NEXT: vmrglh v2, v2, v3
-; P8LE-NEXT: vmrglh v3, v5, v4
+; P8LE-NEXT: vmrglh v3, v4, v5
 ; P8LE-NEXT: vmrglw v2, v2, v3
 ; P8LE-NEXT: blr
 ;
@@ -1332,11 +1328,9 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) {
 ; P9LE-NEXT: subf r3, r4, r3
 ; P9LE-NEXT: xxswapd v4, vs0
 ; P9LE-NEXT: mtvsrd f0, r3
-; P9LE-NEXT: li r3, 0
 ; P9LE-NEXT: xxswapd v2, vs0
-; P9LE-NEXT: mtvsrd f0, r3
 ; P9LE-NEXT: vmrglh v3, v4, v3
-; P9LE-NEXT: xxswapd v4, vs0
+; P9LE-NEXT: xxlxor v4, v4, v4
 ; P9LE-NEXT: vmrglh v2, v2, v4
 ; P9LE-NEXT: vmrglw v2, v3, v2
 ; P9LE-NEXT: blr
@@ -1394,49 +1388,47 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) {
 ; P8LE-LABEL: dont_fold_urem_i16_smax:
 ; P8LE: # %bb.0:
 ; P8LE-NEXT: xxswapd vs0, v2
-; P8LE-NEXT: lis r3, 24749
-; P8LE-NEXT: lis r8, -19946
-; P8LE-NEXT: ori r3, r3, 47143
-; P8LE-NEXT: ori r8, r8, 17097
-; P8LE-NEXT: mfvsrd r4, f0
-; P8LE-NEXT: rldicl r5, r4, 16, 48
-; P8LE-NEXT: rldicl r6, r4, 32, 48
-; P8LE-NEXT: extsh r7, r5
-; P8LE-NEXT: extsh r9, r6
-; P8LE-NEXT: extsw r7, r7
+; P8LE-NEXT: lis r6, 24749
+; P8LE-NEXT: lis r7, -19946
+; P8LE-NEXT: xxlxor v5, v5, v5
+; P8LE-NEXT: ori r6, r6, 47143
+; P8LE-NEXT: ori r7, r7, 17097
+; P8LE-NEXT: mfvsrd r3, f0
+; P8LE-NEXT: rldicl r4, r3, 16, 48
+; P8LE-NEXT: rldicl r5, r3, 32, 48
+; P8LE-NEXT: extsh r8, r4
+; P8LE-NEXT: extsh r9, r5
+; P8LE-NEXT: extsw r8, r8
 ; P8LE-NEXT: extsw r9, r9
-; P8LE-NEXT: mulld r3, r7, r3
-; P8LE-NEXT: mulld r7, r9, r8
-; P8LE-NEXT: rldicl r4, r4, 48, 48
-; P8LE-NEXT: rldicl r8, r3, 1, 63
-; P8LE-NEXT: rldicl r3, r3, 32, 32
+; P8LE-NEXT: mulld r6, r8, r6
+; P8LE-NEXT: mulld r7, r9, r7
+; P8LE-NEXT: rldicl r3, r3, 48, 48
+; P8LE-NEXT: rldicl r8, r6, 32, 32
 ; P8LE-NEXT: rldicl r7, r7, 32, 32
-; P8LE-NEXT: srawi r3, r3, 11
+; P8LE-NEXT: rldicl r6, r6, 1, 63
+; P8LE-NEXT: srawi r8, r8, 11
 ; P8LE-NEXT: add r7, r7, r9
-; P8LE-NEXT: add r3, r3, r8
-; P8LE-NEXT: li r9, 0
+; P8LE-NEXT: add r6, r8, r6
 ; P8LE-NEXT: srwi r8, r7, 31
 ; P8LE-NEXT: srawi r7, r7, 4
-; P8LE-NEXT: mtvsrd f0, r9
-; P8LE-NEXT: mulli r3, r3, 5423
+; P8LE-NEXT: mulli r6, r6, 5423
 ; P8LE-NEXT: add r7, r7, r8
-; P8LE-NEXT: extsh r8, r4
+; P8LE-NEXT: extsh r8, r3
 ; P8LE-NEXT: mulli r7, r7, 23
 ; P8LE-NEXT: srawi r8, r8, 15
-; P8LE-NEXT: xxswapd v4, vs0
-; P8LE-NEXT: subf r3, r3, r5
-; P8LE-NEXT: addze r5, r8
-; P8LE-NEXT: slwi r5, r5, 15
-; P8LE-NEXT: subf r6, r7, r6
-; P8LE-NEXT: mtvsrd f1, r3
-; P8LE-NEXT: subf r3, r5, r4
-; P8LE-NEXT: mtvsrd f2, r6
-; P8LE-NEXT: mtvsrd f3, r3
-; P8LE-NEXT: xxswapd v2, vs1
-; P8LE-NEXT: xxswapd v3, vs2
-; P8LE-NEXT: xxswapd v5, vs3
+; P8LE-NEXT: subf r4, r6, r4
+; P8LE-NEXT: addze r6, r8
+; P8LE-NEXT: mtvsrd f0, r4
+; P8LE-NEXT: slwi r4, r6, 15
+; P8LE-NEXT: subf r5, r7, r5
+; P8LE-NEXT: subf r3, r4, r3
+; P8LE-NEXT: mtvsrd f1, r5
+; P8LE-NEXT: xxswapd v2, vs0
+; P8LE-NEXT: mtvsrd f2, r3
+; P8LE-NEXT: xxswapd v3, vs1
+; P8LE-NEXT: xxswapd v4, vs2
 ; P8LE-NEXT: vmrglh v2, v2, v3
-; P8LE-NEXT: vmrglh v3, v5, v4
+; P8LE-NEXT: vmrglh v3, v4, v5
 ; P8LE-NEXT: vmrglw v2, v2, v3
 ; P8LE-NEXT: blr
 ;

llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll

Lines changed: 28 additions & 32 deletions
@@ -1006,11 +1006,9 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) {
 ; P9LE-NEXT: subf r3, r4, r3
 ; P9LE-NEXT: xxswapd v4, vs0
 ; P9LE-NEXT: mtvsrd f0, r3
-; P9LE-NEXT: li r3, 0
 ; P9LE-NEXT: xxswapd v2, vs0
-; P9LE-NEXT: mtvsrd f0, r3
 ; P9LE-NEXT: vmrglh v3, v4, v3
-; P9LE-NEXT: xxswapd v4, vs0
+; P9LE-NEXT: xxlxor v4, v4, v4
 ; P9LE-NEXT: vmrglh v2, v2, v4
 ; P9LE-NEXT: vmrglw v2, v3, v2
 ; P9LE-NEXT: blr
@@ -1066,43 +1064,41 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) {
 ; P8LE: # %bb.0:
 ; P8LE-NEXT: xxswapd vs0, v2
 ; P8LE-NEXT: li r3, 0
-; P8LE-NEXT: lis r9, 24749
-; P8LE-NEXT: oris r5, r3, 51306
-; P8LE-NEXT: oris r3, r3, 45590
-; P8LE-NEXT: ori r9, r9, 47143
-; P8LE-NEXT: ori r5, r5, 30865
-; P8LE-NEXT: ori r3, r3, 17097
+; P8LE-NEXT: lis r8, 24749
+; P8LE-NEXT: xxlxor v5, v5, v5
+; P8LE-NEXT: oris r5, r3, 45590
+; P8LE-NEXT: ori r8, r8, 47143
+; P8LE-NEXT: oris r3, r3, 51306
+; P8LE-NEXT: ori r5, r5, 17097
+; P8LE-NEXT: ori r3, r3, 30865
 ; P8LE-NEXT: mfvsrd r4, f0
-; P8LE-NEXT: rldicl r6, r4, 48, 48
-; P8LE-NEXT: rldicl r7, r4, 32, 48
-; P8LE-NEXT: rlwinm r8, r6, 31, 17, 31
-; P8LE-NEXT: rldicl r4, r4, 16, 48
-; P8LE-NEXT: mulld r5, r8, r5
-; P8LE-NEXT: rlwinm r8, r7, 0, 16, 31
-; P8LE-NEXT: mulld r3, r8, r3
-; P8LE-NEXT: rlwinm r8, r4, 0, 16, 31
-; P8LE-NEXT: mulld r8, r8, r9
-; P8LE-NEXT: li r9, 0
-; P8LE-NEXT: mtvsrd f0, r9
-; P8LE-NEXT: rldicl r5, r5, 24, 40
-; P8LE-NEXT: rldicl r3, r3, 28, 36
-; P8LE-NEXT: mulli r5, r5, 654
-; P8LE-NEXT: xxswapd v2, vs0
+; P8LE-NEXT: rldicl r6, r4, 32, 48
+; P8LE-NEXT: rldicl r7, r4, 16, 48
+; P8LE-NEXT: rlwinm r9, r6, 0, 16, 31
+; P8LE-NEXT: rldicl r4, r4, 48, 48
+; P8LE-NEXT: mulld r5, r9, r5
+; P8LE-NEXT: rlwinm r9, r7, 0, 16, 31
+; P8LE-NEXT: mulld r8, r9, r8
+; P8LE-NEXT: rlwinm r9, r4, 31, 17, 31
+; P8LE-NEXT: mulld r3, r9, r3
+; P8LE-NEXT: rldicl r5, r5, 28, 36
 ; P8LE-NEXT: rldicl r8, r8, 21, 43
-; P8LE-NEXT: mulli r3, r3, 23
+; P8LE-NEXT: mulli r5, r5, 23
+; P8LE-NEXT: rldicl r3, r3, 24, 40
 ; P8LE-NEXT: mulli r8, r8, 5423
+; P8LE-NEXT: mulli r3, r3, 654
 ; P8LE-NEXT: subf r5, r5, r6
-; P8LE-NEXT: subf r3, r3, r7
-; P8LE-NEXT: mtvsrd f1, r5
-; P8LE-NEXT: subf r4, r8, r4
+; P8LE-NEXT: subf r6, r8, r7
+; P8LE-NEXT: mtvsrd f0, r5
+; P8LE-NEXT: subf r3, r3, r4
+; P8LE-NEXT: mtvsrd f1, r6
 ; P8LE-NEXT: mtvsrd f2, r3
-; P8LE-NEXT: mtvsrd f3, r4
+; P8LE-NEXT: xxswapd v2, vs0
 ; P8LE-NEXT: xxswapd v3, vs1
 ; P8LE-NEXT: xxswapd v4, vs2
-; P8LE-NEXT: xxswapd v5, vs3
 ; P8LE-NEXT: vmrglh v2, v3, v2
-; P8LE-NEXT: vmrglh v3, v5, v4
-; P8LE-NEXT: vmrglw v2, v3, v2
+; P8LE-NEXT: vmrglh v3, v4, v5
+; P8LE-NEXT: vmrglw v2, v2, v3
 ; P8LE-NEXT: blr
 ;
 ; P8BE-LABEL: dont_fold_urem_one:

llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll

Lines changed: 4 additions & 10 deletions
@@ -1400,9 +1400,7 @@ define <4 x i64> @f4xi64_i128(<4 x i64> %a) {
 ; AVX-64-LABEL: f4xi64_i128:
 ; AVX-64: # %bb.0:
 ; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-64-NEXT: movl $1, %eax
-; AVX-64-NEXT: vmovq %rax, %xmm2
-; AVX-64-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
+; AVX-64-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0]
 ; AVX-64-NEXT: vpaddq %xmm2, %xmm1, %xmm1
 ; AVX-64-NEXT: vpaddq %xmm2, %xmm0, %xmm0
 ; AVX-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -1460,17 +1458,15 @@ define <8 x i64> @f8xi64_i128(<8 x i64> %a) {
 ; AVX-64-LABEL: f8xi64_i128:
 ; AVX-64: # %bb.0:
 ; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX-64-NEXT: movl $1, %eax
-; AVX-64-NEXT: vmovq %rax, %xmm3
-; AVX-64-NEXT: vpslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1,2,3,4,5,6,7]
+; AVX-64-NEXT: vmovdqa {{.*#+}} xmm3 = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0]
 ; AVX-64-NEXT: vpaddq %xmm3, %xmm2, %xmm2
 ; AVX-64-NEXT: vpaddq %xmm3, %xmm1, %xmm1
 ; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm2
 ; AVX-64-NEXT: vpaddq %xmm3, %xmm2, %xmm2
 ; AVX-64-NEXT: vpaddq %xmm3, %xmm0, %xmm0
 ; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-64-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [0,1,0,1]
+; AVX-64-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0]
 ; AVX-64-NEXT: # ymm2 = mem[0,1,0,1]
 ; AVX-64-NEXT: vandps %ymm2, %ymm0, %ymm0
 ; AVX-64-NEXT: vandps %ymm2, %ymm1, %ymm1
@@ -1539,9 +1535,7 @@ define <8 x i64> @f8xi64_i256(<8 x i64> %a) {
 ; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
 ; AVX-64-NEXT: vmovdqa {{.*#+}} xmm3 = [2,3]
 ; AVX-64-NEXT: vpaddq %xmm3, %xmm2, %xmm2
-; AVX-64-NEXT: movl $1, %eax
-; AVX-64-NEXT: vmovq %rax, %xmm4
-; AVX-64-NEXT: vpslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7]
+; AVX-64-NEXT: vmovdqa {{.*#+}} xmm4 = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0]
 ; AVX-64-NEXT: vpaddq %xmm4, %xmm1, %xmm1
 ; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm2

llvm/test/CodeGen/X86/load-partial.ll

Lines changed: 4 additions & 12 deletions
@@ -307,25 +307,17 @@ define i32 @load_partial_illegal_type() {
 ; SSE2: # %bb.0:
 ; SSE2-NEXT: movzwl {{.*}}(%rip), %eax
 ; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: movl $2, %eax
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: pslld $16, %xmm2
-; SSE2-NEXT: pandn %xmm2, %xmm1
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: por {{.*}}(%rip), %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: load_partial_illegal_type:
 ; SSSE3: # %bb.0:
 ; SSSE3-NEXT: movzwl {{.*}}(%rip), %eax
 ; SSSE3-NEXT: movd %eax, %xmm0
-; SSSE3-NEXT: movl $2, %eax
-; SSSE3-NEXT: movd %eax, %xmm1
-; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1],zero,xmm0[3,4,5,6,7,8,9,10,11,12,13,14,15]
-; SSSE3-NEXT: por %xmm1, %xmm0
+; SSSE3-NEXT: por {{.*}}(%rip), %xmm0
 ; SSSE3-NEXT: movd %xmm0, %eax
 ; SSSE3-NEXT: retq
 ;
