Skip to content

Commit e73aa6f

Browse files
committed
[AMDGPU][GlobalISel] Add combines with or/and that only use half of 64bit values
1 parent cdf75df commit e73aa6f

15 files changed

+842
-668
lines changed

llvm/lib/Target/AMDGPU/AMDGPUCombine.td

Lines changed: 19 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -151,6 +151,21 @@ def zext_of_shift_amount_combines : GICombineGroup<[
151151
canonicalize_zext_lshr, canonicalize_zext_ashr, canonicalize_zext_shl
152152
]>;
153153

154+
// (or i64:x, (zext i32:y)) -> i64:(merge (or i32:y, lo_32(x)), hi_32(x))
// The zero-extended RHS has all-zero high bits, so the OR can only change
// the low half of x; rewrite as a 32-bit OR plus a re-merge of the halves.
def combine_or_s64_s32 : GICombineRule<
  (defs root:$dst),
  (match (G_ZEXT $zext_val, i32:$src_s32):$zext_inst,
         (G_OR $dst, i64:$src_s64, $zext_val):$dst),
  (apply [{ Helper.applyCombineOrS64S32(*${dst}, ${src_s64}.getReg(), ${src_s32}.getReg()); }])>;
160+
161+
// (and i64:x, 0xFFFFFFFF00000000) -> i64:(merge i32:0, hi_32(x))
// Masking away the entire low half is equivalent to rebuilding the value
// from a zero low word and the original high word.
def combine_and_s64_himask : GICombineRule<
  (defs root:$dst),
  (match (G_CONSTANT $const, 0xFFFFFFFF00000000),
         (G_AND $dst, i64:$src_s64, $const):$dst),
  (apply [{ Helper.applyCombineAndS64HiMask(*${dst}, ${src_s64}.getReg()); }])>;
167+
168+
154169
let Predicates = [Has16BitInsts, NotHasMed3_16] in {
155170
// For gfx8, expand f16-fmed3-as-f32 into a min/max f16 sequence. This
156171
// saves one instruction compared to the promotion.
@@ -180,15 +195,17 @@ def gfx8_combines : GICombineGroup<[expand_promoted_fmed3]>;
180195
def AMDGPUPreLegalizerCombiner: GICombiner<
181196
"AMDGPUPreLegalizerCombinerImpl",
182197
[all_combines, combine_fmul_with_select_to_fldexp, clamp_i64_to_i16,
183-
foldable_fneg, combine_shuffle_vector_to_build_vector]> {
198+
foldable_fneg, combine_shuffle_vector_to_build_vector,
199+
combine_or_s64_s32, combine_and_s64_himask]> {
184200
let CombineAllMethodName = "tryCombineAllImpl";
185201
}
186202

187203
def AMDGPUPostLegalizerCombiner: GICombiner<
188204
"AMDGPUPostLegalizerCombinerImpl",
189205
[all_combines, gfx6gfx7_combines, gfx8_combines, combine_fmul_with_select_to_fldexp,
190206
uchar_to_float, cvt_f32_ubyteN, remove_fcanonicalize, foldable_fneg,
191-
rcp_sqrt_to_rsq, fdiv_by_sqrt_to_rsq_f16, sign_extension_in_reg, smulu64]> {
207+
rcp_sqrt_to_rsq, fdiv_by_sqrt_to_rsq_f16, sign_extension_in_reg, smulu64,
208+
combine_or_s64_s32, combine_and_s64_himask]> {
192209
let CombineAllMethodName = "tryCombineAllImpl";
193210
}
194211

llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -516,3 +516,29 @@ bool AMDGPUCombinerHelper::matchCombineFmulWithSelectToFldexp(
516516

517517
return true;
518518
}
519+
520+
// Rewrite (or i64:SrcS64, (zext i32:SrcS32)) as
//   merge(or(SrcS32, lo_32(SrcS64)), hi_32(SrcS64)).
// The zext guarantees the high half of the OR operand is zero, so only the
// low 32 bits of SrcS64 can change; MI (the G_OR) is erased afterwards.
void AMDGPUCombinerHelper::applyCombineOrS64S32(MachineInstr &MI,
                                                Register SrcS64,
                                                Register SrcS32) const {
  const LLT S32 = LLT::scalar(32);
  Register Dst = MI.getOperand(0).getReg();

  // Split the 64-bit source into its two 32-bit halves.
  auto Halves = Builder.buildUnmerge(S32, SrcS64);
  Register Lo = Halves.getReg(0);
  Register Hi = Halves.getReg(1);

  // OR into the low half only, then reassemble with the untouched high half.
  Register LoOr = Builder.buildOr(S32, SrcS32, Lo).getReg(0);
  Builder.buildMergeValues(Dst, {LoOr, Hi});
  MI.eraseFromParent();
}
533+
534+
// Rewrite (and i64:SrcS64, 0xFFFFFFFF00000000) as
//   merge(i32 0, hi_32(SrcS64)).
// The mask clears the entire low half, so the result is simply a zero low
// word merged with the original high word; MI (the G_AND) is erased.
void AMDGPUCombinerHelper::applyCombineAndS64HiMask(MachineInstr &MI,
                                                    Register SrcS64) const {
  const LLT S32 = LLT::scalar(32);
  Register Dst = MI.getOperand(0).getReg();

  // Only the high half of the source survives the mask.
  Register Hi = Builder.buildUnmerge(S32, SrcS64).getReg(1);
  Register Zero = Builder.buildConstant(S32, 0).getReg(0);
  Builder.buildMergeValues(Dst, {Zero, Hi});
  MI.eraseFromParent();
}

llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,11 @@ class AMDGPUCombinerHelper : public CombinerHelper {
4343
bool matchCombineFmulWithSelectToFldexp(
4444
MachineInstr &MI, MachineInstr &Sel,
4545
std::function<void(MachineIRBuilder &)> &MatchInfo) const;
46+
47+
  /// Rewrite (or i64:S64, (zext i32:S32)) as a 32-bit OR on the low half of
  /// S64 merged with its untouched high half. \p MI is the G_OR to replace.
  void applyCombineOrS64S32(MachineInstr &MI, Register S64, Register S32) const;

  /// Rewrite (and i64:S64, 0xFFFFFFFF00000000) as merge(i32 0, hi_32(S64)).
  /// \p MI is the G_AND to replace.
  void applyCombineAndS64HiMask(MachineInstr &MI, Register S64) const;
50+
4651
};
4752

4853
} // namespace llvm
Lines changed: 175 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,175 @@
1+
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
2+
# RUN: llc -mtriple=amdgcn -mcpu=tahiti -run-pass=amdgpu-prelegalizer-combiner %s -o - | FileCheck %s
3+
4+
---
5+
name: test_combine_or_s64_s32
6+
tracksRegLiveness: true
7+
body: |
8+
bb.0:
9+
liveins: $sgpr0, $sgpr1, $sgpr2
10+
; CHECK-LABEL: name: test_combine_or_s64_s32
11+
; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2
12+
; CHECK-NEXT: {{ $}}
13+
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr0
14+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
15+
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr2
16+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY2]], [[COPY]]
17+
; CHECK-NEXT: $sgpr0 = COPY [[OR]](s32)
18+
; CHECK-NEXT: $sgpr1 = COPY [[COPY1]](s32)
19+
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
20+
%0:_(s32) = COPY $sgpr0
21+
%1:_(s32) = COPY $sgpr1
22+
%2:_(s32) = COPY $sgpr2
23+
%3:_(s64) = G_MERGE_VALUES %0(s32), %1(s32)
24+
%4:_(s64) = G_ZEXT %2(s32)
25+
%5:_(s64) = disjoint G_OR %3, %4
26+
%6:_(s32), %7:_(s32) = G_UNMERGE_VALUES %5(s64)
27+
$sgpr0 = COPY %6(s32)
28+
$sgpr1 = COPY %7(s32)
29+
SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
30+
...
31+
---
32+
name: test_combine_and_s64_himask
33+
tracksRegLiveness: true
34+
body: |
35+
bb.0:
36+
liveins: $sgpr0, $sgpr1, $sgpr2
37+
; CHECK-LABEL: name: test_combine_and_s64_himask
38+
; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2
39+
; CHECK-NEXT: {{ $}}
40+
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr1
41+
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
42+
; CHECK-NEXT: $sgpr0 = COPY [[C]](s32)
43+
; CHECK-NEXT: $sgpr1 = COPY [[COPY]](s32)
44+
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
45+
%0:_(s32) = COPY $sgpr0
46+
%1:_(s32) = COPY $sgpr1
47+
%2:_(s64) = G_MERGE_VALUES %0(s32), %1(s32)
48+
%3:_(s64) = G_CONSTANT i64 -4294967296
49+
%4:_(s64) = G_AND %2, %3
50+
%5:_(s32), %6:_(s32) = G_UNMERGE_VALUES %4(s64)
51+
$sgpr0 = COPY %5(s32)
52+
$sgpr1 = COPY %6(s32)
53+
SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
54+
...
55+
---
56+
name: test_combined
57+
tracksRegLiveness: true
58+
body: |
59+
bb.0:
60+
liveins: $sgpr0, $sgpr1, $sgpr2
61+
; CHECK-LABEL: name: test_combined
62+
; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2
63+
; CHECK-NEXT: {{ $}}
64+
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr1
65+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
66+
; CHECK-NEXT: $sgpr0 = COPY [[COPY1]](s32)
67+
; CHECK-NEXT: $sgpr1 = COPY [[COPY]](s32)
68+
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
69+
%0:_(s32) = COPY $sgpr0
70+
%1:_(s32) = COPY $sgpr1
71+
%2:_(s32) = COPY $sgpr2
72+
%3:_(s64) = G_MERGE_VALUES %0(s32), %1(s32)
73+
%4:_(s64) = G_CONSTANT i64 -4294967296
74+
%5:_(s64) = G_AND %3, %4
75+
%6:_(s64) = G_ZEXT %2(s32)
76+
%7:_(s64) = disjoint G_OR %5, %6
77+
%8:_(s32), %9:_(s32) = G_UNMERGE_VALUES %7(s64)
78+
$sgpr0 = COPY %8(s32)
79+
$sgpr1 = COPY %9(s32)
80+
SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
81+
...
82+
83+
---
84+
name: test_combine_or_s64_s32_no_merge_unmerge
85+
tracksRegLiveness: true
86+
body: |
87+
bb.0:
88+
liveins: $sgpr0_sgpr1, $sgpr2
89+
; CHECK-LABEL: name: test_combine_or_s64_s32_no_merge_unmerge
90+
; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
91+
; CHECK-NEXT: {{ $}}
92+
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
93+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
94+
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
95+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY1]], [[UV]]
96+
; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR]](s32), [[UV1]](s32)
97+
; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
98+
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
99+
%0:_(s64) = COPY $sgpr0_sgpr1
100+
%1:_(s32) = COPY $sgpr2
101+
%2:_(s64) = G_ZEXT %1(s32)
102+
%3:_(s64) = disjoint G_OR %0, %2
103+
$sgpr0_sgpr1 = COPY %3(s64)
104+
SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
105+
...
106+
---
107+
name: test_combine_and_s64_himask_no_merge_unmerge
108+
tracksRegLiveness: true
109+
body: |
110+
bb.0:
111+
liveins: $sgpr0_sgpr1, $sgpr2
112+
; CHECK-LABEL: name: test_combine_and_s64_himask_no_merge_unmerge
113+
; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
114+
; CHECK-NEXT: {{ $}}
115+
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
116+
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
117+
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
118+
; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[UV1]](s32)
119+
; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
120+
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
121+
%0:_(s64) = COPY $sgpr0_sgpr1
122+
%1:_(s64) = G_CONSTANT i64 -4294967296
123+
%2:_(s64) = G_AND %0, %1
124+
$sgpr0_sgpr1 = COPY %2(s64)
125+
SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
126+
...
127+
---
128+
name: test_combined_vgpr_no_merge_unmerge
129+
tracksRegLiveness: true
130+
body: |
131+
bb.0:
132+
liveins: $vgpr0_vgpr1, $vgpr2
133+
134+
; CHECK-LABEL: name: test_combined_vgpr_no_merge_unmerge
135+
; CHECK: liveins: $vgpr0_vgpr1, $vgpr2
136+
; CHECK-NEXT: {{ $}}
137+
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
138+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
139+
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
140+
; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[UV1]](s32)
141+
; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
142+
%1:_(s64) = COPY $vgpr0_vgpr1
143+
%0:_(s32) = COPY $vgpr2
144+
%2:_(s64) = G_CONSTANT i64 -4294967296
145+
%3:_(s64) = G_AND %1, %2
146+
%4:_(s64) = G_ZEXT %0
147+
%5:_(s64) = G_OR %3, %4
148+
$vgpr0_vgpr1 = COPY %5
149+
...
150+
---
151+
name: negative_test_incorrect_types
152+
tracksRegLiveness: true
153+
body: |
154+
bb.0:
155+
liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5
156+
157+
; CHECK-LABEL: name: negative_test_incorrect_types
158+
; CHECK: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5
159+
; CHECK-NEXT: {{ $}}
160+
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
161+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr4_vgpr5
162+
; CHECK-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 -4294967296
163+
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s128) = G_AND [[COPY]], [[C]]
164+
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s64)
165+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s128) = G_OR [[AND]], [[ZEXT]]
166+
; CHECK-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[OR]](s128)
167+
%1:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
168+
%0:_(s64) = COPY $vgpr4_vgpr5
169+
%2:_(s128) = G_CONSTANT i128 -4294967296
170+
%3:_(s128) = G_AND %1, %2
171+
%4:_(s128) = G_ZEXT %0
172+
%5:_(s128) = G_OR %3, %4
173+
$vgpr0_vgpr1_vgpr2_vgpr3 = COPY %5
174+
...
175+

llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll

Lines changed: 13 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -227,33 +227,30 @@ exit:
227227
define amdgpu_cs void @single_lane_execution_attribute(i32 inreg %.userdata0, <3 x i32> inreg %.WorkgroupId, <3 x i32> %.LocalInvocationId) #0 {
228228
; GFX10-LABEL: single_lane_execution_attribute:
229229
; GFX10: ; %bb.0: ; %.entry
230-
; GFX10-NEXT: s_mov_b32 s6, 0
231230
; GFX10-NEXT: s_getpc_b64 s[4:5]
232-
; GFX10-NEXT: s_mov_b32 s7, -1
233-
; GFX10-NEXT: s_mov_b32 s2, s1
234-
; GFX10-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
235-
; GFX10-NEXT: s_mov_b32 s1, 0
231+
; GFX10-NEXT: s_mov_b32 s2, s0
232+
; GFX10-NEXT: s_mov_b32 s3, s5
236233
; GFX10-NEXT: v_mbcnt_lo_u32_b32 v1, -1, 0
237-
; GFX10-NEXT: s_or_b64 s[12:13], s[4:5], s[0:1]
238-
; GFX10-NEXT: s_load_dwordx8 s[4:11], s[12:13], 0x0
234+
; GFX10-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x0
239235
; GFX10-NEXT: v_mbcnt_hi_u32_b32 v1, -1, v1
240236
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 2, v1
241237
; GFX10-NEXT: v_and_b32_e32 v3, 1, v1
242238
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3
243-
; GFX10-NEXT: s_xor_b32 s3, vcc_lo, exec_lo
239+
; GFX10-NEXT: s_xor_b32 s2, vcc_lo, exec_lo
244240
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
245241
; GFX10-NEXT: buffer_load_dword v2, v2, s[4:7], 0 offen
246-
; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s3
242+
; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s2
243+
; GFX10-NEXT: s_mov_b32 s2, 0
247244
; GFX10-NEXT: s_waitcnt vmcnt(0)
248245
; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 0, v2
249246
; GFX10-NEXT: s_cbranch_vccnz .LBB4_4
250247
; GFX10-NEXT: ; %bb.1: ; %.preheader.preheader
251248
; GFX10-NEXT: s_mov_b32 s3, 0
252249
; GFX10-NEXT: .LBB4_2: ; %.preheader
253250
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
254-
; GFX10-NEXT: v_mov_b32_e32 v3, s1
251+
; GFX10-NEXT: v_mov_b32_e32 v3, s2
255252
; GFX10-NEXT: v_add_nc_u32_e32 v1, -1, v1
256-
; GFX10-NEXT: s_add_i32 s1, s1, 4
253+
; GFX10-NEXT: s_add_i32 s2, s2, 4
257254
; GFX10-NEXT: buffer_load_dword v3, v3, s[4:7], 0 offen
258255
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1
259256
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -262,19 +259,19 @@ define amdgpu_cs void @single_lane_execution_attribute(i32 inreg %.userdata0, <3
262259
; GFX10-NEXT: s_cbranch_vccnz .LBB4_2
263260
; GFX10-NEXT: ; %bb.3: ; %.preheader._crit_edge
264261
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, s3, v2
265-
; GFX10-NEXT: s_or_b32 s1, s0, vcc_lo
266-
; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s1
262+
; GFX10-NEXT: s_or_b32 s2, s0, vcc_lo
263+
; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
267264
; GFX10-NEXT: s_branch .LBB4_6
268265
; GFX10-NEXT: .LBB4_4:
269-
; GFX10-NEXT: s_mov_b32 s1, exec_lo
266+
; GFX10-NEXT: s_mov_b32 s2, exec_lo
270267
; GFX10-NEXT: ; implicit-def: $vgpr1
271-
; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s1
268+
; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s2
272269
; GFX10-NEXT: s_cbranch_vccz .LBB4_6
273270
; GFX10-NEXT: ; %bb.5: ; %.19
274271
; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s0
275272
; GFX10-NEXT: v_or_b32_e32 v1, 2, v1
276273
; GFX10-NEXT: .LBB4_6: ; %.22
277-
; GFX10-NEXT: v_add_lshl_u32 v0, v0, s2, 2
274+
; GFX10-NEXT: v_add_lshl_u32 v0, v0, s1, 2
278275
; GFX10-NEXT: buffer_store_dword v1, v0, s[8:11], 0 offen
279276
; GFX10-NEXT: s_endpgm
280277
.entry:

0 commit comments

Comments
 (0)