[AMDGPU][GlobalISel] Combine for breaking s64 and/or into two s32 insts #151731
base: main
Conversation
When either operand is all ones in its high or low 32-bit half, splitting the 64-bit and/or into two 32-bit instructions opens up further combine opportunities: one of the two new instructions will either be removed or become a simple copy.
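To make the effect concrete, here is a minimal standalone C++ sketch (illustrative only, not part of the patch): with an AND whose constant operand has all ones in the high half, the low-half operation folds to a constant zero, the high-half operation reduces to a copy, and the recombined result matches the original 64-bit operation.

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t X = 0x123456789abcdef0ULL;
  uint64_t Mask = 0xffffffff00000000ULL; // i64 -4294967296: high half all ones

  // Original wide operation.
  uint64_t Wide = X & Mask;

  // Split form: operate on each 32-bit half independently, then recombine.
  uint32_t Lo = (uint32_t)X & (uint32_t)Mask;                 // AND with 0  -> constant 0
  uint32_t Hi = (uint32_t)(X >> 32) & (uint32_t)(Mask >> 32); // AND with -1 -> plain copy
  uint64_t Split = ((uint64_t)Hi << 32) | Lo;

  printf("%s\n", Wide == Split ? "equal" : "different"); // prints "equal"
  return 0;
}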
@llvm/pr-subscribers-backend-amdgpu

Author: Mirko Brkušanin (mbrkusanin)

Changes

When either operand is all ones in its high or low 32-bit half, splitting the 64-bit and/or into two 32-bit instructions opens up further combine opportunities: one of the two new instructions will either be removed or become a simple copy.

Patch is 31.74 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/151731.diff

12 Files Affected:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombine.td b/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
index 9587fad1ecd63..b5dac95b57a2d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
@@ -151,6 +151,25 @@ def zext_of_shift_amount_combines : GICombineGroup<[
canonicalize_zext_lshr, canonicalize_zext_ashr, canonicalize_zext_shl
]>;
+// (and/or i64:x, i64:y) -> i64:(merge (and/or lo_32(x), lo_32(y)), (and/or hi_32(x), hi_32(y)))
+// when either x or y is all ones in low or high parts
+class combine_binop_s64_with_s32_mask<Instruction opcode> : GICombineRule<
+ (defs root:$dst),
+ (match (opcode $dst, i64:$x, i64:$y):$dst,
+ [{ return Helper.matchConstantIs32BitMask(${x}.getReg()) ||
+ Helper.matchConstantIs32BitMask(${y}.getReg()); }]),
+ (apply (G_UNMERGE_VALUES i32:$x_lo, i32:$x_hi, $x),
+ (G_UNMERGE_VALUES i32:$y_lo, i32:$y_hi, $y),
+ (opcode i32:$lo, $x_lo, $y_lo),
+ (opcode i32:$hi, $x_hi, $y_hi),
+ (G_MERGE_VALUES $dst, $lo, $hi))>;
+
+def combine_or_s64_with_s32_mask : combine_binop_s64_with_s32_mask<G_OR>;
+def combine_and_s64_with_s32_mask : combine_binop_s64_with_s32_mask<G_AND>;
+def binop_s64_with_s32_mask_combines : GICombineGroup<[
+ combine_or_s64_with_s32_mask, combine_and_s64_with_s32_mask
+]>;
+
let Predicates = [Has16BitInsts, NotHasMed3_16] in {
// For gfx8, expand f16-fmed3-as-f32 into a min/max f16 sequence. This
// saves one instruction compared to the promotion.
@@ -180,7 +199,8 @@ def gfx8_combines : GICombineGroup<[expand_promoted_fmed3]>;
def AMDGPUPreLegalizerCombiner: GICombiner<
"AMDGPUPreLegalizerCombinerImpl",
[all_combines, combine_fmul_with_select_to_fldexp, clamp_i64_to_i16,
- foldable_fneg, combine_shuffle_vector_to_build_vector]> {
+ foldable_fneg, combine_shuffle_vector_to_build_vector,
+ binop_s64_with_s32_mask_combines]> {
let CombineAllMethodName = "tryCombineAllImpl";
}
@@ -188,7 +208,8 @@ def AMDGPUPostLegalizerCombiner: GICombiner<
"AMDGPUPostLegalizerCombinerImpl",
[all_combines, gfx6gfx7_combines, gfx8_combines, combine_fmul_with_select_to_fldexp,
uchar_to_float, cvt_f32_ubyteN, remove_fcanonicalize, foldable_fneg,
- rcp_sqrt_to_rsq, fdiv_by_sqrt_to_rsq_f16, sign_extension_in_reg, smulu64]> {
+ rcp_sqrt_to_rsq, fdiv_by_sqrt_to_rsq_f16, sign_extension_in_reg, smulu64,
+ binop_s64_with_s32_mask_combines]> {
let CombineAllMethodName = "tryCombineAllImpl";
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
index bcc19932fd938..15dd0ad63a167 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
@@ -9,6 +9,7 @@
#include "AMDGPUCombinerHelper.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
@@ -516,3 +517,9 @@ bool AMDGPUCombinerHelper::matchCombineFmulWithSelectToFldexp(
return true;
}
+
+bool AMDGPUCombinerHelper::matchConstantIs32BitMask(Register Reg) const {
+ const KnownBits &Known = VT->getKnownBits(Reg);
+ return Known.One.extractBits(32, 0).isAllOnes() ||
+ Known.One.extractBits(32, 32).isAllOnes();
+}
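For reference, a standalone sketch of the predicate this helper encodes (assumption: plain llvm::APInt values stand in for the KnownBits.One mask that GISelValueTracking would return; the constants mirror the ones used in the tests below):

#include "llvm/ADT/APInt.h"
#include <cstdio>

// True if either the low or the high 32-bit half of the known-one bits is
// entirely ones, mirroring the extractBits/isAllOnes check above.
static bool isHalfAllOnes(const llvm::APInt &KnownOne) {
  return KnownOne.extractBits(32, 0).isAllOnes() ||  // low half
         KnownOne.extractBits(32, 32).isAllOnes();   // high half
}

int main() {
  llvm::APInt HiMask(64, 0xffffffff00000000ULL); // i64 -4294967296
  llvm::APInt LoMask(64, 0x00000000ffffffffULL); // i64 4294967295
  llvm::APInt Neither(64, 0x0000ffff0000ffffULL);
  printf("%d %d %d\n", isHalfAllOnes(HiMask), isHalfAllOnes(LoMask),
         isHalfAllOnes(Neither)); // prints: 1 1 0
  return 0;
}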
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.h b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.h
index 96e0b12a168a6..243474a57cfab 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.h
@@ -43,6 +43,8 @@ class AMDGPUCombinerHelper : public CombinerHelper {
bool matchCombineFmulWithSelectToFldexp(
MachineInstr &MI, MachineInstr &Sel,
std::function<void(MachineIRBuilder &)> &MatchInfo) const;
+
+ bool matchConstantIs32BitMask(Register Reg) const;
};
} // namespace llvm
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-binop-s64-with-s32-mask.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-binop-s64-with-s32-mask.mir
new file mode 100644
index 0000000000000..022ab513f7429
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-binop-s64-with-s32-mask.mir
@@ -0,0 +1,259 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn -mcpu=tahiti -run-pass=amdgpu-prelegalizer-combiner %s -o - | FileCheck %s
+
+---
+name: test_and_mask_hi_rhs
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-LABEL: name: test_and_mask_hi_rhs
+ ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[UV1]](s32)
+ ; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+ %0:_(s64) = COPY $sgpr0_sgpr1
+ %1:_(s64) = G_CONSTANT i64 -4294967296
+ %2:_(s64) = G_AND %0, %1
+ $sgpr0_sgpr1 = COPY %2(s64)
+ SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+...
+---
+name: test_and_mask_hi_lhs
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-LABEL: name: test_and_mask_hi_lhs
+ ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[UV1]](s32)
+ ; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+ %0:_(s64) = COPY $sgpr0_sgpr1
+ %1:_(s64) = G_CONSTANT i64 -4294967296
+ %2:_(s64) = G_AND %1, %0
+ $sgpr0_sgpr1 = COPY %2(s64)
+ SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+...
+
+---
+name: test_and_mask_lo_rhs
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-LABEL: name: test_and_mask_lo_rhs
+ ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC]](s32)
+ ; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[ZEXT]](s64)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+ %0:_(s64) = COPY $sgpr0_sgpr1
+ %1:_(s64) = G_CONSTANT i64 4294967295
+ %2:_(s64) = G_AND %0, %1
+ $sgpr0_sgpr1 = COPY %2(s64)
+ SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+...
+---
+name: test_and_mask_lo_lhs
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-LABEL: name: test_and_mask_lo_lhs
+ ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC]](s32)
+ ; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[ZEXT]](s64)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+ %0:_(s64) = COPY $sgpr0_sgpr1
+ %1:_(s64) = G_CONSTANT i64 4294967295
+ %2:_(s64) = G_AND %1, %0
+ $sgpr0_sgpr1 = COPY %2(s64)
+ SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+...
+---
+name: test_and_mask_hi_with_merge_unmerge
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0, $sgpr1, $sgpr2
+ ; CHECK-LABEL: name: test_and_mask_hi_with_merge_unmerge
+ ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr1
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: $sgpr0 = COPY [[C]](s32)
+ ; CHECK-NEXT: $sgpr1 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $sgpr1
+ %2:_(s64) = G_MERGE_VALUES %0(s32), %1(s32)
+ %3:_(s64) = G_CONSTANT i64 -4294967296
+ %4:_(s64) = G_AND %2, %3
+ %5:_(s32), %6:_(s32) = G_UNMERGE_VALUES %4(s64)
+ $sgpr0 = COPY %5(s32)
+ $sgpr1 = COPY %6(s32)
+ SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+...
+---
+name: negative_and_test_incorrect_types
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5
+
+ ; CHECK-LABEL: name: negative_and_test_incorrect_types
+ ; CHECK: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 -4294967296
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s128) = G_AND [[COPY]], [[C]]
+ ; CHECK-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[AND]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s64) = COPY $vgpr4_vgpr5
+ %2:_(s128) = G_CONSTANT i128 -4294967296
+ %3:_(s128) = G_AND %0, %2
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+---
+name: test_or_mask_hi_rhs
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-LABEL: name: test_or_mask_hi_rhs
+ ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[TRUNC]](s32), [[C]](s32)
+ ; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+ %0:_(s64) = COPY $sgpr0_sgpr1
+ %1:_(s64) = G_CONSTANT i64 -4294967296
+ %2:_(s64) = G_OR %0, %1
+ $sgpr0_sgpr1 = COPY %2(s64)
+ SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+...
+---
+name: test_or_mask_hi_lhs
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-LABEL: name: test_or_mask_hi_lhs
+ ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[TRUNC]](s32), [[C]](s32)
+ ; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+ %0:_(s64) = COPY $sgpr0_sgpr1
+ %1:_(s64) = G_CONSTANT i64 -4294967296
+ %2:_(s64) = G_OR %1, %0
+ $sgpr0_sgpr1 = COPY %2(s64)
+ SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+...
+
+---
+name: test_or_mask_lo_rhs
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-LABEL: name: test_or_mask_lo_rhs
+ ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[UV1]](s32)
+ ; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+ %0:_(s64) = COPY $sgpr0_sgpr1
+ %1:_(s64) = G_CONSTANT i64 4294967295
+ %2:_(s64) = G_OR %0, %1
+ $sgpr0_sgpr1 = COPY %2(s64)
+ SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+...
+---
+name: test_or_mask_lo_lhs
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-LABEL: name: test_or_mask_lo_lhs
+ ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[UV1]](s32)
+ ; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+ %0:_(s64) = COPY $sgpr0_sgpr1
+ %1:_(s64) = G_CONSTANT i64 4294967295
+ %2:_(s64) = G_OR %1, %0
+ $sgpr0_sgpr1 = COPY %2(s64)
+ SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+...
+---
+name: test_or_mask_hi_with_merge_unmerge
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0, $sgpr1, $sgpr2
+ ; CHECK-LABEL: name: test_or_mask_hi_with_merge_unmerge
+ ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; CHECK-NEXT: $sgpr0 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $sgpr1 = COPY [[C]](s32)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $sgpr1
+ %2:_(s64) = G_MERGE_VALUES %0(s32), %1(s32)
+ %3:_(s64) = G_CONSTANT i64 -4294967296
+ %4:_(s64) = G_OR %2, %3
+ %5:_(s32), %6:_(s32) = G_UNMERGE_VALUES %4(s64)
+ $sgpr0 = COPY %5(s32)
+ $sgpr1 = COPY %6(s32)
+ SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+...
+---
+name: negative_or_test_incorrect_types
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5
+
+ ; CHECK-LABEL: name: negative_or_test_incorrect_types
+ ; CHECK: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 -4294967296
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s128) = G_OR [[COPY]], [[C]]
+ ; CHECK-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[OR]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s64) = COPY $vgpr4_vgpr5
+ %2:_(s128) = G_CONSTANT i128 -4294967296
+ %3:_(s128) = G_OR %0, %2
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
index ff26ea21390e2..5dff8c16f7c89 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
@@ -227,54 +227,52 @@ exit:
define amdgpu_cs void @single_lane_execution_attribute(i32 inreg %.userdata0, <3 x i32> inreg %.WorkgroupId, <3 x i32> %.LocalInvocationId) #0 {
; GFX10-LABEL: single_lane_execution_attribute:
; GFX10: ; %bb.0: ; %.entry
-; GFX10-NEXT: s_mov_b32 s6, 0
-; GFX10-NEXT: s_getpc_b64 s[4:5]
-; GFX10-NEXT: s_mov_b32 s7, -1
-; GFX10-NEXT: s_mov_b32 s2, s1
-; GFX10-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
-; GFX10-NEXT: s_mov_b32 s1, 0
+; GFX10-NEXT: s_getpc_b64 s[12:13]
+; GFX10-NEXT: s_mov_b32 s12, 0
+; GFX10-NEXT: s_mov_b32 s2, s0
+; GFX10-NEXT: s_mov_b32 s3, s12
; GFX10-NEXT: v_mbcnt_lo_u32_b32 v1, -1, 0
-; GFX10-NEXT: s_or_b64 s[12:13], s[4:5], s[0:1]
-; GFX10-NEXT: s_load_dwordx8 s[4:11], s[12:13], 0x0
+; GFX10-NEXT: s_or_b64 s[2:3], s[12:13], s[2:3]
+; GFX10-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x0
; GFX10-NEXT: v_mbcnt_hi_u32_b32 v1, -1, v1
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 2, v1
; GFX10-NEXT: v_and_b32_e32 v3, 1, v1
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3
-; GFX10-NEXT: s_xor_b32 s3, vcc_lo, exec_lo
+; GFX10-NEXT: s_xor_b32 s2, vcc_lo, exec_lo
+; GFX10-NEXT: s_and_b32 vcc_lo, s2, exec_lo
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_load_dword v2, v2, s[4:7], 0 offen
-; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s3
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 0, v2
; GFX10-NEXT: s_cbranch_vccnz .LBB4_4
; GFX10-NEXT: ; %bb.1: ; %.preheader.preheader
-; GFX10-NEXT: s_mov_b32 s3, 0
+; GFX10-NEXT: s_mov_b32 s2, 0
; GFX10-NEXT: .LBB4_2: ; %.preheader
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: v_mov_b32_e32 v3, s1
+; GFX10-NEXT: v_mov_b32_e32 v3, s12
; GFX10-NEXT: v_add_nc_u32_e32 v1, -1, v1
-; GFX10-NEXT: s_add_i32 s1, s1, 4
+; GFX10-NEXT: s_add_i32 s12, s12, 4
; GFX10-NEXT: buffer_load_dword v3, v3, s[4:7], 0 offen
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_readfirstlane_b32 s12, v3
-; GFX10-NEXT: s_add_i32 s3, s12, s3
+; GFX10-NEXT: v_readfirstlane_b32 s3, v3
+; GFX10-NEXT: s_add_i32 s2, s3, s2
; GFX10-NEXT: s_cbranch_vccnz .LBB4_2
; GFX10-NEXT: ; %bb.3: ; %.preheader._crit_edge
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, s3, v2
-; GFX10-NEXT: s_or_b32 s1, s0, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s1
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, s2, v2
+; GFX10-NEXT: s_or_b32 s2, s0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
; GFX10-NEXT: s_branch .LBB4_6
; GFX10-NEXT: .LBB4_4:
-; GFX10-NEXT: s_mov_b32 s1, exec_lo
+; GFX10-NEXT: s_mov_b32 s2, exec_lo
; GFX10-NEXT: ; implicit-def: $vgpr1
-; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s1
+; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s2
; GFX10-NEXT: s_cbranch_vccz .LBB4_6
; GFX10-NEXT: ; %bb.5: ; %.19
; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s0
; GFX10-NEXT: v_or_b32_e32 v1, 2, v1
; GFX10-NEXT: .LBB4_6: ; %.22
-; GFX10-NEXT: v_add_lshl_u32 v0, v0, s2, 2
+; GFX10-NEXT: v_add_lshl_u32 v0, v0, s1, 2
; GFX10-NEXT: buffer_store_dword v1, v0, s[8:11], 0 offen
; GFX10-NEXT: s_endpgm
.entry:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/postlegalizer-combiner-zextload-from-and.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/postlegalizer-combiner-zextload-from-and.mir
index 73e06de1923df..8d72d11fe71d7 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/postlegalizer-combiner-zextload-from-and.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/postlegalizer-combiner-zextload-from-and.mir
@@ -14,9 +14,9 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p1) :: (load (s64), addrspace 1)
- ; CHECK-NEXT: %k:_(s64) = G_CONSTANT i64 4294967295
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[LOAD]], %k
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[AND]](s64)
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LOAD]](s64)
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC]](s32)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[ZEXT]](s64)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = G_LOAD %0 :: (load (s64), align 8, addrspace 1)
%k:_(s64) = G_CONSTANT i64 4294967295
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
index 4031fe0be2823..f57fc005b994b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
@@ -194,10 +194,8 @@ declare i32 @llvm.amdgcn.readfirstlane(i32)
define amdgpu_ps i64 @s_sdiv_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-LABEL: s_sdiv_i64:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_mov_b32 s6, 0
; CHECK-NEXT: s_or_b64 s[0:1], s[2:3], s[4:5]
-; CHECK-NEXT: s_mov_b32 s7, -1
-; CHECK-NEXT: s_and_b64 s[0:1], s[0:1], s[6:7]
+; CHECK-NEXT: s_mov_b32 s0, 0
; CHECK-NEXT: v_cmp_ne_u64_e64 vcc, s[0:1], 0
; CHECK-NEXT: s_mov_b32 s0, 1
; CHECK-NEXT: s_cbranch_vccz .LBB1_2
@@ -218,7 +216,6 @@ define amdgpu_ps i64 @s_sdiv_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-NEXT: v_mac_f32_e32 v0, 0x4f800000, v1
; CHECK-NEXT: v_rcp_iflag_f32_e32 v0, v0
; CHECK-NEXT: s_subb_u32 s5, 0, s11
-; CHECK-NEXT: s_xor_b64 s[6:7], s[6:7], s[8:9]
; CHECK-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
; CHECK-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
; CHECK-NEXT: v_trunc_f32_e32 v2, v1
@@ -327,9 +324,10 @@ define amdgpu_ps i64 @s_sdiv_i64(i64 inreg %num, i64 inreg %den) {
; CH...
[truncated]
@llvm/pr-subscribers-llvm-globalisel

Author: Mirko Brkušanin (mbrkusanin)