Skip to content

Commit f96810f

Browse files
committed
[AMDGPU] Expand vector trunc stores from i16 to i8
Differential Revision: https://reviews.llvm.org/D77693
1 parent f85ae05 commit f96810f

File tree

2 files changed

+65
-0
lines changed

2 files changed

+65
-0
lines changed

llvm/lib/Target/AMDGPU/SIISelLowering.cpp

Lines changed: 5 additions & 0 deletions
Diff table columns: original file line number, diff line number, diff line change
@@ -202,6 +202,11 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
202202
setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
203203
setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
204204
setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);
205+
setTruncStoreAction(MVT::v2i16, MVT::v2i8, Expand);
206+
setTruncStoreAction(MVT::v4i16, MVT::v4i8, Expand);
207+
setTruncStoreAction(MVT::v8i16, MVT::v8i8, Expand);
208+
setTruncStoreAction(MVT::v16i16, MVT::v16i8, Expand);
209+
setTruncStoreAction(MVT::v32i16, MVT::v32i8, Expand);
205210

206211
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
207212
setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
Lines changed: 60 additions & 0 deletions
Diff table columns: original file line number, diff line number, diff line change
@@ -0,0 +1,60 @@
1+
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
2+
3+
; GCN-LABEL: {{^}}short_char:
4+
; GCN: global_store_byte v
5+
define protected amdgpu_kernel void @short_char(i8 addrspace(1)* %out) {
6+
entry:
7+
%tmp = load i16, i16 addrspace(1)* undef
8+
%tmp1 = trunc i16 %tmp to i8
9+
store i8 %tmp1, i8 addrspace(1)* %out
10+
ret void
11+
}
12+
13+
; GCN-LABEL: {{^}}short2_char4:
14+
; GCN: global_store_dword v
15+
define protected amdgpu_kernel void @short2_char4(<4 x i8> addrspace(1)* %out) {
16+
entry:
17+
%tmp = load <2 x i16>, <2 x i16> addrspace(1)* undef, align 4
18+
%vecinit = shufflevector <2 x i16> %tmp, <2 x i16> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
19+
%vecinit2 = shufflevector <4 x i16> %vecinit, <4 x i16> <i16 undef, i16 undef, i16 0, i16 0>, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
20+
%tmp1 = trunc <4 x i16> %vecinit2 to <4 x i8>
21+
store <4 x i8> %tmp1, <4 x i8> addrspace(1)* %out, align 4
22+
ret void
23+
}
24+
25+
; GCN-LABEL: {{^}}short4_char8:
26+
; GCN: global_store_dwordx2 v
27+
define protected amdgpu_kernel void @short4_char8(<8 x i8> addrspace(1)* %out) {
28+
entry:
29+
%tmp = load <4 x i16>, <4 x i16> addrspace(1)* undef, align 8
30+
%vecinit = shufflevector <4 x i16> %tmp, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
31+
%vecinit2 = shufflevector <8 x i16> %vecinit, <8 x i16> <i16 undef, i16 undef, i16 undef, i16 undef, i16 0, i16 0, i16 0, i16 0>, <8 x i32> <i32 0, i32 1, i32 6, i32 7, i32 0, i32 1, i32 6, i32 7>
32+
%tmp1 = trunc <8 x i16> %vecinit2 to <8 x i8>
33+
store <8 x i8> %tmp1, <8 x i8> addrspace(1)* %out, align 8
34+
ret void
35+
}
36+
37+
; GCN-LABEL: {{^}}short8_char16:
38+
; GCN: global_store_dwordx4 v
39+
define protected amdgpu_kernel void @short8_char16(<16 x i8> addrspace(1)* %out) {
40+
entry:
41+
%tmp = load <8 x i16>, <8 x i16> addrspace(1)* undef, align 16
42+
%vecinit = shufflevector <8 x i16> %tmp, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
43+
%vecinit2 = shufflevector <16 x i16> %vecinit, <16 x i16> <i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, <16 x i32> <i32 0, i32 1, i32 6, i32 7, i32 0, i32 1, i32 6, i32 7, i32 0, i32 1, i32 6, i32 7, i32 0, i32 1, i32 6, i32 7>
44+
%tmp1 = trunc <16 x i16> %vecinit2 to <16 x i8>
45+
store <16 x i8> %tmp1, <16 x i8> addrspace(1)* %out, align 16
46+
ret void
47+
}
48+
49+
; GCN-LABEL: {{^}}short16_char32:
50+
; GCN: global_store_dwordx4 v
51+
; GCN: global_store_dwordx4 v
52+
define protected amdgpu_kernel void @short16_char32(<32 x i8> addrspace(1)* %out) {
53+
entry:
54+
%tmp = load <16 x i16>, <16 x i16> addrspace(1)* undef, align 32
55+
%vecinit = shufflevector <16 x i16> %tmp, <16 x i16> undef, <32 x i32> <i32 0, i32 1, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
56+
%vecinit2 = shufflevector <32 x i16> %vecinit, <32 x i16> <i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 0, i16 1, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 undef, i16 undef, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, <32 x i32> <i32 0, i32 1, i32 6, i32 7, i32 0, i32 1, i32 6, i32 7, i32 0, i32 1, i32 6, i32 7, i32 0, i32 1, i32 6, i32 7, i32 0, i32 1, i32 6, i32 7, i32 0, i32 1, i32 6, i32 7, i32 0, i32 1, i32 6, i32 7, i32 0, i32 1, i32 6, i32 7>
57+
%tmp1 = trunc <32 x i16> %vecinit2 to <32 x i8>
58+
store <32 x i8> %tmp1, <32 x i8> addrspace(1)* %out, align 32
59+
ret void
60+
}

0 commit comments

Comments
 (0)