
Commit de5c1c9

[RISCV] add scmp RVV CodeGen tests
1 parent 46f6e62 commit de5c1c9
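
For reference, llvm.scmp performs a signed three-way comparison: each result element is -1 when the first operand is smaller, 0 when the operands are equal, and 1 when the first operand is larger. The generated checks below lower this with vmslt/vmsgt mask compares feeding vmerge selects (with shift-based shortcuts in some of the zero-operand cases). A minimal standalone IR sketch (illustrative only; the function name and types here are assumptions, not taken from the commit's test files):

; Signed three-way compare of two fixed-length vectors.
define <4 x i32> @scmp_sketch(<4 x i32> %x, <4 x i32> %y) {
entry:
  ; Each lane is -1 (x < y), 0 (x == y), or 1 (x > y).
  %r = call <4 x i32> @llvm.scmp(<4 x i32> %x, <4 x i32> %y)
  ret <4 x i32> %r
}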

2 files changed: +395 -0 lines changed
Lines changed: 189 additions & 0 deletions
@@ -0,0 +1,189 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64

define <16 x i8> @scmp_i8i8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: scmp_i8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmslt.vv v0, v9, v8
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
; CHECK-NEXT: vmslt.vv v0, v8, v9
; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <16 x i8> @llvm.scmp(<16 x i8> %a, <16 x i8> %b)
  ret <16 x i8> %c
}

define <16 x i8> @scmp_z8i8(<16 x i8> %a) {
; CHECK-LABEL: scmp_z8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
; CHECK-NEXT: vmsgt.vi v0, v8, 0
; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <16 x i8> @llvm.scmp(<16 x i8> zeroinitializer, <16 x i8> %a)
  ret <16 x i8> %c
}

define <16 x i8> @scmp_i8z8(<16 x i8> %a) {
; CHECK-LABEL: scmp_i8z8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <16 x i8> @llvm.scmp(<16 x i8> %a, <16 x i8> zeroinitializer)
  ret <16 x i8> %c
}


define <8 x i16> @scmp_i16i16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: scmp_i16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vmslt.vv v0, v9, v8
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
; CHECK-NEXT: vmslt.vv v0, v8, v9
; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <8 x i16> @llvm.scmp(<8 x i16> %a, <8 x i16> %b)
  ret <8 x i16> %c
}

define <8 x i16> @scmp_z16i16(<8 x i16> %a) {
; CHECK-LABEL: scmp_z16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
; CHECK-NEXT: vmsgt.vi v0, v8, 0
; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <8 x i16> @llvm.scmp(<8 x i16> zeroinitializer, <8 x i16> %a)
  ret <8 x i16> %c
}

define <8 x i16> @scmp_i16z16(<8 x i16> %a) {
; CHECK-LABEL: scmp_i16z16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <8 x i16> @llvm.scmp(<8 x i16> %a, <8 x i16> zeroinitializer)
  ret <8 x i16> %c
}


define <4 x i32> @scmp_i32i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: scmp_i32i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vmslt.vv v0, v9, v8
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
; CHECK-NEXT: vmslt.vv v0, v8, v9
; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <4 x i32> @llvm.scmp(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %c
}

define <4 x i32> @scmp_z32i32(<4 x i32> %a) {
; CHECK-LABEL: scmp_z32i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
; CHECK-NEXT: vmsgt.vi v0, v8, 0
; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <4 x i32> @llvm.scmp(<4 x i32> zeroinitializer, <4 x i32> %a)
  ret <4 x i32> %c
}

define <4 x i32> @scmp_i32z32(<4 x i32> %a) {
; CHECK-LABEL: scmp_i32z32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <4 x i32> @llvm.scmp(<4 x i32> %a, <4 x i32> zeroinitializer)
  ret <4 x i32> %c
}


define <2 x i64> @scmp_i64i64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: scmp_i64i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vmslt.vv v0, v9, v8
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
; CHECK-NEXT: vmslt.vv v0, v8, v9
; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <2 x i64> @llvm.scmp(<2 x i64> %a, <2 x i64> %b)
  ret <2 x i64> %c
}

define <2 x i64> @scmp_z64i64(<2 x i64> %a) {
; CHECK-LABEL: scmp_z64i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
; CHECK-NEXT: vmsgt.vi v0, v8, 0
; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <2 x i64> @llvm.scmp(<2 x i64> zeroinitializer, <2 x i64> %a)
  ret <2 x i64> %c
}

define <2 x i64> @scmp_i64z64(<2 x i64> %a) {
; CHECK-LABEL: scmp_i64z64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <2 x i64> @llvm.scmp(<2 x i64> %a, <2 x i64> zeroinitializer)
  ret <2 x i64> %c
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV32: {{.*}}
; RV64: {{.*}}

llvm/test/CodeGen/RISCV/rvv/scmp.ll

Lines changed: 206 additions & 0 deletions
@@ -0,0 +1,206 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64

define <vscale x 16 x i8> @scmp_i8i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: scmp_i8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vmslt.vv v0, v10, v8
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
; CHECK-NEXT: vmslt.vv v0, v8, v10
; CHECK-NEXT: vmerge.vim v8, v12, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <vscale x 16 x i8> @llvm.scmp(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %c
}

define <vscale x 16 x i8> @scmp_z8i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: scmp_z8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, 0
; CHECK-NEXT: vsrl.vi v8, v8, 7
; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <vscale x 16 x i8> @llvm.scmp(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> %a)
  ret <vscale x 16 x i8> %c
}

define <vscale x 16 x i8> @scmp_i8z8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: scmp_i8z8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <vscale x 16 x i8> @llvm.scmp(<vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer)
  ret <vscale x 16 x i8> %c
}


define <vscale x 8 x i16> @scmp_i16i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: scmp_i16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vmslt.vv v0, v10, v8
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
; CHECK-NEXT: vmslt.vv v0, v8, v10
; CHECK-NEXT: vmerge.vim v8, v12, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <vscale x 8 x i16> @llvm.scmp(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %c
}

define <vscale x 8 x i16> @scmp_z16i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: scmp_z16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, 0
; CHECK-NEXT: vsrl.vi v8, v8, 15
; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <vscale x 8 x i16> @llvm.scmp(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> %a)
  ret <vscale x 8 x i16> %c
}

define <vscale x 8 x i16> @scmp_i16z16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: scmp_i16z16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <vscale x 8 x i16> @llvm.scmp(<vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer)
  ret <vscale x 8 x i16> %c
}


define <vscale x 4 x i32> @scmp_i32i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: scmp_i32i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vmslt.vv v0, v10, v8
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
; CHECK-NEXT: vmslt.vv v0, v8, v10
; CHECK-NEXT: vmerge.vim v8, v12, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <vscale x 4 x i32> @llvm.scmp(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %c
}

define <vscale x 4 x i32> @scmp_z32i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: scmp_z32i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, 0
; CHECK-NEXT: vsrl.vi v8, v8, 31
; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <vscale x 4 x i32> @llvm.scmp(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> %a)
  ret <vscale x 4 x i32> %c
}

define <vscale x 4 x i32> @scmp_i32z32(<vscale x 4 x i32> %a) {
; RV32-LABEL: scmp_i32z32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; RV32-NEXT: vmsgt.vi v0, v8, 0
; RV32-NEXT: vsra.vi v8, v8, 31
; RV32-NEXT: vor.vi v8, v8, 1, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: scmp_i32z32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT: vmsgt.vi v0, v8, 0
; RV64-NEXT: vmv.v.i v10, 0
; RV64-NEXT: vmerge.vim v10, v10, 1, v0
; RV64-NEXT: vmsle.vi v0, v8, -1
; RV64-NEXT: vmerge.vim v8, v10, -1, v0
; RV64-NEXT: ret
entry:
  %c = call <vscale x 4 x i32> @llvm.scmp(<vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer)
  ret <vscale x 4 x i32> %c
}


define <vscale x 2 x i64> @scmp_i64i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: scmp_i64i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT: vmslt.vv v0, v10, v8
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
; CHECK-NEXT: vmslt.vv v0, v8, v10
; CHECK-NEXT: vmerge.vim v8, v12, -1, v0
; CHECK-NEXT: ret
entry:
  %c = call <vscale x 2 x i64> @llvm.scmp(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %c
}

define <vscale x 2 x i64> @scmp_z64i64(<vscale x 2 x i64> %a) {
; RV32-LABEL: scmp_z64i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; RV32-NEXT: vmsle.vi v0, v8, -1
; RV32-NEXT: vmv.v.i v10, 0
; RV32-NEXT: vmerge.vim v10, v10, 1, v0
; RV32-NEXT: vmsgt.vi v0, v8, 0
; RV32-NEXT: vmerge.vim v8, v10, -1, v0
; RV32-NEXT: ret
;
; RV64-LABEL: scmp_z64i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: li a0, 63
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV64-NEXT: vmsgt.vi v0, v8, 0
; RV64-NEXT: vsrl.vx v8, v8, a0
; RV64-NEXT: vmerge.vim v8, v8, -1, v0
; RV64-NEXT: ret
entry:
  %c = call <vscale x 2 x i64> @llvm.scmp(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i64> %a)
  ret <vscale x 2 x i64> %c
}

define <vscale x 2 x i64> @scmp_i64z64(<vscale x 2 x i64> %a) {
; RV32-LABEL: scmp_i64z64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; RV32-NEXT: vmsgt.vi v0, v8, 0
; RV32-NEXT: vmv.v.i v10, 0
; RV32-NEXT: vmerge.vim v10, v10, 1, v0
; RV32-NEXT: vmsle.vi v0, v8, -1
; RV32-NEXT: vmerge.vim v8, v10, -1, v0
; RV32-NEXT: ret
;
; RV64-LABEL: scmp_i64z64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: li a0, 63
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vmsgt.vi v0, v8, 0
; RV64-NEXT: vsra.vx v8, v8, a0
; RV64-NEXT: vor.vi v8, v8, 1, v0.t
; RV64-NEXT: ret
entry:
  %c = call <vscale x 2 x i64> @llvm.scmp(<vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer)
  ret <vscale x 2 x i64> %c
}

0 commit comments