@@ -20,12 +20,11 @@ entry:
define <16 x i8> @scmp_z8i8(<16 x i8> %a) {
; CHECK-LABEL: scmp_z8i8:
; CHECK: # %bb.0: # %entry
- ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
- ; CHECK-NEXT: vmsle.vi v0, v8, -1
- ; CHECK-NEXT: vmv.v.i v9, 0
- ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
+ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v0, v8, 0
- ; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+ ; CHECK-NEXT: vmv.v.i v9, -1
+ ; CHECK-NEXT: vsrl.vi v9, v8, 7, v0.t
+ ; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%c = call <16 x i8> @llvm.scmp(<16 x i8> zeroinitializer, <16 x i8> %a)
@@ -35,12 +34,11 @@ entry:
define <16 x i8> @scmp_i8z8(<16 x i8> %a) {
; CHECK-LABEL: scmp_i8z8:
; CHECK: # %bb.0: # %entry
+ ; CHECK-NEXT: li a0, 1
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
- ; CHECK-NEXT: vmsgt.vi v0, v8, 0
- ; CHECK-NEXT: vmv.v.i v9, 0
- ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
- ; CHECK-NEXT: vmsle.vi v0, v8, -1
- ; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+ ; CHECK-NEXT: vmin.vx v9, v8, a0
+ ; CHECK-NEXT: vsra.vi v8, v8, 7
+ ; CHECK-NEXT: vor.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
%c = call <16 x i8> @llvm.scmp(<16 x i8> %a, <16 x i8> zeroinitializer)
@@ -66,12 +64,11 @@ entry:
define <8 x i16> @scmp_z16i16(<8 x i16> %a) {
; CHECK-LABEL: scmp_z16i16:
; CHECK: # %bb.0: # %entry
- ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
- ; CHECK-NEXT: vmsle.vi v0, v8, -1
- ; CHECK-NEXT: vmv.v.i v9, 0
- ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
+ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v0, v8, 0
- ; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+ ; CHECK-NEXT: vmv.v.i v9, -1
+ ; CHECK-NEXT: vsrl.vi v9, v8, 15, v0.t
+ ; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%c = call <8 x i16> @llvm.scmp(<8 x i16> zeroinitializer, <8 x i16> %a)
@@ -81,12 +78,11 @@ entry:
define <8 x i16> @scmp_i16z16(<8 x i16> %a) {
; CHECK-LABEL: scmp_i16z16:
; CHECK: # %bb.0: # %entry
+ ; CHECK-NEXT: li a0, 1
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
- ; CHECK-NEXT: vmsgt.vi v0, v8, 0
- ; CHECK-NEXT: vmv.v.i v9, 0
- ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
- ; CHECK-NEXT: vmsle.vi v0, v8, -1
- ; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+ ; CHECK-NEXT: vmin.vx v9, v8, a0
+ ; CHECK-NEXT: vsra.vi v8, v8, 15
+ ; CHECK-NEXT: vor.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
%c = call <8 x i16> @llvm.scmp(<8 x i16> %a, <8 x i16> zeroinitializer)
@@ -112,12 +108,11 @@ entry:
define <4 x i32> @scmp_z32i32(<4 x i32> %a) {
; CHECK-LABEL: scmp_z32i32:
; CHECK: # %bb.0: # %entry
- ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; CHECK-NEXT: vmsle.vi v0, v8, -1
- ; CHECK-NEXT: vmv.v.i v9, 0
- ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
+ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v0, v8, 0
- ; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+ ; CHECK-NEXT: vmv.v.i v9, -1
+ ; CHECK-NEXT: vsrl.vi v9, v8, 31, v0.t
+ ; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%c = call <4 x i32> @llvm.scmp(<4 x i32> zeroinitializer, <4 x i32> %a)
@@ -127,12 +122,11 @@ entry:
define <4 x i32> @scmp_i32z32(<4 x i32> %a) {
; CHECK-LABEL: scmp_i32z32:
; CHECK: # %bb.0: # %entry
+ ; CHECK-NEXT: li a0, 1
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; CHECK-NEXT: vmsgt.vi v0, v8, 0
- ; CHECK-NEXT: vmv.v.i v9, 0
- ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
- ; CHECK-NEXT: vmsle.vi v0, v8, -1
- ; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+ ; CHECK-NEXT: vmin.vx v9, v8, a0
+ ; CHECK-NEXT: vsra.vi v8, v8, 31
+ ; CHECK-NEXT: vor.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
%c = call <4 x i32> @llvm.scmp(<4 x i32> %a, <4 x i32> zeroinitializer)
@@ -156,34 +150,51 @@ entry:
}

define <2 x i64> @scmp_z64i64(<2 x i64> %a) {
- ; CHECK-LABEL: scmp_z64i64:
- ; CHECK: # %bb.0: # %entry
- ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
- ; CHECK-NEXT: vmsle.vi v0, v8, -1
- ; CHECK-NEXT: vmv.v.i v9, 0
- ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
- ; CHECK-NEXT: vmsgt.vi v0, v8, 0
- ; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
- ; CHECK-NEXT: ret
+ ; RV32-LABEL: scmp_z64i64:
+ ; RV32: # %bb.0: # %entry
+ ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+ ; RV32-NEXT: vmsle.vi v0, v8, -1
+ ; RV32-NEXT: vmv.v.i v9, 0
+ ; RV32-NEXT: vmerge.vim v9, v9, 1, v0
+ ; RV32-NEXT: vmsgt.vi v0, v8, 0
+ ; RV32-NEXT: vmerge.vim v8, v9, -1, v0
+ ; RV32-NEXT: ret
+ ;
+ ; RV64-LABEL: scmp_z64i64:
+ ; RV64: # %bb.0: # %entry
+ ; RV64-NEXT: li a0, 63
+ ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+ ; RV64-NEXT: vmsgt.vi v0, v8, 0
+ ; RV64-NEXT: vmv.v.i v9, -1
+ ; RV64-NEXT: vsrl.vx v9, v8, a0, v0.t
+ ; RV64-NEXT: vmv.v.v v8, v9
+ ; RV64-NEXT: ret
entry:
%c = call <2 x i64> @llvm.scmp(<2 x i64> zeroinitializer, <2 x i64> %a)
ret <2 x i64> %c
}

define <2 x i64> @scmp_i64z64(<2 x i64> %a) {
- ; CHECK-LABEL: scmp_i64z64:
- ; CHECK: # %bb.0: # %entry
- ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
- ; CHECK-NEXT: vmsgt.vi v0, v8, 0
- ; CHECK-NEXT: vmv.v.i v9, 0
- ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
- ; CHECK-NEXT: vmsle.vi v0, v8, -1
- ; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
- ; CHECK-NEXT: ret
+ ; RV32-LABEL: scmp_i64z64:
+ ; RV32: # %bb.0: # %entry
+ ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+ ; RV32-NEXT: vmsgt.vi v0, v8, 0
+ ; RV32-NEXT: vmv.v.i v9, 0
+ ; RV32-NEXT: vmerge.vim v9, v9, 1, v0
+ ; RV32-NEXT: vmsle.vi v0, v8, -1
+ ; RV32-NEXT: vmerge.vim v8, v9, -1, v0
+ ; RV32-NEXT: ret
+ ;
+ ; RV64-LABEL: scmp_i64z64:
+ ; RV64: # %bb.0: # %entry
+ ; RV64-NEXT: li a0, 63
+ ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+ ; RV64-NEXT: vmsgt.vi v0, v8, 0
+ ; RV64-NEXT: vmv.v.i v9, 1
+ ; RV64-NEXT: vsra.vx v9, v8, a0, v0.t
+ ; RV64-NEXT: vmv.v.v v8, v9
+ ; RV64-NEXT: ret
entry:
%c = call <2 x i64> @llvm.scmp(<2 x i64> %a, <2 x i64> zeroinitializer)
ret <2 x i64> %c
}
- ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
- ; RV32: {{.*}}
- ; RV64: {{.*}}