Commit 9681dc9

[PowerPC] Exploit vrl(b|h|w|d) to perform vector rotation

Author: Kai Luo

Summary: Currently, we set the legalization action of vector `ISD::ROTL` to `Expand` in `PPCISelLowering`. However, we can exploit `vrl(b|h|w|d)` to lower `ISD::ROTL` directly.

Differential Revision: https://reviews.llvm.org/D71324

Parent: 5128026
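As a minimal illustration (not part of the commit), the generic funnel-shift intrinsic with both value operands equal is a rotate, so after this change llc -mcpu=pwr8 should select a single vrlw for the IR below instead of the old vslw/vsrw/xxlor expansion; the function name rotl_demo is made up for this sketch:

; Rotate each 32-bit lane of %x left by the per-lane amounts in %amt.
declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)

define <4 x i32> @rotl_demo(<4 x i32> %x, <4 x i32> %amt) {
  %r = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %amt)
  ret <4 x i32> %r
}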

File tree

4 files changed (+162, -20 lines)


llvm/lib/Target/PowerPC/PPCISelLowering.cpp
8 additions & 0 deletions

@@ -714,6 +714,14 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
     if (!Subtarget.hasP8Altivec())
       setOperationAction(ISD::ABS, MVT::v2i64, Expand);
 
+    // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
+    if (Subtarget.hasAltivec())
+      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
+        setOperationAction(ISD::ROTL, VT, Legal);
+    // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
+    if (Subtarget.hasP8Altivec())
+      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);
+
     addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
     addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
     addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
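To see the effect of these action changes (a sketch, not part of the commit; the function name is hypothetical), a plain shl/lshr/or rotate idiom over v2i64 now survives as ISD::ROTL and selects vrld on pwr8, plus whatever code is needed to materialize the shift amounts:

; Rotate both doublewords left by 5 (5 + 59 = 64, so this is a rotate).
define <2 x i64> @rotl5_v2i64(<2 x i64> %x) {
entry:
  %b = shl <2 x i64> %x, <i64 5, i64 5>
  %c = lshr <2 x i64> %x, <i64 59, i64 59>
  %d = or <2 x i64> %b, %c
  ret <2 x i64> %d
}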

llvm/lib/Target/PowerPC/PPCInstrAltivec.td
13 additions & 1 deletion

@@ -861,6 +861,14 @@ def V_SETALLONES : VXForm_3<908, (outs vrrc:$vD), (ins),
 def : InstAlias<"vmr $vD, $vA", (VOR vrrc:$vD, vrrc:$vA, vrrc:$vA)>;
 def : InstAlias<"vnot $vD, $vA", (VNOR vrrc:$vD, vrrc:$vA, vrrc:$vA)>;
 
+// Rotates.
+def : Pat<(v16i8 (rotl v16i8:$vA, v16i8:$vB)),
+          (v16i8 (VRLB v16i8:$vA, v16i8:$vB))>;
+def : Pat<(v8i16 (rotl v8i16:$vA, v8i16:$vB)),
+          (v8i16 (VRLH v8i16:$vA, v8i16:$vB))>;
+def : Pat<(v4i32 (rotl v4i32:$vA, v4i32:$vB)),
+          (v4i32 (VRLW v4i32:$vA, v4i32:$vB))>;
+
 // Loads.
 def : Pat<(v4i32 (load xoaddr:$src)), (LVX xoaddr:$src)>;

@@ -1159,9 +1167,13 @@ def:Pat<(vmrgew_swapped_shuffle v16i8:$vA, v16i8:$vB),
 def:Pat<(vmrgow_swapped_shuffle v16i8:$vA, v16i8:$vB),
         (VMRGOW $vB, $vA)>;
 
+// Vector rotates.
+def VRLD : VX1_Int_Ty<196, "vrld", int_ppc_altivec_vrld, v2i64>;
+
+def : Pat<(v2i64 (rotl v2i64:$vA, v2i64:$vB)),
+          (v2i64 (VRLD v2i64:$vA, v2i64:$vB))>;
 
 // Vector shifts
-def VRLD : VX1_Int_Ty<196, "vrld", int_ppc_altivec_vrld, v2i64>;
 def VSLD : VXForm_1<1476, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                     "vsld $vD, $vA, $vB", IIC_VecGeneral, []>;
 def VSRD : VXForm_1<1732, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),

llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll
5 additions & 19 deletions

@@ -75,11 +75,7 @@ define i64 @rotl_i64(i64 %x, i64 %z) {
 define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) {
 ; CHECK-LABEL: rotl_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xxlxor 36, 36, 36
-; CHECK-NEXT:    vslw 5, 2, 3
-; CHECK-NEXT:    vsubuwm 3, 4, 3
-; CHECK-NEXT:    vsrw 2, 2, 3
-; CHECK-NEXT:    xxlor 34, 37, 34
+; CHECK-NEXT:    vrlw 2, 2, 3
 ; CHECK-NEXT:    blr
   %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
   ret <4 x i32> %f

@@ -90,13 +86,8 @@ define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) {
 define <4 x i32> @rotl_v4i32_const_shift(<4 x i32> %x) {
 ; CHECK-LABEL: rotl_v4i32_const_shift:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vspltisw 3, -16
-; CHECK-NEXT:    vspltisw 4, 13
-; CHECK-NEXT:    vspltisw 5, 3
-; CHECK-NEXT:    vsubuwm 3, 4, 3
-; CHECK-NEXT:    vslw 4, 2, 5
-; CHECK-NEXT:    vsrw 2, 2, 3
-; CHECK-NEXT:    xxlor 34, 36, 34
+; CHECK-NEXT:    vspltisw 3, 3
+; CHECK-NEXT:    vrlw 2, 2, 3
 ; CHECK-NEXT:    blr
   %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
   ret <4 x i32> %f

@@ -167,10 +158,8 @@ define <4 x i32> @rotr_v4i32(<4 x i32> %x, <4 x i32> %z) {
 ; CHECK-LABEL: rotr_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xxlxor 36, 36, 36
-; CHECK-NEXT:    vsrw 5, 2, 3
 ; CHECK-NEXT:    vsubuwm 3, 4, 3
-; CHECK-NEXT:    vslw 2, 2, 3
-; CHECK-NEXT:    xxlor 34, 34, 37
+; CHECK-NEXT:    vrlw 2, 2, 3
 ; CHECK-NEXT:    blr
   %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
   ret <4 x i32> %f

@@ -183,11 +172,8 @@ define <4 x i32> @rotr_v4i32_const_shift(<4 x i32> %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vspltisw 3, -16
 ; CHECK-NEXT:    vspltisw 4, 13
-; CHECK-NEXT:    vspltisw 5, 3
 ; CHECK-NEXT:    vsubuwm 3, 4, 3
-; CHECK-NEXT:    vsrw 4, 2, 5
-; CHECK-NEXT:    vslw 2, 2, 3
-; CHECK-NEXT:    xxlor 34, 34, 36
+; CHECK-NEXT:    vrlw 2, 2, 3
 ; CHECK-NEXT:    blr
   %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
   ret <4 x i32> %f
New test file
136 additions & 0 deletions

@@ -0,0 +1,136 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O3 -mtriple=powerpc64le-unknown-unknown -ppc-asm-full-reg-names \
+; RUN:   -verify-machineinstrs -mcpu=pwr8 < %s | \
+; RUN:   FileCheck --check-prefix=CHECK-P8 %s
+; RUN: llc -O3 -mtriple=powerpc64-unknown-unknown -ppc-asm-full-reg-names \
+; RUN:   -verify-machineinstrs -mcpu=pwr7 < %s | \
+; RUN:   FileCheck --check-prefix=CHECK-P7 %s
+
+define <16 x i8> @rotl_v16i8(<16 x i8> %a) {
+; CHECK-P8-LABEL: rotl_v16i8:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r3, r2, .LCPI0_0@toc@ha
+; CHECK-P8-NEXT:    addi r3, r3, .LCPI0_0@toc@l
+; CHECK-P8-NEXT:    lvx v3, 0, r3
+; CHECK-P8-NEXT:    vrlb v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v16i8:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    addis r3, r2, .LCPI0_0@toc@ha
+; CHECK-P7-NEXT:    addi r3, r3, .LCPI0_0@toc@l
+; CHECK-P7-NEXT:    lxvw4x vs35, 0, r3
+; CHECK-P7-NEXT:    vrlb v2, v2, v3
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <16 x i8> %a, <i8 1, i8 1, i8 2, i8 2, i8 3, i8 3, i8 4, i8 4, i8 5, i8 5, i8 6, i8 6, i8 7, i8 7, i8 8, i8 8>
+  %c = lshr <16 x i8> %a, <i8 7, i8 7, i8 6, i8 6, i8 5, i8 5, i8 4, i8 4, i8 3, i8 3, i8 2, i8 2, i8 1, i8 1, i8 0, i8 0>
+  %d = or <16 x i8> %b, %c
+  ret <16 x i8> %d
+}
+
+define <8 x i16> @rotl_v8i16(<8 x i16> %a) {
+; CHECK-P8-LABEL: rotl_v8i16:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r3, r2, .LCPI1_0@toc@ha
+; CHECK-P8-NEXT:    addi r3, r3, .LCPI1_0@toc@l
+; CHECK-P8-NEXT:    lvx v3, 0, r3
+; CHECK-P8-NEXT:    vrlh v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v8i16:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    addis r3, r2, .LCPI1_0@toc@ha
+; CHECK-P7-NEXT:    addi r3, r3, .LCPI1_0@toc@l
+; CHECK-P7-NEXT:    lxvw4x vs35, 0, r3
+; CHECK-P7-NEXT:    vrlh v2, v2, v3
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <8 x i16> %a, <i16 1, i16 2, i16 3, i16 5, i16 7, i16 11, i16 13, i16 16>
+  %c = lshr <8 x i16> %a, <i16 15, i16 14, i16 13, i16 11, i16 9, i16 5, i16 3, i16 0>
+  %d = or <8 x i16> %b, %c
+  ret <8 x i16> %d
+}
+
+define <4 x i32> @rotl_v4i32_0(<4 x i32> %a) {
+; CHECK-P8-LABEL: rotl_v4i32_0:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r3, r2, .LCPI2_0@toc@ha
+; CHECK-P8-NEXT:    addi r3, r3, .LCPI2_0@toc@l
+; CHECK-P8-NEXT:    lvx v3, 0, r3
+; CHECK-P8-NEXT:    vrlw v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v4i32_0:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    addis r3, r2, .LCPI2_0@toc@ha
+; CHECK-P7-NEXT:    addi r3, r3, .LCPI2_0@toc@l
+; CHECK-P7-NEXT:    lxvw4x vs35, 0, r3
+; CHECK-P7-NEXT:    vrlw v2, v2, v3
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <4 x i32> %a, <i32 29, i32 19, i32 17, i32 11>
+  %c = lshr <4 x i32> %a, <i32 3, i32 13, i32 15, i32 21>
+  %d = or <4 x i32> %b, %c
+  ret <4 x i32> %d
+}
+
+define <4 x i32> @rotl_v4i32_1(<4 x i32> %a) {
+; CHECK-P8-LABEL: rotl_v4i32_1:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    vspltisw v3, -16
+; CHECK-P8-NEXT:    vspltisw v4, 7
+; CHECK-P8-NEXT:    vsubuwm v3, v4, v3
+; CHECK-P8-NEXT:    vrlw v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v4i32_1:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    vspltisw v3, -16
+; CHECK-P7-NEXT:    vspltisw v4, 7
+; CHECK-P7-NEXT:    vsubuwm v3, v4, v3
+; CHECK-P7-NEXT:    vrlw v2, v2, v3
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <4 x i32> %a, <i32 23, i32 23, i32 23, i32 23>
+  %c = lshr <4 x i32> %a, <i32 9, i32 9, i32 9, i32 9>
+  %d = or <4 x i32> %b, %c
+  ret <4 x i32> %d
+}
+
+define <2 x i64> @rotl_v2i64(<2 x i64> %a) {
+; CHECK-P8-LABEL: rotl_v2i64:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r3, r2, .LCPI4_0@toc@ha
+; CHECK-P8-NEXT:    addi r3, r3, .LCPI4_0@toc@l
+; CHECK-P8-NEXT:    lxvd2x vs0, 0, r3
+; CHECK-P8-NEXT:    xxswapd vs35, vs0
+; CHECK-P8-NEXT:    vrld v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v2i64:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    addi r3, r1, -48
+; CHECK-P7-NEXT:    stxvd2x vs34, 0, r3
+; CHECK-P7-NEXT:    ld r3, -40(r1)
+; CHECK-P7-NEXT:    sldi r4, r3, 53
+; CHECK-P7-NEXT:    rldicl r3, r3, 53, 11
+; CHECK-P7-NEXT:    std r4, -8(r1)
+; CHECK-P7-NEXT:    ld r4, -48(r1)
+; CHECK-P7-NEXT:    sldi r5, r4, 41
+; CHECK-P7-NEXT:    rldicl r4, r4, 41, 23
+; CHECK-P7-NEXT:    std r5, -16(r1)
+; CHECK-P7-NEXT:    addi r5, r1, -16
+; CHECK-P7-NEXT:    lxvw4x vs0, 0, r5
+; CHECK-P7-NEXT:    std r3, -24(r1)
+; CHECK-P7-NEXT:    addi r3, r1, -32
+; CHECK-P7-NEXT:    std r4, -32(r1)
+; CHECK-P7-NEXT:    lxvw4x vs1, 0, r3
+; CHECK-P7-NEXT:    xxlor vs34, vs0, vs1
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <2 x i64> %a, <i64 41, i64 53>
+  %c = lshr <2 x i64> %a, <i64 23, i64 11>
+  %d = or <2 x i64> %b, %c
+  ret <2 x i64> %d
+}
