source: icGREP/icgrep-devel/llvm-3.8.0.src/test/CodeGen/AMDGPU/salu-to-valu.ll @ 5027

Last change on this file since 5027 was 5027, checked in by cameron, 3 years ago

Upgrade to llvm 3.8

File size: 17.7 KB
Line 
1; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=SI %s
2; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=CI %s
3; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI --check-prefix=GCN-HSA %s
4
5declare i32 @llvm.r600.read.tidig.x() #0
6declare i32 @llvm.r600.read.tidig.y() #0
7
8; In this test both the pointer and the offset operands to the
9; BUFFER_LOAD instructions end up being stored in vgprs.  This
10; requires us to add the pointer and offset together, store the
11; result in the offset operand (vaddr), and then store 0 in an
12; sgpr register pair and use that for the pointer operand
13; (low 64-bits of srsrc).
14
; GCN-LABEL: {{^}}mubuf:

; Make sure we aren't using VGPRs for the source operand of s_mov_b64
; GCN-NOT: s_mov_b64 s[{{[0-9]+:[0-9]+}}], v

; Make sure we aren't using VGPR's for the srsrc operand of BUFFER_LOAD_*
; instructions
; GCN-NOHSA: buffer_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64
; GCN-NOHSA: buffer_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64
; GCN-HSA: flat_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}
; GCN-HSA: flat_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}

; Loop performing two byte loads at thread-id-derived (divergent) addresses,
; so both the pointer and the offset must live in VGPRs (see comment above).
define void @mubuf(i32 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x()
  %tmp1 = call i32 @llvm.r600.read.tidig.y()
  %tmp2 = sext i32 %tmp to i64
  ; NOTE(review): %tmp3 is never used below; presumably kept so tidig.y is
  ; still emitted -- confirm before removing.
  %tmp3 = sext i32 %tmp1 to i64
  br label %loop

loop:                                             ; preds = %loop, %entry
  %tmp4 = phi i64 [ 0, %entry ], [ %tmp5, %loop ]
  %tmp5 = add i64 %tmp2, %tmp4
  %tmp6 = getelementptr i8, i8 addrspace(1)* %in, i64 %tmp5
  %tmp7 = load i8, i8 addrspace(1)* %tmp6, align 1
  %tmp8 = or i64 %tmp5, 1                         ; second load at offset | 1
  %tmp9 = getelementptr i8, i8 addrspace(1)* %in, i64 %tmp8
  %tmp10 = load i8, i8 addrspace(1)* %tmp9, align 1
  %tmp11 = add i8 %tmp7, %tmp10
  %tmp12 = sext i8 %tmp11 to i32
  store i32 %tmp12, i32 addrspace(1)* %out
  %tmp13 = icmp slt i64 %tmp5, 10
  br i1 %tmp13, label %loop, label %done

done:                                             ; preds = %loop
  ret void
}
52
; Test moving an SMRD instruction to the VALU

; GCN-LABEL: {{^}}smrd_valu:
; FIXME: We should be using flat load for HSA.
; GCN: buffer_load_dword [[OUT:v[0-9]+]]
; GCN-NOHSA: buffer_store_dword [[OUT]]
; GCN-HSA: flat_store_dword [[OUT]]
; The phi of two constant-address-space pointers forces the scalar (SMRD)
; load at the end to be rewritten for the vector ALU.
define void @smrd_valu(i32 addrspace(2)* addrspace(1)* %in, i32 %a, i32 %b, i32 addrspace(1)* %out) #1 {
entry:
  %tmp = icmp ne i32 %a, 0
  br i1 %tmp, label %if, label %else

if:                                               ; preds = %entry
  %tmp1 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(1)* %in
  br label %endif

else:                                             ; preds = %entry
  ; A GEP with no indices yields the same pointer as %in.
  %tmp2 = getelementptr i32 addrspace(2)*, i32 addrspace(2)* addrspace(1)* %in
  %tmp3 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(1)* %tmp2
  br label %endif

endif:                                            ; preds = %else, %if
  %tmp4 = phi i32 addrspace(2)* [ %tmp1, %if ], [ %tmp3, %else ]
  %tmp5 = getelementptr i32, i32 addrspace(2)* %tmp4, i32 3000
  %tmp6 = load i32, i32 addrspace(2)* %tmp5
  store i32 %tmp6, i32 addrspace(1)* %out
  ret void
}
81
; Test moving an SMRD with an immediate offset to the VALU

; GCN-LABEL: {{^}}smrd_valu2:
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:16{{$}}
; GCN-HSA: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
define void @smrd_valu2(i32 addrspace(1)* %out, [8 x i32] addrspace(2)* %in) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x() #0
  ; NOTE(review): %tmp1 is unused; the offset of interest comes from the
  ; constant index 4 below (4 * 4 bytes = offset:16).
  %tmp1 = add i32 %tmp, 4
  %tmp2 = getelementptr [8 x i32], [8 x i32] addrspace(2)* %in, i32 %tmp, i32 4
  %tmp3 = load i32, i32 addrspace(2)* %tmp2
  store i32 %tmp3, i32 addrspace(1)* %out
  ret void
}
97
; Use a big offset that will use the SMRD literal offset on CI
; GCN-LABEL: {{^}}smrd_valu_ci_offset:
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: s_movk_i32 [[OFFSET:s[0-9]+]], 0x4e20{{$}}
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET]] addr64{{$}}
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: buffer_store_dword
; GCN-HSA: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
; GCN-HSA: flat_store_dword v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
; 5000 * 4 bytes = 20000 = 0x4e20, too large for an immediate mubuf offset.
define void @smrd_valu_ci_offset(i32 addrspace(1)* %out, i32 addrspace(2)* %in, i32 %c) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x() #0
  %tmp2 = getelementptr i32, i32 addrspace(2)* %in, i32 %tmp
  %tmp3 = getelementptr i32, i32 addrspace(2)* %tmp2, i32 5000
  %tmp4 = load i32, i32 addrspace(2)* %tmp3
  %tmp5 = add i32 %tmp4, %c
  store i32 %tmp5, i32 addrspace(1)* %out
  ret void
}
118
; GCN-LABEL: {{^}}smrd_valu_ci_offset_x2:
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: s_mov_b32 [[OFFSET:s[0-9]+]], 0x9c40{{$}}
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET]] addr64{{$}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: buffer_store_dwordx2
; GCN-HSA: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
; 64-bit variant: 5000 * 8 bytes = 0x9c40; the i64 or needs two 32-bit v_or.
define void @smrd_valu_ci_offset_x2(i64 addrspace(1)* %out, i64 addrspace(2)* %in, i64 %c) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x() #0
  %tmp2 = getelementptr i64, i64 addrspace(2)* %in, i32 %tmp
  %tmp3 = getelementptr i64, i64 addrspace(2)* %tmp2, i32 5000
  %tmp4 = load i64, i64 addrspace(2)* %tmp3
  %tmp5 = or i64 %tmp4, %c
  store i64 %tmp5, i64 addrspace(1)* %out
  ret void
}
138
; GCN-LABEL: {{^}}smrd_valu_ci_offset_x4:
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: s_movk_i32 [[OFFSET:s[0-9]+]], 0x4d20{{$}}
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET]] addr64{{$}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: buffer_store_dwordx4
; GCN-HSA: flat_load_dwordx4 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
; <4 x i32> variant: 1234 * 16 bytes = 0x4d20; one dwordx4 load, four v_or.
define void @smrd_valu_ci_offset_x4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(2)* %in, <4 x i32> %c) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x() #0
  %tmp2 = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %in, i32 %tmp
  %tmp3 = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %tmp2, i32 1234
  %tmp4 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp3
  %tmp5 = or <4 x i32> %tmp4, %c
  store <4 x i32> %tmp5, <4 x i32> addrspace(1)* %out
  ret void
}
160
; Original scalar load uses SGPR offset on SI and 32-bit literal on
; CI.

; GCN-LABEL: {{^}}smrd_valu_ci_offset_x8:
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: s_mov_b32 [[OFFSET0:s[0-9]+]], 0x9a40{{$}}
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET0]] addr64{{$}}
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: s_mov_b32 [[OFFSET1:s[0-9]+]], 0x9a50{{$}}
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET1]] addr64{{$}}

; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: buffer_store_dwordx4
; GCN-NOHSA: buffer_store_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; <8 x i32> variant: the load splits into two dwordx4 halves, 16 bytes apart
; (0x9a40 and 0x9a50), with eight v_or for the eight elements.
define void @smrd_valu_ci_offset_x8(<8 x i32> addrspace(1)* %out, <8 x i32> addrspace(2)* %in, <8 x i32> %c) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x() #0
  %tmp2 = getelementptr <8 x i32>, <8 x i32> addrspace(2)* %in, i32 %tmp
  %tmp3 = getelementptr <8 x i32>, <8 x i32> addrspace(2)* %tmp2, i32 1234
  %tmp4 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp3
  %tmp5 = or <8 x i32> %tmp4, %c
  store <8 x i32> %tmp5, <8 x i32> addrspace(1)* %out
  ret void
}
196
; GCN-LABEL: {{^}}smrd_valu_ci_offset_x16:

; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: s_mov_b32 [[OFFSET0:s[0-9]+]], 0x13480{{$}}
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET0]] addr64{{$}}
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: s_mov_b32 [[OFFSET1:s[0-9]+]], 0x13490{{$}}
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET1]] addr64{{$}}
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: s_mov_b32 [[OFFSET2:s[0-9]+]], 0x134a0{{$}}
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET2]] addr64{{$}}
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: s_mov_b32 [[OFFSET3:s[0-9]+]], 0x134b0{{$}}
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET3]] addr64{{$}}

; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: buffer_store_dwordx4
; GCN-NOHSA: buffer_store_dwordx4
; GCN-NOHSA: buffer_store_dwordx4
; GCN-NOHSA: buffer_store_dwordx4

; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4

; GCN: s_endpgm
; <16 x i32> variant: four dwordx4 loads at 16-byte steps (0x13480..0x134b0).
define void @smrd_valu_ci_offset_x16(<16 x i32> addrspace(1)* %out, <16 x i32> addrspace(2)* %in, <16 x i32> %c) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x() #0
  %tmp2 = getelementptr <16 x i32>, <16 x i32> addrspace(2)* %in, i32 %tmp
  %tmp3 = getelementptr <16 x i32>, <16 x i32> addrspace(2)* %tmp2, i32 1234
  %tmp4 = load <16 x i32>, <16 x i32> addrspace(2)* %tmp3
  %tmp5 = or <16 x i32> %tmp4, %c
  store <16 x i32> %tmp5, <16 x i32> addrspace(1)* %out
  ret void
}
245
; GCN-LABEL: {{^}}smrd_valu2_salu_user:
; GCN-NOHSA: buffer_load_dword [[MOVED:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
; Fixed malformed regex: was "v[{{[0-9+:[0-9]+}}]" (unterminated character
; class), which did not match the intended register-pair operand.
; GCN-HSA: flat_load_dword [[MOVED:v[0-9]+]], v[{{[0-9]+:[0-9]+}}]
; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, s{{[0-9]+}}, [[MOVED]]
; GCN-NOHSA: buffer_store_dword [[ADD]]
; GCN-HSA: flat_store_dword [[ADD]]
; Like smrd_valu2, but the loaded value feeds an SALU user (add with %a),
; which must become a VALU v_add after the load is moved to the VALU.
define void @smrd_valu2_salu_user(i32 addrspace(1)* %out, [8 x i32] addrspace(2)* %in, i32 %a) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x() #0
  %tmp1 = add i32 %tmp, 4
  %tmp2 = getelementptr [8 x i32], [8 x i32] addrspace(2)* %in, i32 %tmp, i32 4
  %tmp3 = load i32, i32 addrspace(2)* %tmp2
  %tmp4 = add i32 %tmp3, %a
  store i32 %tmp4, i32 addrspace(1)* %out
  ret void
}
262
; GCN-LABEL: {{^}}smrd_valu2_max_smrd_offset:
; GCN-NOHSA: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:1020{{$}}
; Fixed: the prefix was written "GCN-HSA " without the colon, so FileCheck
; silently ignored this line; also allow multi-digit vregs and bracket the pair.
; GCN-HSA: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
; Index 255 * 4 bytes = 1020, the largest offset still encodable in the
; mubuf immediate offset field here.
define void @smrd_valu2_max_smrd_offset(i32 addrspace(1)* %out, [1024 x i32] addrspace(2)* %in) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x() #0
  %tmp1 = add i32 %tmp, 4
  %tmp2 = getelementptr [1024 x i32], [1024 x i32] addrspace(2)* %in, i32 %tmp, i32 255
  %tmp3 = load i32, i32 addrspace(2)* %tmp2
  store i32 %tmp3, i32 addrspace(1)* %out
  ret void
}
275
; GCN-LABEL: {{^}}smrd_valu2_mubuf_offset:
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:1024{{$}}
; Fixed: "v{{[0-9]}}" only matched single-digit vregs; use "[0-9]+" so the
; check is robust to register allocation.
; GCN-HSA: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
; Index 256 * 4 bytes = 1024, one past the max immediate offset above.
define void @smrd_valu2_mubuf_offset(i32 addrspace(1)* %out, [1024 x i32] addrspace(2)* %in) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x() #0
  %tmp1 = add i32 %tmp, 4
  %tmp2 = getelementptr [1024 x i32], [1024 x i32] addrspace(2)* %in, i32 %tmp, i32 256
  %tmp3 = load i32, i32 addrspace(2)* %tmp2
  store i32 %tmp3, i32 addrspace(1)* %out
  ret void
}
289
; GCN-LABEL: {{^}}s_load_imm_v8i32:
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; A 4-byte-aligned (under-aligned) <8 x i32> load at a divergent address
; must be emitted as two dwordx4 vector loads.
define void @s_load_imm_v8i32(<8 x i32> addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
entry:
  %tmp0 = tail call i32 @llvm.r600.read.tidig.x()
  %tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
  %tmp2 = bitcast i32 addrspace(2)* %tmp1 to <8 x i32> addrspace(2)*
  %tmp3 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp2, align 4
  store <8 x i32> %tmp3, <8 x i32> addrspace(1)* %out, align 32
  ret void
}
304
; GCN-LABEL: {{^}}s_load_imm_v8i32_salu_user:
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: buffer_store_dword
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; Same load as s_load_imm_v8i32, but all eight elements feed a chain of
; seven scalar adds, which must all be selected as VALU v_add.
define void @s_load_imm_v8i32_salu_user(i32 addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
entry:
  %tmp0 = tail call i32 @llvm.r600.read.tidig.x()
  %tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
  %tmp2 = bitcast i32 addrspace(2)* %tmp1 to <8 x i32> addrspace(2)*
  %tmp3 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp2, align 4

  %elt0 = extractelement <8 x i32> %tmp3, i32 0
  %elt1 = extractelement <8 x i32> %tmp3, i32 1
  %elt2 = extractelement <8 x i32> %tmp3, i32 2
  %elt3 = extractelement <8 x i32> %tmp3, i32 3
  %elt4 = extractelement <8 x i32> %tmp3, i32 4
  %elt5 = extractelement <8 x i32> %tmp3, i32 5
  %elt6 = extractelement <8 x i32> %tmp3, i32 6
  %elt7 = extractelement <8 x i32> %tmp3, i32 7

  ; Reduction chain: 7 adds over 8 elements, matching the 7 v_add checks.
  %add0 = add i32 %elt0, %elt1
  %add1 = add i32 %add0, %elt2
  %add2 = add i32 %add1, %elt3
  %add3 = add i32 %add2, %elt4
  %add4 = add i32 %add3, %elt5
  %add5 = add i32 %add4, %elt6
  %add6 = add i32 %add5, %elt7

  store i32 %add6, i32 addrspace(1)* %out
  ret void
}
345
; GCN-LABEL: {{^}}s_load_imm_v16i32:
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; <16 x i32> version of s_load_imm_v8i32: four dwordx4 vector loads.
define void @s_load_imm_v16i32(<16 x i32> addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
entry:
  %tmp0 = tail call i32 @llvm.r600.read.tidig.x() #1
  %tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
  %tmp2 = bitcast i32 addrspace(2)* %tmp1 to <16 x i32> addrspace(2)*
  %tmp3 = load <16 x i32>, <16 x i32> addrspace(2)* %tmp2, align 4
  store <16 x i32> %tmp3, <16 x i32> addrspace(1)* %out, align 32
  ret void
}
364
; GCN-LABEL: {{^}}s_load_imm_v16i32_salu_user:
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: buffer_store_dword
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; <16 x i32> version of s_load_imm_v8i32_salu_user: four dwordx4 loads
; feeding a reduction chain of 15 adds, all selected as VALU v_add.
define void @s_load_imm_v16i32_salu_user(i32 addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
entry:
  %tmp0 = tail call i32 @llvm.r600.read.tidig.x() #1
  %tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
  %tmp2 = bitcast i32 addrspace(2)* %tmp1 to <16 x i32> addrspace(2)*
  %tmp3 = load <16 x i32>, <16 x i32> addrspace(2)* %tmp2, align 4

  %elt0 = extractelement <16 x i32> %tmp3, i32 0
  %elt1 = extractelement <16 x i32> %tmp3, i32 1
  %elt2 = extractelement <16 x i32> %tmp3, i32 2
  %elt3 = extractelement <16 x i32> %tmp3, i32 3
  %elt4 = extractelement <16 x i32> %tmp3, i32 4
  %elt5 = extractelement <16 x i32> %tmp3, i32 5
  %elt6 = extractelement <16 x i32> %tmp3, i32 6
  %elt7 = extractelement <16 x i32> %tmp3, i32 7
  %elt8 = extractelement <16 x i32> %tmp3, i32 8
  %elt9 = extractelement <16 x i32> %tmp3, i32 9
  %elt10 = extractelement <16 x i32> %tmp3, i32 10
  %elt11 = extractelement <16 x i32> %tmp3, i32 11
  %elt12 = extractelement <16 x i32> %tmp3, i32 12
  %elt13 = extractelement <16 x i32> %tmp3, i32 13
  %elt14 = extractelement <16 x i32> %tmp3, i32 14
  %elt15 = extractelement <16 x i32> %tmp3, i32 15

  ; Reduction chain: 15 adds over 16 elements, matching the 15 v_add checks.
  %add0 = add i32 %elt0, %elt1
  %add1 = add i32 %add0, %elt2
  %add2 = add i32 %add1, %elt3
  %add3 = add i32 %add2, %elt4
  %add4 = add i32 %add3, %elt5
  %add5 = add i32 %add4, %elt6
  %add6 = add i32 %add5, %elt7
  %add7 = add i32 %add6, %elt8
  %add8 = add i32 %add7, %elt9
  %add9 = add i32 %add8, %elt10
  %add10 = add i32 %add9, %elt11
  %add11 = add i32 %add10, %elt12
  %add12 = add i32 %add11, %elt13
  %add13 = add i32 %add12, %elt14
  %add14 = add i32 %add13, %elt15

  store i32 %add14, i32 addrspace(1)* %out
  ret void
}
433
434attributes #0 = { nounwind readnone }
435attributes #1 = { nounwind }
Note: See TracBrowser for help on using the repository browser.