clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AMDGPUCallLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Analysis -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ASMParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/BinaryFormat -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitstream -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /include/llvm/CodeGen -I /include/llvm/CodeGen/PBQP -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Coroutines -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData/Coverage -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/CodeView -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/DWARF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/MSF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/PDB -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Demangle -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/JITLink -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/Orc -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenACC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenMP -I /include/llvm/CodeGen/GlobalISel -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IRReader -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/LTO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Linker -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC/MCParser -I /include/llvm/CodeGen/MIRParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Object -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Option -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Passes -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Scalar -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ADT -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/Symbolize -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Target -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Utils -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Vectorize -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/IPO -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libLLVM/../include -I /usr/src/gnu/usr.bin/clang/libLLVM/obj -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -D PIC -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -D_RET_PROTECTOR -ret-protector -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
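The block above is the clang -cc1 invocation the analyzer recorded for this translation unit: AMDGPUCallLowering.cpp is analyzed exactly as the OpenBSD libLLVM build compiles it, with the core, apiModeling, unix, deadcode, cplusplus, security.insecureAPI and nullability checker packages enabled. Note -D NDEBUG: asserts are compiled out, so the analyzer cannot use them to prune paths. A report like this is normally generated by wrapping the build in scan-build; a plausible invocation matching the -o output path recorded above would be (the make command is illustrative, not taken from the report):

    scan-build -o /home/ben/Projects/vmm/scan-build make

The numbered source listing follows, with the analyzer's surviving bug-path notes inline.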
1 | //===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | /// \file |
10 | /// This file implements the lowering of LLVM calls to machine code calls for |
11 | /// GlobalISel. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #include "AMDGPUCallLowering.h" |
16 | #include "AMDGPU.h" |
17 | #include "AMDGPULegalizerInfo.h" |
18 | #include "AMDGPUTargetMachine.h" |
19 | #include "SIMachineFunctionInfo.h" |
20 | #include "SIRegisterInfo.h" |
21 | #include "llvm/CodeGen/Analysis.h" |
22 | #include "llvm/CodeGen/FunctionLoweringInfo.h" |
23 | #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" |
24 | #include "llvm/IR/IntrinsicsAMDGPU.h" |
25 | |
26 | #define DEBUG_TYPE "amdgpu-call-lowering" |
27 | |
28 | using namespace llvm; |
29 | |
30 | namespace { |
31 | |
32 | /// Wrapper around extendRegister to ensure we extend to a full 32-bit register. |
33 | static Register extendRegisterMin32(CallLowering::ValueHandler &Handler, |
34 | Register ValVReg, CCValAssign &VA) { |
35 | if (VA.getLocVT().getSizeInBits() < 32) { |
36 | // 16-bit types are reported as legal for 32-bit registers. We need to |
37 | // extend and do a 32-bit copy to avoid the verifier complaining about it. |
38 | return Handler.MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0); |
39 | } |
40 | |
41 | return Handler.extendRegister(ValVReg, VA); |
42 | } |
43 | |
44 | struct AMDGPUOutgoingValueHandler : public CallLowering::OutgoingValueHandler { |
45 | AMDGPUOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI, |
46 | MachineInstrBuilder MIB) |
47 | : OutgoingValueHandler(B, MRI), MIB(MIB) {} |
48 | |
49 | MachineInstrBuilder MIB; |
50 | |
51 | Register getStackAddress(uint64_t Size, int64_t Offset, |
52 | MachinePointerInfo &MPO, |
53 | ISD::ArgFlagsTy Flags) override { |
54 | llvm_unreachable("not implemented"); |
55 | } |
56 | |
57 | void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy, |
58 | MachinePointerInfo &MPO, CCValAssign &VA) override { |
59 | llvm_unreachable("not implemented"); |
60 | } |
61 | |
62 | void assignValueToReg(Register ValVReg, Register PhysReg, |
63 | CCValAssign &VA) override { |
64 | Register ExtReg = extendRegisterMin32(*this, ValVReg, VA); |
65 | |
66 | // If this is a scalar return, insert a readfirstlane just in case the value |
67 | // ends up in a VGPR. |
68 | // FIXME: Assert this is a shader return. |
69 | const SIRegisterInfo *TRI |
70 | = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo()); |
71 | if (TRI->isSGPRReg(MRI, PhysReg)) { |
72 | auto ToSGPR = MIRBuilder.buildIntrinsic(Intrinsic::amdgcn_readfirstlane, |
73 | {MRI.getType(ExtReg)}, false) |
74 | .addReg(ExtReg); |
75 | ExtReg = ToSGPR.getReg(0); |
76 | } |
77 | |
78 | MIRBuilder.buildCopy(PhysReg, ExtReg); |
79 | MIB.addUse(PhysReg, RegState::Implicit); |
80 | } |
81 | }; |
82 | |
83 | struct AMDGPUIncomingArgHandler : public CallLowering::IncomingValueHandler { |
84 | uint64_t StackUsed = 0; |
85 | |
86 | AMDGPUIncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI) |
87 | : IncomingValueHandler(B, MRI) {} |
88 | |
89 | Register getStackAddress(uint64_t Size, int64_t Offset, |
90 | MachinePointerInfo &MPO, |
91 | ISD::ArgFlagsTy Flags) override { |
92 | auto &MFI = MIRBuilder.getMF().getFrameInfo(); |
93 | |
94 | // Byval is assumed to be writable memory, but other stack passed arguments |
95 | // are not. |
96 | const bool IsImmutable = !Flags.isByVal(); |
97 | int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable); |
98 | MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI); |
99 | auto AddrReg = MIRBuilder.buildFrameIndex( |
100 | LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32), FI); |
101 | StackUsed = std::max(StackUsed, Size + Offset); |
102 | return AddrReg.getReg(0); |
103 | } |
104 | |
105 | void assignValueToReg(Register ValVReg, Register PhysReg, |
106 | CCValAssign &VA) override { |
107 | markPhysRegUsed(PhysReg); |
108 | |
109 | if (VA.getLocVT().getSizeInBits() < 32) { |
110 | // 16-bit types are reported as legal for 32-bit registers. We need to do |
111 | // a 32-bit copy, and truncate to avoid the verifier complaining about it. |
112 | auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg); |
113 | |
114 | // If we have signext/zeroext, it applies to the whole 32-bit register |
115 | // before truncation. |
116 | auto Extended = |
117 | buildExtensionHint(VA, Copy.getReg(0), LLT(VA.getLocVT())); |
118 | MIRBuilder.buildTrunc(ValVReg, Extended); |
119 | return; |
120 | } |
121 | |
122 | IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA); |
123 | } |
124 | |
125 | void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy, |
126 | MachinePointerInfo &MPO, CCValAssign &VA) override { |
127 | MachineFunction &MF = MIRBuilder.getMF(); |
128 | |
129 | auto MMO = MF.getMachineMemOperand( |
130 | MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, MemTy, |
131 | inferAlignFromPtrInfo(MF, MPO)); |
132 | MIRBuilder.buildLoad(ValVReg, Addr, *MMO); |
133 | } |
134 | |
135 | /// How the physical register gets marked varies between formal |
136 | /// arguments (it's a basic-block live-in), and a call instruction |
137 | /// (it's an implicit-def of the BL). |
138 | virtual void markPhysRegUsed(unsigned PhysReg) = 0; |
139 | }; |
140 | |
141 | struct FormalArgHandler : public AMDGPUIncomingArgHandler { |
142 | FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI) |
143 | : AMDGPUIncomingArgHandler(B, MRI) {} |
144 | |
145 | void markPhysRegUsed(unsigned PhysReg) override { |
146 | MIRBuilder.getMBB().addLiveIn(PhysReg); |
147 | } |
148 | }; |
149 | |
150 | struct CallReturnHandler : public AMDGPUIncomingArgHandler { |
151 | CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI, |
152 | MachineInstrBuilder MIB) |
153 | : AMDGPUIncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {} |
154 | |
155 | void markPhysRegUsed(unsigned PhysReg) override { |
156 | MIB.addDef(PhysReg, RegState::Implicit); |
157 | } |
158 | |
159 | MachineInstrBuilder MIB; |
160 | }; |
161 | |
162 | struct AMDGPUOutgoingArgHandler : public AMDGPUOutgoingValueHandler { |
163 | /// For tail calls, the byte offset of the call's argument area from the |
164 | /// callee's. Used to allow overlapping memory accesses. |
165 | int FPDiff; |
166 | |
167 | // Cache the SP register vreg if we need it more than once in this call site. |
168 | Register SPReg; |
169 | |
170 | bool IsTailCall; |
171 | |
172 | AMDGPUOutgoingArgHandler(MachineIRBuilder &MIRBuilder, |
173 | MachineRegisterInfo &MRI, MachineInstrBuilder MIB, |
174 | bool IsTailCall = false, int FPDiff = 0) |
175 | : AMDGPUOutgoingValueHandler(MIRBuilder, MRI, MIB), FPDiff(FPDiff), |
176 | IsTailCall(IsTailCall) {} |
177 | |
178 | Register getStackAddress(uint64_t Size, int64_t Offset, |
179 | MachinePointerInfo &MPO, |
180 | ISD::ArgFlagsTy Flags) override { |
181 | MachineFunction &MF = MIRBuilder.getMF(); |
182 | const LLT PtrTy = LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32); |
183 | const LLT S32 = LLT::scalar(32); |
184 | |
185 | if (IsTailCall) { |
186 | Offset += FPDiff; |
187 | int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true); |
188 | auto FIReg = MIRBuilder.buildFrameIndex(PtrTy, FI); |
189 | MPO = MachinePointerInfo::getFixedStack(MF, FI); |
190 | return FIReg.getReg(0); |
191 | } |
192 | |
193 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
194 | |
195 | if (!SPReg) |
196 | SPReg = MIRBuilder.buildCopy(PtrTy, MFI->getStackPtrOffsetReg()).getReg(0); |
197 | |
198 | auto OffsetReg = MIRBuilder.buildConstant(S32, Offset); |
199 | |
200 | auto AddrReg = MIRBuilder.buildPtrAdd(PtrTy, SPReg, OffsetReg); |
201 | MPO = MachinePointerInfo::getStack(MF, Offset); |
202 | return AddrReg.getReg(0); |
203 | } |
204 | |
205 | void assignValueToReg(Register ValVReg, Register PhysReg, |
206 | CCValAssign &VA) override { |
207 | MIB.addUse(PhysReg, RegState::Implicit); |
208 | Register ExtReg = extendRegisterMin32(*this, ValVReg, VA); |
209 | MIRBuilder.buildCopy(PhysReg, ExtReg); |
210 | } |
211 | |
212 | void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy, |
213 | MachinePointerInfo &MPO, CCValAssign &VA) override { |
214 | MachineFunction &MF = MIRBuilder.getMF(); |
215 | uint64_t LocMemOffset = VA.getLocMemOffset(); |
216 | const auto &ST = MF.getSubtarget<GCNSubtarget>(); |
217 | |
218 | auto MMO = MF.getMachineMemOperand( |
219 | MPO, MachineMemOperand::MOStore, MemTy, |
220 | commonAlignment(ST.getStackAlignment(), LocMemOffset)); |
221 | MIRBuilder.buildStore(ValVReg, Addr, *MMO); |
222 | } |
223 | |
224 | void assignValueToAddress(const CallLowering::ArgInfo &Arg, |
225 | unsigned ValRegIndex, Register Addr, LLT MemTy, |
226 | MachinePointerInfo &MPO, CCValAssign &VA) override { |
227 | Register ValVReg = VA.getLocInfo() != CCValAssign::LocInfo::FPExt |
228 | ? extendRegister(Arg.Regs[ValRegIndex], VA) |
229 | : Arg.Regs[ValRegIndex]; |
230 | assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA); |
231 | } |
232 | }; |
233 | } // anonymous namespace |
234 | |
235 | AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI) |
236 | : CallLowering(&TLI) { |
237 | } |
238 | |
239 | // FIXME: Compatibility hack |
240 | static ISD::NodeType extOpcodeToISDExtOpcode(unsigned MIOpc) { |
241 | switch (MIOpc) { |
242 | case TargetOpcode::G_SEXT: |
243 | return ISD::SIGN_EXTEND; |
244 | case TargetOpcode::G_ZEXT: |
245 | return ISD::ZERO_EXTEND; |
246 | case TargetOpcode::G_ANYEXT: |
247 | return ISD::ANY_EXTEND; |
248 | default: |
249 | llvm_unreachable("not an extend opcode"); |
250 | } |
251 | } |
252 | |
253 | bool AMDGPUCallLowering::canLowerReturn(MachineFunction &MF, |
254 | CallingConv::ID CallConv, |
255 | SmallVectorImpl<BaseArgInfo> &Outs, |
256 | bool IsVarArg) const { |
257 | // For shaders. Vector types should be explicitly handled by CC. |
258 | if (AMDGPU::isEntryFunctionCC(CallConv)) |
259 | return true; |
260 | |
261 | SmallVector<CCValAssign, 16> ArgLocs; |
262 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); |
263 | CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, |
264 | MF.getFunction().getContext()); |
265 | |
266 | return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv, IsVarArg)); |
267 | } |
268 | |
269 | /// Lower the return value for the already existing \p Ret. This assumes that |
270 | /// \p B's insertion point is correct. |
271 | bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B, |
272 | const Value *Val, ArrayRef<Register> VRegs, |
273 | MachineInstrBuilder &Ret) const { |
274 | if (!Val) |
275 | return true; |
276 | |
277 | auto &MF = B.getMF(); |
278 | const auto &F = MF.getFunction(); |
279 | const DataLayout &DL = MF.getDataLayout(); |
280 | MachineRegisterInfo *MRI = B.getMRI(); |
281 | LLVMContext &Ctx = F.getContext(); |
282 | |
283 | CallingConv::ID CC = F.getCallingConv(); |
284 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); |
285 | |
286 | SmallVector<EVT, 8> SplitEVTs; |
287 | ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs); |
288 | assert(VRegs.size() == SplitEVTs.size() && |
289 | "For each split Type there should be exactly one VReg."); |
290 | |
291 | SmallVector<ArgInfo, 8> SplitRetInfos; |
292 | |
293 | for (unsigned i = 0; i < SplitEVTs.size(); ++i) { |
294 | EVT VT = SplitEVTs[i]; |
295 | Register Reg = VRegs[i]; |
296 | ArgInfo RetInfo(Reg, VT.getTypeForEVT(Ctx), 0); |
297 | setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F); |
298 | |
299 | if (VT.isScalarInteger()) { |
300 | unsigned ExtendOp = TargetOpcode::G_ANYEXT; |
301 | if (RetInfo.Flags[0].isSExt()) { |
302 | assert(RetInfo.Regs.size() == 1 && "expect only simple return values"); |
303 | ExtendOp = TargetOpcode::G_SEXT; |
304 | } else if (RetInfo.Flags[0].isZExt()) { |
305 | assert(RetInfo.Regs.size() == 1 && "expect only simple return values"); |
306 | ExtendOp = TargetOpcode::G_ZEXT; |
307 | } |
308 | |
309 | EVT ExtVT = TLI.getTypeForExtReturn(Ctx, VT, |
310 | extOpcodeToISDExtOpcode(ExtendOp)); |
311 | if (ExtVT != VT) { |
312 | RetInfo.Ty = ExtVT.getTypeForEVT(Ctx); |
313 | LLT ExtTy = getLLTForType(*RetInfo.Ty, DL); |
314 | Reg = B.buildInstr(ExtendOp, {ExtTy}, {Reg}).getReg(0); |
315 | } |
316 | } |
317 | |
318 | if (Reg != RetInfo.Regs[0]) { |
319 | RetInfo.Regs[0] = Reg; |
320 | // Reset the arg flags after modifying Reg. |
321 | setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F); |
322 | } |
323 | |
324 | splitToValueTypes(RetInfo, SplitRetInfos, DL, CC); |
325 | } |
326 | |
327 | CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg()); |
328 | |
329 | OutgoingValueAssigner Assigner(AssignFn); |
330 | AMDGPUOutgoingValueHandler RetHandler(B, *MRI, Ret); |
331 | return determineAndHandleAssignments(RetHandler, Assigner, SplitRetInfos, B, |
332 | CC, F.isVarArg()); |
333 | } |
334 | |
335 | bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B, const Value *Val, |
336 | ArrayRef<Register> VRegs, |
337 | FunctionLoweringInfo &FLI) const { |
338 | |
339 | MachineFunction &MF = B.getMF(); |
340 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
341 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
342 | MFI->setIfReturnsVoid(!Val); |
343 | |
344 | assert(!Val == VRegs.empty() && "Return value without a vreg"); |
345 | |
346 | CallingConv::ID CC = B.getMF().getFunction().getCallingConv(); |
347 | const bool IsShader = AMDGPU::isShader(CC); |
348 | const bool IsWaveEnd = |
349 | (IsShader && MFI->returnsVoid()) || AMDGPU::isKernel(CC); |
| 2 | Assuming 'IsShader' is false |
350 | if (IsWaveEnd) { |
351 | B.buildInstr(AMDGPU::S_ENDPGM) |
352 | .addImm(0); |
353 | return true; |
354 | } |
355 | |
356 | auto const &ST = MF.getSubtarget<GCNSubtarget>(); |
357 | |
358 | unsigned ReturnOpc = |
359 | IsShader ? AMDGPU::SI_RETURN_TO_EPILOG : AMDGPU::S_SETPC_B64_return; |
360 | |
361 | auto Ret = B.buildInstrNoInsert(ReturnOpc); |
362 | Register ReturnAddrVReg; |
363 | if (ReturnOpc == AMDGPU::S_SETPC_B64_return) { |
364 | ReturnAddrVReg = MRI.createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass); |
365 | Ret.addUse(ReturnAddrVReg); |
366 | } |
367 | |
368 | if (!FLI.CanLowerReturn) |
| 6 | Assuming field 'CanLowerReturn' is false |
369 | insertSRetStores(B, Val->getType(), VRegs, FLI.DemoteRegister); |
| 8 | Called C++ object pointer is null |
370 | else if (!lowerReturnVal(B, Val, VRegs, Ret)) |
371 | return false; |
372 | |
373 | if (ReturnOpc == AMDGPU::S_SETPC_B64_return) { |
374 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
375 | Register LiveInReturn = MF.addLiveIn(TRI->getReturnAddressReg(MF), |
376 | &AMDGPU::SGPR_64RegClass); |
377 | B.buildCopy(ReturnAddrVReg, LiveInReturn); |
378 | } |
379 | |
380 | // TODO: Handle CalleeSavedRegsViaCopy. |
381 | |
382 | B.insertInstr(Ret); |
383 | return true; |
384 | } |
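Reading the three surviving path notes together with the body of lowerReturn: because NDEBUG strips the assert at line 344, the analyzer is free to assume 'Val' is null when '!Val' is evaluated at line 342 (a void return). Note 2 assumes 'IsShader' is false, so 'IsWaveEnd' is false and the S_ENDPGM early exit at lines 350-354 is skipped. Note 6 then assumes 'FLI.CanLowerReturn' is false, which sends control to line 369, where 'Val->getType()' is called on the still-null 'Val'; that is the warning at note 8. In practice 'CanLowerReturn' should only be false when a non-void return value has been demoted to a hidden sret argument, so 'Val' ought to be non-null on that branch, but the analyzer cannot see this cross-function invariant. A minimal sketch of a local guard that encodes the invariant and silences the report (illustrative only, not necessarily how upstream LLVM addressed it):

    // Sketch: FLI.CanLowerReturn is only false when a non-void return value
    // was demoted to a hidden sret argument, so Val must be non-null on this
    // path. Making the check explicit lets the impossible null case fall
    // through to lowerReturnVal, which returns true for a null Val.
    if (!FLI.CanLowerReturn && Val)
      insertSRetStores(B, Val->getType(), VRegs, FLI.DemoteRegister);
    else if (!lowerReturnVal(B, Val, VRegs, Ret))
      return false;

An assert(Val) alone would not help here, since this build defines NDEBUG.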
385 | |
386 | void AMDGPUCallLowering::lowerParameterPtr(Register DstReg, MachineIRBuilder &B, |
387 | uint64_t Offset) const { |
388 | MachineFunction &MF = B.getMF(); |
389 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
390 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
391 | Register KernArgSegmentPtr = |
392 | MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); |
393 | Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr); |
394 | |
395 | auto OffsetReg = B.buildConstant(LLT::scalar(64), Offset); |
396 | |
397 | B.buildPtrAdd(DstReg, KernArgSegmentVReg, OffsetReg); |
398 | } |
399 | |
400 | void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B, ArgInfo &OrigArg, |
401 | uint64_t Offset, |
402 | Align Alignment) const { |
403 | MachineFunction &MF = B.getMF(); |
404 | const Function &F = MF.getFunction(); |
405 | const DataLayout &DL = F.getParent()->getDataLayout(); |
406 | MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); |
407 | |
408 | LLT PtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64); |
409 | |
410 | SmallVector<ArgInfo, 32> SplitArgs; |
411 | SmallVector<uint64_t> FieldOffsets; |
412 | splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv(), &FieldOffsets); |
413 | |
414 | unsigned Idx = 0; |
415 | for (ArgInfo &SplitArg : SplitArgs) { |
416 | Register PtrReg = B.getMRI()->createGenericVirtualRegister(PtrTy); |
417 | lowerParameterPtr(PtrReg, B, Offset + FieldOffsets[Idx]); |
418 | |
419 | LLT ArgTy = getLLTForType(*SplitArg.Ty, DL); |
420 | if (SplitArg.Flags[0].isPointer()) { |
421 | // Compensate for losing pointeriness in splitValueTypes. |
422 | LLT PtrTy = LLT::pointer(SplitArg.Flags[0].getPointerAddrSpace(), |
423 | ArgTy.getScalarSizeInBits()); |
424 | ArgTy = ArgTy.isVector() ? LLT::vector(ArgTy.getElementCount(), PtrTy) |
425 | : PtrTy; |
426 | } |
427 | |
428 | MachineMemOperand *MMO = MF.getMachineMemOperand( |
429 | PtrInfo, |
430 | MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | |
431 | MachineMemOperand::MOInvariant, |
432 | ArgTy, commonAlignment(Alignment, FieldOffsets[Idx])); |
433 | |
434 | assert(SplitArg.Regs.size() == 1); |
435 | |
436 | B.buildLoad(SplitArg.Regs[0], PtrReg, *MMO); |
437 | ++Idx; |
438 | } |
439 | } |
440 | |
441 | |
442 | static void allocateHSAUserSGPRs(CCState &CCInfo, |
443 | MachineIRBuilder &B, |
444 | MachineFunction &MF, |
445 | const SIRegisterInfo &TRI, |
446 | SIMachineFunctionInfo &Info) { |
447 | // FIXME: How should these inputs interact with inreg / custom SGPR inputs? |
448 | if (Info.hasPrivateSegmentBuffer()) { |
449 | Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI); |
450 | MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass); |
451 | CCInfo.AllocateReg(PrivateSegmentBufferReg); |
452 | } |
453 | |
454 | if (Info.hasDispatchPtr()) { |
455 | Register DispatchPtrReg = Info.addDispatchPtr(TRI); |
456 | MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass); |
457 | CCInfo.AllocateReg(DispatchPtrReg); |
458 | } |
459 | |
460 | if (Info.hasQueuePtr()) { |
461 | Register QueuePtrReg = Info.addQueuePtr(TRI); |
462 | MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass); |
463 | CCInfo.AllocateReg(QueuePtrReg); |
464 | } |
465 | |
466 | if (Info.hasKernargSegmentPtr()) { |
467 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
468 | Register InputPtrReg = Info.addKernargSegmentPtr(TRI); |
469 | const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64); |
470 | Register VReg = MRI.createGenericVirtualRegister(P4); |
471 | MRI.addLiveIn(InputPtrReg, VReg); |
472 | B.getMBB().addLiveIn(InputPtrReg); |
473 | B.buildCopy(VReg, InputPtrReg); |
474 | CCInfo.AllocateReg(InputPtrReg); |
475 | } |
476 | |
477 | if (Info.hasDispatchID()) { |
478 | Register DispatchIDReg = Info.addDispatchID(TRI); |
479 | MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass); |
480 | CCInfo.AllocateReg(DispatchIDReg); |
481 | } |
482 | |
483 | if (Info.hasFlatScratchInit()) { |
484 | Register FlatScratchInitReg = Info.addFlatScratchInit(TRI); |
485 | MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass); |
486 | CCInfo.AllocateReg(FlatScratchInitReg); |
487 | } |
488 | |
489 | // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read |
490 | // these from the dispatch pointer. |
491 | } |
492 | |
493 | bool AMDGPUCallLowering::lowerFormalArgumentsKernel( |
494 | MachineIRBuilder &B, const Function &F, |
495 | ArrayRef<ArrayRef<Register>> VRegs) const { |
496 | MachineFunction &MF = B.getMF(); |
497 | const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>(); |
498 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
499 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
500 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
501 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); |
502 | const DataLayout &DL = F.getParent()->getDataLayout(); |
503 | |
504 | Info->allocateModuleLDSGlobal(F.getParent()); |
505 | |
506 | SmallVector<CCValAssign, 16> ArgLocs; |
507 | CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext()); |
508 | |
509 | allocateHSAUserSGPRs(CCInfo, B, MF, *TRI, *Info); |
510 | |
511 | unsigned i = 0; |
512 | const Align KernArgBaseAlign(16); |
513 | const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F); |
514 | uint64_t ExplicitArgOffset = 0; |
515 | |
516 | // TODO: Align down to dword alignment and extract bits for extending loads. |
517 | for (auto &Arg : F.args()) { |
518 | const bool IsByRef = Arg.hasByRefAttr(); |
519 | Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType(); |
520 | unsigned AllocSize = DL.getTypeAllocSize(ArgTy); |
521 | if (AllocSize == 0) |
522 | continue; |
523 | |
524 | MaybeAlign ABIAlign = IsByRef ? Arg.getParamAlign() : None; |
525 | if (!ABIAlign) |
526 | ABIAlign = DL.getABITypeAlign(ArgTy); |
527 | |
528 | uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset; |
529 | ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize; |
530 | |
531 | if (Arg.use_empty()) { |
532 | ++i; |
533 | continue; |
534 | } |
535 | |
536 | Align Alignment = commonAlignment(KernArgBaseAlign, ArgOffset); |
537 | |
538 | if (IsByRef) { |
539 | unsigned ByRefAS = cast<PointerType>(Arg.getType())->getAddressSpace(); |
540 | |
541 | assert(VRegs[i].size() == 1 && |
542 | "expected only one register for byval pointers"); |
543 | if (ByRefAS == AMDGPUAS::CONSTANT_ADDRESS) { |
544 | lowerParameterPtr(VRegs[i][0], B, ArgOffset); |
545 | } else { |
546 | const LLT ConstPtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64); |
547 | Register PtrReg = MRI.createGenericVirtualRegister(ConstPtrTy); |
548 | lowerParameterPtr(PtrReg, B, ArgOffset); |
549 | |
550 | B.buildAddrSpaceCast(VRegs[i][0], PtrReg); |
551 | } |
552 | } else { |
553 | ArgInfo OrigArg(VRegs[i], Arg, i); |
554 | const unsigned OrigArgIdx = i + AttributeList::FirstArgIndex; |
555 | setArgFlags(OrigArg, OrigArgIdx, DL, F); |
556 | lowerParameter(B, OrigArg, ArgOffset, Alignment); |
557 | } |
558 | |
559 | ++i; |
560 | } |
561 | |
562 | TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info); |
563 | TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false); |
564 | return true; |
565 | } |
566 | |
567 | bool AMDGPUCallLowering::lowerFormalArguments( |
568 | MachineIRBuilder &B, const Function &F, ArrayRef<ArrayRef<Register>> VRegs, |
569 | FunctionLoweringInfo &FLI) const { |
570 | CallingConv::ID CC = F.getCallingConv(); |
571 | |
572 | // The infrastructure for normal calling convention lowering is essentially |
573 | // useless for kernels. We want to avoid any kind of legalization or argument |
574 | // splitting. |
575 | if (CC == CallingConv::AMDGPU_KERNEL) |
576 | return lowerFormalArgumentsKernel(B, F, VRegs); |
577 | |
578 | const bool IsGraphics = AMDGPU::isGraphics(CC); |
579 | const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC); |
580 | |
581 | MachineFunction &MF = B.getMF(); |
582 | MachineBasicBlock &MBB = B.getMBB(); |
583 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
584 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
585 | const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>(); |
586 | const SIRegisterInfo *TRI = Subtarget.getRegisterInfo(); |
587 | const DataLayout &DL = F.getParent()->getDataLayout(); |
588 | |
589 | Info->allocateModuleLDSGlobal(F.getParent()); |
590 | |
591 | SmallVector<CCValAssign, 16> ArgLocs; |
592 | CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext()); |
593 | |
594 | if (!IsEntryFunc) { |
595 | Register ReturnAddrReg = TRI->getReturnAddressReg(MF); |
596 | Register LiveInReturn = MF.addLiveIn(ReturnAddrReg, |
597 | &AMDGPU::SGPR_64RegClass); |
598 | MBB.addLiveIn(ReturnAddrReg); |
599 | B.buildCopy(LiveInReturn, ReturnAddrReg); |
600 | } |
601 | |
602 | if (Info->hasImplicitBufferPtr()) { |
603 | Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI); |
604 | MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass); |
605 | CCInfo.AllocateReg(ImplicitBufferPtrReg); |
606 | } |
607 | |
608 | SmallVector<ArgInfo, 32> SplitArgs; |
609 | unsigned Idx = 0; |
610 | unsigned PSInputNum = 0; |
611 | |
612 | // Insert the hidden sret parameter if the return value won't fit in the |
613 | // return registers. |
614 | if (!FLI.CanLowerReturn) |
615 | insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL); |
616 | |
617 | for (auto &Arg : F.args()) { |
618 | if (DL.getTypeStoreSize(Arg.getType()) == 0) |
619 | continue; |
620 | |
621 | const bool InReg = Arg.hasAttribute(Attribute::InReg); |
622 | |
623 | // SGPR arguments to functions not implemented. |
624 | if (!IsGraphics && InReg) |
625 | return false; |
626 | |
627 | if (Arg.hasAttribute(Attribute::SwiftSelf) || |
628 | Arg.hasAttribute(Attribute::SwiftError) || |
629 | Arg.hasAttribute(Attribute::Nest)) |
630 | return false; |
631 | |
632 | if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) { |
633 | const bool ArgUsed = !Arg.use_empty(); |
634 | bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum); |
635 | |
636 | if (!SkipArg) { |
637 | Info->markPSInputAllocated(PSInputNum); |
638 | if (ArgUsed) |
639 | Info->markPSInputEnabled(PSInputNum); |
640 | } |
641 | |
642 | ++PSInputNum; |
643 | |
644 | if (SkipArg) { |
645 | for (int I = 0, E = VRegs[Idx].size(); I != E; ++I) |
646 | B.buildUndef(VRegs[Idx][I]); |
647 | |
648 | ++Idx; |
649 | continue; |
650 | } |
651 | } |
652 | |
653 | ArgInfo OrigArg(VRegs[Idx], Arg, Idx); |
654 | const unsigned OrigArgIdx = Idx + AttributeList::FirstArgIndex; |
655 | setArgFlags(OrigArg, OrigArgIdx, DL, F); |
656 | |
657 | splitToValueTypes(OrigArg, SplitArgs, DL, CC); |
658 | ++Idx; |
659 | } |
660 | |
661 | |
662 | |
663 | |
664 | |
665 | |
666 | |
667 | |
668 | |
669 | |
670 | |
671 | |
672 | |
673 | |
674 | if (CC == CallingConv::AMDGPU_PS) { |
675 | if ((Info->getPSInputAddr() & 0x7F) == 0 || |
676 | ((Info->getPSInputAddr() & 0xF) == 0 && |
677 | Info->isPSInputAllocated(11))) { |
678 | CCInfo.AllocateReg(AMDGPU::VGPR0); |
679 | CCInfo.AllocateReg(AMDGPU::VGPR1); |
680 | Info->markPSInputAllocated(0); |
681 | Info->markPSInputEnabled(0); |
682 | } |
683 | |
684 | if (Subtarget.isAmdPalOS()) { |
685 | |
686 | |
687 | |
688 | |
689 | |
690 | |
691 | |
692 | |
693 | unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable(); |
694 | if ((PsInputBits & 0x7F) == 0 || |
695 | ((PsInputBits & 0xF) == 0 && |
696 | (PsInputBits >> 11 & 1))) |
697 | Info->markPSInputEnabled( |
698 | countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined)); |
699 | } |
700 | } |
701 | |
702 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); |
703 | CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg()); |
704 | |
705 | if (!MBB.empty()) |
706 | B.setInstr(*MBB.begin()); |
707 | |
708 | if (!IsEntryFunc) { |
709 | |
710 | if (AMDGPUTargetMachine::EnableFixedFunctionABI) |
711 | TLI.allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info); |
712 | } |
713 | |
714 | IncomingValueAssigner Assigner(AssignFn); |
715 | if (!determineAssignments(Assigner, SplitArgs, CCInfo)) |
716 | return false; |
717 | |
718 | FormalArgHandler Handler(B, MRI); |
719 | if (!handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, B)) |
720 | return false; |
721 | |
722 | uint64_t StackOffset = Assigner.StackOffset; |
723 | |
724 | if (!IsEntryFunc && !AMDGPUTargetMachine::EnableFixedFunctionABI) { |
725 | |
726 | TLI.allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info); |
727 | } |
728 | |
729 | // Start adding system SGPRs. |
730 | if (IsEntryFunc) { |
731 | TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsGraphics); |
732 | } else { |
733 | if (!Subtarget.enableFlatScratch()) |
734 | CCInfo.AllocateReg(Info->getScratchRSrcReg()); |
735 | TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info); |
736 | } |
737 | |
738 | // When we tail call, we need to check if the callee's arguments will fit on |
739 | // the caller's stack. So, whenever we lower formal arguments, we should keep |
740 | // track of this information, since we might lower a tail call in this |
741 | // function later. |
742 | Info->setBytesInStackArgArea(StackOffset); |
743 | |
744 | // Move back to the end of the basic block. |
745 | B.setMBB(MBB); |
746 | |
747 | return true; |
748 | } |
749 | |
750 | bool AMDGPUCallLowering::passSpecialInputs(MachineIRBuilder &MIRBuilder, |
751 | CCState &CCInfo, |
752 | SmallVectorImpl<std::pair<MCRegister, Register>> &ArgRegs, |
753 | CallLoweringInfo &Info) const { |
754 | MachineFunction &MF = MIRBuilder.getMF(); |
755 | |
756 | const AMDGPUFunctionArgInfo *CalleeArgInfo |
757 | = &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo; |
758 | |
759 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
760 | const AMDGPUFunctionArgInfo &CallerArgInfo = MFI->getArgInfo(); |
761 | |
762 | |
763 | |
764 | |
765 | |
766 | AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = { |
767 | AMDGPUFunctionArgInfo::DISPATCH_PTR, |
768 | AMDGPUFunctionArgInfo::QUEUE_PTR, |
769 | AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR, |
770 | AMDGPUFunctionArgInfo::DISPATCH_ID, |
771 | AMDGPUFunctionArgInfo::WORKGROUP_ID_X, |
772 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Y, |
773 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Z |
774 | }; |
775 | |
776 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
777 | |
778 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); |
779 | const AMDGPULegalizerInfo *LI |
780 | = static_cast<const AMDGPULegalizerInfo*>(ST.getLegalizerInfo()); |
781 | |
782 | for (auto InputID : InputRegs) { |
783 | const ArgDescriptor *OutgoingArg; |
784 | const TargetRegisterClass *ArgRC; |
785 | LLT ArgTy; |
786 | |
787 | std::tie(OutgoingArg, ArgRC, ArgTy) = |
788 | CalleeArgInfo->getPreloadedValue(InputID); |
789 | if (!OutgoingArg) |
790 | continue; |
791 | |
792 | const ArgDescriptor *IncomingArg; |
793 | const TargetRegisterClass *IncomingArgRC; |
794 | std::tie(IncomingArg, IncomingArgRC, ArgTy) = |
795 | CallerArgInfo.getPreloadedValue(InputID); |
796 | assert(IncomingArgRC == ArgRC); |
797 | |
798 | Register InputReg = MRI.createGenericVirtualRegister(ArgTy); |
799 | |
800 | if (IncomingArg) { |
801 | LI->loadInputValue(InputReg, MIRBuilder, IncomingArg, ArgRC, ArgTy); |
802 | } else { |
803 | assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR); |
804 | LI->getImplicitArgPtr(InputReg, MRI, MIRBuilder); |
805 | } |
806 | |
807 | if (OutgoingArg->isRegister()) { |
808 | ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg); |
809 | if (!CCInfo.AllocateReg(OutgoingArg->getRegister())) |
810 | report_fatal_error("failed to allocate implicit input argument"); |
811 | } else { |
812 | LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n"); |
813 | return false; |
814 | } |
815 | } |
816 | |
817 | // Pack workitem IDs into a single register or pass it as is if already |
818 | // packed. |
819 | const ArgDescriptor *OutgoingArg; |
820 | const TargetRegisterClass *ArgRC; |
821 | LLT ArgTy; |
822 | |
823 | std::tie(OutgoingArg, ArgRC, ArgTy) = |
824 | CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X); |
825 | if (!OutgoingArg) |
826 | std::tie(OutgoingArg, ArgRC, ArgTy) = |
827 | CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y); |
828 | if (!OutgoingArg) |
829 | std::tie(OutgoingArg, ArgRC, ArgTy) = |
830 | CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z); |
831 | if (!OutgoingArg) |
832 | return false; |
833 | |
834 | auto WorkitemIDX = |
835 | CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X); |
836 | auto WorkitemIDY = |
837 | CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y); |
838 | auto WorkitemIDZ = |
839 | CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z); |
840 | |
841 | const ArgDescriptor *IncomingArgX = std::get<0>(WorkitemIDX); |
842 | const ArgDescriptor *IncomingArgY = std::get<0>(WorkitemIDY); |
843 | const ArgDescriptor *IncomingArgZ = std::get<0>(WorkitemIDZ); |
844 | const LLT S32 = LLT::scalar(32); |
845 | |
846 | |
847 | |
848 | Register InputReg; |
849 | if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX) { |
850 | InputReg = MRI.createGenericVirtualRegister(S32); |
851 | LI->loadInputValue(InputReg, MIRBuilder, IncomingArgX, |
852 | std::get<1>(WorkitemIDX), std::get<2>(WorkitemIDX)); |
853 | } |
854 | |
855 | if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY) { |
856 | Register Y = MRI.createGenericVirtualRegister(S32); |
857 | LI->loadInputValue(Y, MIRBuilder, IncomingArgY, std::get<1>(WorkitemIDY), |
858 | std::get<2>(WorkitemIDY)); |
859 | |
860 | Y = MIRBuilder.buildShl(S32, Y, MIRBuilder.buildConstant(S32, 10)).getReg(0); |
861 | InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Y).getReg(0) : Y; |
862 | } |
863 | |
864 | if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ) { |
865 | Register Z = MRI.createGenericVirtualRegister(S32); |
866 | LI->loadInputValue(Z, MIRBuilder, IncomingArgZ, std::get<1>(WorkitemIDZ), |
867 | std::get<2>(WorkitemIDZ)); |
868 | |
869 | Z = MIRBuilder.buildShl(S32, Z, MIRBuilder.buildConstant(S32, 20)).getReg(0); |
870 | InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Z).getReg(0) : Z; |
871 | } |
872 | |
873 | if (!InputReg) { |
874 | InputReg = MRI.createGenericVirtualRegister(S32); |
875 | |
876 | // Workitem ids are already packed, any of present incoming arguments will |
877 | // carry all required fields. |
878 | ArgDescriptor IncomingArg = ArgDescriptor::createArg( |
879 | IncomingArgX ? *IncomingArgX : |
880 | IncomingArgY ? *IncomingArgY : *IncomingArgZ, ~0u); |
881 | LI->loadInputValue(InputReg, MIRBuilder, &IncomingArg, |
882 | &AMDGPU::VGPR_32RegClass, S32); |
883 | } |
884 | |
885 | if (OutgoingArg->isRegister()) { |
886 | ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg); |
887 | if (!CCInfo.AllocateReg(OutgoingArg->getRegister())) |
888 | report_fatal_error("failed to allocate implicit input argument"); |
889 | } else { |
890 | LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n"); |
891 | return false; |
892 | } |
893 | |
894 | return true; |
895 | } |
896 | |
897 | /// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn if |
898 | /// there is one. |
899 | static std::pair<CCAssignFn *, CCAssignFn *> |
900 | getAssignFnsForCC(CallingConv::ID CC, const SITargetLowering &TLI) { |
901 | return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)}; |
902 | } |
903 | |
904 | static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect, |
905 | bool IsTailCall) { |
906 | return IsTailCall ? AMDGPU::SI_TCRETURN : AMDGPU::SI_CALL; |
907 | } |
908 | |
909 | // Add operands to call instruction to track the callee. |
910 | static bool addCallTargetOperands(MachineInstrBuilder &CallInst, |
911 | MachineIRBuilder &MIRBuilder, |
912 | AMDGPUCallLowering::CallLoweringInfo &Info) { |
913 | if (Info.Callee.isReg()) { |
914 | CallInst.addReg(Info.Callee.getReg()); |
915 | CallInst.addImm(0); |
916 | } else if (Info.Callee.isGlobal() && Info.Callee.getOffset() == 0) { |
917 | // The call lowering lightly assumed we can directly encode a call target in |
918 | // the instruction, which is not the case. Materialize the address here. |
919 | const GlobalValue *GV = Info.Callee.getGlobal(); |
920 | auto Ptr = MIRBuilder.buildGlobalValue( |
921 | LLT::pointer(GV->getAddressSpace(), 64), GV); |
922 | CallInst.addReg(Ptr.getReg(0)); |
923 | CallInst.add(Info.Callee); |
924 | } else |
925 | return false; |
926 | |
927 | return true; |
928 | } |
929 | |
930 | bool AMDGPUCallLowering::doCallerAndCalleePassArgsTheSameWay( |
931 | CallLoweringInfo &Info, MachineFunction &MF, |
932 | SmallVectorImpl<ArgInfo> &InArgs) const { |
933 | const Function &CallerF = MF.getFunction(); |
934 | CallingConv::ID CalleeCC = Info.CallConv; |
935 | CallingConv::ID CallerCC = CallerF.getCallingConv(); |
936 | |
937 | // If the calling conventions match, then everything must be the same. |
938 | if (CalleeCC == CallerCC) |
939 | return true; |
940 | |
941 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); |
942 | |
943 | // Make sure that the caller and callee preserve all of the same registers. |
944 | auto TRI = ST.getRegisterInfo(); |
945 | |
946 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); |
947 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); |
948 | if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) |
949 | return false; |
950 | |
951 | // Check if the caller and callee will handle arguments in the same way. |
952 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); |
953 | CCAssignFn *CalleeAssignFnFixed; |
954 | CCAssignFn *CalleeAssignFnVarArg; |
955 | std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) = |
956 | getAssignFnsForCC(CalleeCC, TLI); |
957 | |
958 | CCAssignFn *CallerAssignFnFixed; |
959 | CCAssignFn *CallerAssignFnVarArg; |
960 | std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) = |
961 | getAssignFnsForCC(CallerCC, TLI); |
962 | |
963 | // FIXME: We are not accounting for potential differences in implicitly passed |
964 | // inputs, but only the fixed ABI is supported now anyway. |
965 | IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed, |
966 | CalleeAssignFnVarArg); |
967 | IncomingValueAssigner CallerAssigner(CallerAssignFnFixed, |
968 | CallerAssignFnVarArg); |
969 | return resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner); |
970 | } |
971 | |
972 | bool AMDGPUCallLowering::areCalleeOutgoingArgsTailCallable( |
973 | CallLoweringInfo &Info, MachineFunction &MF, |
974 | SmallVectorImpl<ArgInfo> &OutArgs) const { |
975 | // If there are no outgoing arguments, then we are done. |
976 | if (OutArgs.empty()) |
977 | return true; |
978 | |
979 | const Function &CallerF = MF.getFunction(); |
980 | CallingConv::ID CalleeCC = Info.CallConv; |
981 | CallingConv::ID CallerCC = CallerF.getCallingConv(); |
982 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); |
983 | |
984 | CCAssignFn *AssignFnFixed; |
985 | CCAssignFn *AssignFnVarArg; |
986 | std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI); |
987 | |
988 | // We have outgoing arguments. Make sure that we can tail call with them. |
989 | SmallVector<CCValAssign, 16> OutLocs; |
990 | CCState OutInfo(CalleeCC, false, MF, OutLocs, CallerF.getContext()); |
991 | OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg); |
992 | |
993 | if (!determineAssignments(Assigner, OutArgs, OutInfo)) { |
994 | LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n"); |
995 | return false; |
996 | } |
997 | |
998 | // Make sure that they can fit on the caller's stack. |
999 | const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); |
1000 | if (OutInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) { |
1001 | LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n"); |
1002 | return false; |
1003 | } |
1004 | |
1005 | // Verify that the parameters in callee-saved registers match. |
1006 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); |
1007 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
1008 | const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC); |
1009 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
1010 | return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs); |
1011 | } |
1012 | |
1013 | /// Return true if the calling convention is one that we can guarantee TCO for. |
1014 | static bool canGuaranteeTCO(CallingConv::ID CC) { |
1015 | return CC == CallingConv::Fast; |
1016 | } |
1017 | |
1018 | /// Return true if we might ever do TCO for calls with this calling convention. |
1019 | static bool mayTailCallThisCC(CallingConv::ID CC) { |
1020 | switch (CC) { |
1021 | case CallingConv::C: |
1022 | case CallingConv::AMDGPU_Gfx: |
1023 | return true; |
1024 | default: |
1025 | return canGuaranteeTCO(CC); |
1026 | } |
1027 | } |
1028 | |
1029 | bool AMDGPUCallLowering::isEligibleForTailCallOptimization( |
1030 | MachineIRBuilder &B, CallLoweringInfo &Info, |
1031 | SmallVectorImpl<ArgInfo> &InArgs, SmallVectorImpl<ArgInfo> &OutArgs) const { |
1032 | // Must pass all target-independent checks in order to tail call. |
1033 | if (!Info.IsTailCall) |
1034 | return false; |
1035 | |
1036 | MachineFunction &MF = B.getMF(); |
1037 | const Function &CallerF = MF.getFunction(); |
1038 | CallingConv::ID CalleeCC = Info.CallConv; |
1039 | CallingConv::ID CallerCC = CallerF.getCallingConv(); |
1040 | |
1041 | const SIRegisterInfo *TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo(); |
1042 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); |
1043 | |
1044 | |
1045 | if (!CallerPreserved) |
1046 | return false; |
1047 | |
1048 | if (!mayTailCallThisCC(CalleeCC)) { |
1049 | LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n"); |
1050 | return false; |
1051 | } |
1052 | |
1053 | if (any_of(CallerF.args(), [](const Argument &A) { |
1054 | return A.hasByValAttr() || A.hasSwiftErrorAttr(); |
1055 | })) { |
1056 | LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval " |
1057 | "or swifterror arguments\n"); |
1058 | return false; |
1059 | } |
1060 | |
1061 | // If we have -tailcallopt, then we're done. |
1062 | if (MF.getTarget().Options.GuaranteedTailCallOpt) |
1063 | return canGuaranteeTCO(CalleeCC) && CalleeCC == CallerF.getCallingConv(); |
1064 | |
1065 | // Verify that the incoming and outgoing arguments from the callee are |
1066 | // safe to tail call. |
1067 | if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) { |
1068 | LLVM_DEBUG( |
1069 | dbgs() |
1070 | << "... Caller and callee have incompatible calling conventions.\n"); |
1071 | return false; |
1072 | } |
1073 | |
1074 | if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs)) |
1075 | return false; |
1076 | |
1077 | LLVM_DEBUG(dbgs() << "... Call is eligible for tail call optimization.\n"); |
1078 | return true; |
1079 | } |
1080 | |
1081 | |
1082 | |
1083 | |
1084 | void AMDGPUCallLowering::handleImplicitCallArguments( |
1085 | MachineIRBuilder &MIRBuilder, MachineInstrBuilder &CallInst, |
1086 | const GCNSubtarget &ST, const SIMachineFunctionInfo &FuncInfo, |
1087 | ArrayRef<std::pair<MCRegister, Register>> ImplicitArgRegs) const { |
1088 | if (!ST.enableFlatScratch()) { |
1089 | // Insert copies for the SRD. In the HSA case, this should be an identity |
1090 | // copy. |
1091 | auto ScratchRSrcReg = MIRBuilder.buildCopy(LLT::fixed_vector(4, 32), |
1092 | FuncInfo.getScratchRSrcReg()); |
1093 | MIRBuilder.buildCopy(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg); |
1094 | CallInst.addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Implicit); |
1095 | } |
1096 | |
1097 | for (std::pair<MCRegister, Register> ArgReg : ImplicitArgRegs) { |
1098 | MIRBuilder.buildCopy((Register)ArgReg.first, ArgReg.second); |
1099 | CallInst.addReg(ArgReg.first, RegState::Implicit); |
1100 | } |
1101 | } |
1102 | |
1103 | bool AMDGPUCallLowering::lowerTailCall( |
1104 | MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info, |
1105 | SmallVectorImpl<ArgInfo> &OutArgs) const { |
1106 | MachineFunction &MF = MIRBuilder.getMF(); |
1107 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); |
1108 | SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); |
1109 | const Function &F = MF.getFunction(); |
1110 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
1111 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); |
1112 | |
1113 | // True when we're tail calling, but without -tailcallopt. |
1114 | bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt; |
1115 | |
1116 | // Find out which ABI gets to decide where things go. |
1117 | CallingConv::ID CalleeCC = Info.CallConv; |
1118 | CCAssignFn *AssignFnFixed; |
1119 | CCAssignFn *AssignFnVarArg; |
1120 | std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI); |
1121 | |
1122 | MachineInstrBuilder CallSeqStart; |
1123 | if (!IsSibCall) |
1124 | CallSeqStart = MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP); |
1125 | |
1126 | unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true); |
1127 | auto MIB = MIRBuilder.buildInstrNoInsert(Opc); |
1128 | if (!addCallTargetOperands(MIB, MIRBuilder, Info)) |
1129 | return false; |
1130 | |
1131 | // Byte offset for the tail call. When we are sibcalling, this will always |
1132 | // be 0. |
1133 | MIB.addImm(0); |
1134 | |
1135 | // Tell the call which registers are clobbered. |
1136 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
1137 | const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC); |
1138 | MIB.addRegMask(Mask); |
1139 | |
1140 | |
1141 | |
1142 | |
1143 | |
1144 | |
1145 | int FPDiff = 0; |
1146 | |
1147 | // This will be 0 for sibcalls, potentially nonzero for tail calls produced |
1148 | // by -tailcallopt. For sibcalls, the memory operands for the call are |
1149 | // already available in the caller's incoming argument space. |
1150 | unsigned NumBytes = 0; |
1151 | if (!IsSibCall) { |
1152 | // We aren't sibcalling, so we need to compute FPDiff. We need to do this |
1153 | // before handling assignments, because FPDiff must be known for memory |
1154 | // arguments. |
1155 | unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea(); |
1156 | SmallVector<CCValAssign, 16> OutLocs; |
1157 | CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext()); |
1158 | |
1159 | // FIXME: Not accounting for callee implicit inputs |
1160 | OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg); |
1161 | if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo)) |
1162 | return false; |
1163 | |
1164 | // The callee will pop the argument stack as a tail call. Thus, we must |
1165 | // keep it 16-byte aligned. |
1166 | NumBytes = alignTo(OutInfo.getNextStackOffset(), ST.getStackAlignment()); |
1167 | |
1168 | // FPDiff will be negative if this tail call requires more space than we |
1169 | // would automatically have in our incoming argument space. Positive if we |
1170 | // can actually shrink the stack. |
1171 | FPDiff = NumReusableBytes - NumBytes; |
1172 | |
1173 | // The stack pointer must be 16-byte aligned at all times it's used for a |
1174 | // memory operation, which in practice means at *all* times and in |
1175 | // particular across call boundaries. Therefore our own arguments started at |
1176 | // a 16-byte aligned SP and the delta applied for the tail call should |
1177 | // satisfy the same constraint. |
1178 | assert(isAligned(ST.getStackAlignment(), FPDiff) && |
1179 | "unaligned stack on tail call"); |
1180 | } |
1181 | |
1182 | SmallVector<CCValAssign, 16> ArgLocs; |
1183 | CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext()); |
1184 | |
1185 | // We could pass MIB and directly add the implicit uses to the call |
1186 | // now. However, as an aesthetic choice, place implicit argument operands |
1187 | // after the ordinary user argument registers. |
1188 | SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs; |
1189 | |
1190 | if (AMDGPUTargetMachine::EnableFixedFunctionABI && |
1191 | Info.CallConv != CallingConv::AMDGPU_Gfx) { |
1192 | // With a fixed ABI, allocate fixed registers before user arguments. |
1193 | if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info)) |
1194 | return false; |
1195 | } |
1196 | |
1197 | OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg); |
1198 | |
1199 | if (!determineAssignments(Assigner, OutArgs, CCInfo)) |
1200 | return false; |
1201 | |
1202 | // Do the actual argument marshalling. |
1203 | AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, true, FPDiff); |
1204 | if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder)) |
1205 | return false; |
1206 | |
1207 | handleImplicitCallArguments(MIRBuilder, MIB, ST, *FuncInfo, ImplicitArgRegs); |
1208 | |
1209 | // If we have -tailcallopt, we need to adjust the stack. We'll do the call |
1210 | // sequence start and end here. |
1211 | if (!IsSibCall) { |
1212 | MIB->getOperand(1).setImm(FPDiff); |
1213 | CallSeqStart.addImm(NumBytes).addImm(0); |
1214 | |
1215 | |
1216 | |
1217 | |
1218 | MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN).addImm(NumBytes).addImm(0); |
1219 | } |
1220 | |
1221 | // Now we can add the actual call instruction to the correct basic block. |
1222 | MIRBuilder.insertInstr(MIB); |
1223 | |
1224 | // If Callee is a reg, since it is used by a target specific |
1225 | // instruction, it must have a register class matching the |
1226 | // constraint of that instruction. |
1227 | |
1228 | // FIXME: We should define regbankselectable call instructions to handle |
1229 | // divergent call targets. |
1230 | if (MIB->getOperand(0).isReg()) { |
1231 | MIB->getOperand(0).setReg(constrainOperandRegClass( |
1232 | MF, *TRI, MRI, *ST.getInstrInfo(), *ST.getRegBankInfo(), *MIB, |
1233 | MIB->getDesc(), MIB->getOperand(0), 0)); |
1234 | } |
1235 | |
1236 | MF.getFrameInfo().setHasTailCall(); |
1237 | Info.LoweredTailCall = true; |
1238 | return true; |
1239 | } |
1240 | |
1241 | bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder, |
1242 | CallLoweringInfo &Info) const { |
1243 | if (Info.IsVarArg) { |
1244 | LLVM_DEBUG(dbgs() << "Variadic functions not implemented\n"); |
1245 | return false; |
1246 | } |
1247 | |
1248 | MachineFunction &MF = MIRBuilder.getMF(); |
1249 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); |
1250 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
1251 | |
1252 | const Function &F = MF.getFunction(); |
1253 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
1254 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); |
1255 | const DataLayout &DL = F.getParent()->getDataLayout(); |
1256 | |
1257 | if (!AMDGPUTargetMachine::EnableFixedFunctionABI && |
1258 | Info.CallConv != CallingConv::AMDGPU_Gfx) { |
1259 | LLVM_DEBUG(dbgs() << "Variable function ABI not implemented\n"); |
1260 | return false; |
1261 | } |
1262 | |
1263 | SmallVector<ArgInfo, 8> OutArgs; |
1264 | for (auto &OrigArg : Info.OrigArgs) |
1265 | splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv); |
1266 | |
1267 | SmallVector<ArgInfo, 8> InArgs; |
1268 | if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) |
1269 | splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv); |
1270 | |
1271 | // If we can lower as a tail call, do that instead. |
1272 | bool CanTailCallOpt = |
1273 | isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs); |
1274 | |
1275 | // We must emit a tail call if we have musttail. |
1276 | if (Info.IsMustTailCall && !CanTailCallOpt) { |
1277 | LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n"); |
1278 | return false; |
1279 | } |
1280 | |
1281 | if (CanTailCallOpt) |
1282 | return lowerTailCall(MIRBuilder, Info, OutArgs); |
1283 | |
1284 | // Find out which ABI gets to decide where things go. |
1285 | CCAssignFn *AssignFnFixed; |
1286 | CCAssignFn *AssignFnVarArg; |
1287 | std::tie(AssignFnFixed, AssignFnVarArg) = |
1288 | getAssignFnsForCC(Info.CallConv, TLI); |
1289 | |
1290 | MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP) |
1291 | .addImm(0) |
1292 | .addImm(0); |
1293 | |
1294 | // Create a temporarily-floating call instruction so we can add the implicit |
1295 | // uses of arg registers. |
1296 | unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), false); |
1297 | |
1298 | auto MIB = MIRBuilder.buildInstrNoInsert(Opc); |
1299 | MIB.addDef(TRI->getReturnAddressReg(MF)); |
1300 | |
1301 | if (!addCallTargetOperands(MIB, MIRBuilder, Info)) |
1302 | return false; |
1303 | |
1304 | // Tell the call which registers are clobbered. |
1305 | const uint32_t *Mask = TRI->getCallPreservedMask(MF, Info.CallConv); |
1306 | MIB.addRegMask(Mask); |
1307 | |
1308 | SmallVector<CCValAssign, 16> ArgLocs; |
1309 | CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext()); |
1310 | |
1311 | // We could pass MIB and directly add the implicit uses to the call |
1312 | // now. However, as an aesthetic choice, place implicit argument operands |
1313 | // after the ordinary user argument registers. |
1314 | SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs; |
1315 | |
1316 | if (AMDGPUTargetMachine::EnableFixedFunctionABI && |
1317 | Info.CallConv != CallingConv::AMDGPU_Gfx) { |
1318 | // With a fixed ABI, allocate fixed registers before user arguments. |
1319 | if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info)) |
1320 | return false; |
1321 | } |
1322 | |
1323 | // Do the actual argument marshalling. |
1324 | SmallVector<Register, 8> PhysRegs; |
1325 | |
1326 | OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg); |
1327 | if (!determineAssignments(Assigner, OutArgs, CCInfo)) |
1328 | return false; |
1329 | |
1330 | AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, false); |
1331 | if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder)) |
1332 | return false; |
1333 | |
1334 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
1335 | |
1336 | handleImplicitCallArguments(MIRBuilder, MIB, ST, *MFI, ImplicitArgRegs); |
1337 | |
1338 | // Get a count of how many bytes are to be pushed on the stack. |
1339 | unsigned NumBytes = CCInfo.getNextStackOffset(); |
1340 | |
1341 | // If Callee is a reg, since it is used by a target specific |
1342 | // instruction, it must have a register class matching the |
1343 | // constraint of that instruction. |
1344 | // FIXME: We should define regbankselectable call instructions to handle |
1345 | // divergent call targets. |
1346 | |
1347 | if (MIB->getOperand(1).isReg()) { |
1348 | MIB->getOperand(1).setReg(constrainOperandRegClass( |
1349 | MF, *TRI, MRI, *ST.getInstrInfo(), |
1350 | *ST.getRegBankInfo(), *MIB, MIB->getDesc(), MIB->getOperand(1), |
1351 | 1)); |
1352 | } |
1353 | |
1354 | // Now we can add the actual call instruction to the correct position. |
1355 | MIRBuilder.insertInstr(MIB); |
1356 | |
1357 | // Finally we can copy the returned value back into its virtual-register. In |
1358 | // symmetry with the arguments, the physical register must be an |
1359 | // implicit-define of the call instruction. |
1360 | if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) { |
1361 | CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv, |
1362 | Info.IsVarArg); |
1363 | IncomingValueAssigner Assigner(RetAssignFn); |
1364 | CallReturnHandler Handler(MIRBuilder, MRI, MIB); |
1365 | if (!determineAndHandleAssignments(Handler, Assigner, InArgs, MIRBuilder, |
1366 | Info.CallConv, Info.IsVarArg)) |
1367 | return false; |
1368 | } |
1369 | |
1370 | uint64_t CalleePopBytes = NumBytes; |
1371 | |
1372 | MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN) |
1373 | .addImm(0) |
1374 | .addImm(CalleePopBytes); |
1375 | |
1376 | if (!Info.CanLowerReturn) { |
1377 | insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs, |
1378 | Info.DemoteRegister, Info.DemoteStackIndex); |
1379 | } |
1380 | |
1381 | return true; |
1382 | } |