Bug Summary

File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
Warning: line 168, column 23
Called C++ object pointer is null
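In plain terms, the checker's claim is this: inside CallLowering::setArgFlags, PtrTy is produced by dyn_cast<PointerType>(Arg.Ty->getScalarType()), which returns null whenever that scalar type is not a pointer, yet line 168 calls PtrTy->getElementType() on the byval/inalloca/preallocated path without re-checking it. The following is a minimal, self-contained analogue of that pattern built from hypothetical stand-in types (it is not the LLVM code itself); it only shows the shape of what the checker models.

    // Self-contained analogue of the flagged pattern (hypothetical types, not LLVM's).
    #include <cstdio>

    struct Type { virtual ~Type() = default; };
    struct PointerType : Type {
      Type *Element = nullptr;
      Type *getElementType() const { return Element; }
    };

    // dyn_cast-style helper: yields nullptr when T is not really a PointerType.
    static PointerType *asPointer(Type *T) { return dynamic_cast<PointerType *>(T); }

    static void setFlagsLike(Type *ArgTy, bool IsByVal) {
      PointerType *PtrTy = asPointer(ArgTy); // may be nullptr (step 33 in the path below)
      if (PtrTy) {
        // pointer-specific flags would be set here (the report's lines 161-162)
      }
      if (IsByVal) {
        // Analogue of line 168: if this branch can be reached while PtrTy is null,
        // the call below dereferences a null pointer -- exactly what the checker models.
        std::printf("%p\n", static_cast<void *>(PtrTy->getElementType()));
      }
    }

    int main() {
      Type NonPointer;
      setFlagsLike(&NonPointer, /*IsByVal=*/false); // safe: the byval branch is not taken
    }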

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CallLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Analysis -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ASMParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/BinaryFormat -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitstream -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /include/llvm/CodeGen -I /include/llvm/CodeGen/PBQP -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Coroutines -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData/Coverage -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/CodeView -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/DWARF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/MSF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/PDB -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Demangle -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/JITLink -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/Orc -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenACC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenMP -I /include/llvm/CodeGen/GlobalISel -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IRReader -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/LTO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Linker -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC/MCParser -I /include/llvm/CodeGen/MIRParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Object -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Option -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Passes -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Scalar -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ADT -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/Symbolize -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Target -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Utils -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Vectorize -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/IPO -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libLLVM/../include -I /usr/src/gnu/usr.bin/clang/libLLVM/obj -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -D PIC -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -D_RET_PROTECTOR -ret-protector -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
1//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements some simple delegations needed for call lowering.
11///
12//===----------------------------------------------------------------------===//
13
14#include "llvm/CodeGen/Analysis.h"
15#include "llvm/CodeGen/GlobalISel/CallLowering.h"
16#include "llvm/CodeGen/GlobalISel/Utils.h"
17#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
18#include "llvm/CodeGen/MachineOperand.h"
19#include "llvm/CodeGen/MachineRegisterInfo.h"
20#include "llvm/CodeGen/TargetLowering.h"
21#include "llvm/IR/DataLayout.h"
22#include "llvm/IR/Instructions.h"
23#include "llvm/IR/LLVMContext.h"
24#include "llvm/IR/Module.h"
25#include "llvm/Target/TargetMachine.h"
26
27#define DEBUG_TYPE "call-lowering"
28
29using namespace llvm;
30
31void CallLowering::anchor() {}
32
33/// Helper function which updates \p Flags when \p AttrFn returns true.
34static void
35addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags,
36 const std::function<bool(Attribute::AttrKind)> &AttrFn) {
37 if (AttrFn(Attribute::SExt))
4. Assuming the condition is false
5. Taking false branch
38 Flags.setSExt();
39 if (AttrFn(Attribute::ZExt))
6. Assuming the condition is false
7. Taking false branch
40 Flags.setZExt();
41 if (AttrFn(Attribute::InReg))
8. Assuming the condition is false
9. Taking false branch
42 Flags.setInReg();
43 if (AttrFn(Attribute::StructRet))
10. Assuming the condition is false
11. Taking false branch
44 Flags.setSRet();
45 if (AttrFn(Attribute::Nest))
12. Assuming the condition is false
13. Taking false branch
46 Flags.setNest();
47 if (AttrFn(Attribute::ByVal))
14. Assuming the condition is false
15. Taking false branch
48 Flags.setByVal();
49 if (AttrFn(Attribute::Preallocated))
16. Assuming the condition is false
17. Taking false branch
50 Flags.setPreallocated();
51 if (AttrFn(Attribute::InAlloca))
18. Assuming the condition is false
19. Taking false branch
52 Flags.setInAlloca();
53 if (AttrFn(Attribute::Returned))
20. Assuming the condition is false
21. Taking false branch
54 Flags.setReturned();
55 if (AttrFn(Attribute::SwiftSelf))
22. Assuming the condition is false
23. Taking false branch
56 Flags.setSwiftSelf();
57 if (AttrFn(Attribute::SwiftAsync))
24. Assuming the condition is false
25. Taking false branch
58 Flags.setSwiftAsync();
59 if (AttrFn(Attribute::SwiftError))
26. Assuming the condition is false
27. Taking false branch
60 Flags.setSwiftError();
61}
28. Returning without writing to 'Flags.IsByVal', which participates in a condition later
62
63ISD::ArgFlagsTy CallLowering::getAttributesForArgIdx(const CallBase &Call,
64 unsigned ArgIdx) const {
65 ISD::ArgFlagsTy Flags;
66 addFlagsUsingAttrFn(Flags, [&Call, &ArgIdx](Attribute::AttrKind Attr) {
67 return Call.paramHasAttr(ArgIdx, Attr);
68 });
69 return Flags;
70}
71
72void CallLowering::addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
73 const AttributeList &Attrs,
74 unsigned OpIdx) const {
75 addFlagsUsingAttrFn(Flags, [&Attrs, &OpIdx](Attribute::AttrKind Attr) {
3. Calling 'addFlagsUsingAttrFn'
29. Returning from 'addFlagsUsingAttrFn'
76 return Attrs.hasAttribute(OpIdx, Attr);
77 });
78}
30. Returning without writing to 'Flags.IsByVal', which participates in a condition later
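A note on why the analyzer records this: addFlagsUsingAttrFn and addArgFlagsFromAttributes only ever set flag bits, never clear them, so whether Flags.IsByVal is true at line 166 is decided entirely by the flags object the caller passed in. The sketch below is a minimal analogue of that reasoning, with a hypothetical Flags struct standing in for ISD::ArgFlagsTy (whose upstream constructor likewise zero-initializes its bits).

    // Minimal analogue of the "returning without writing to Flags.IsByVal" note
    // (hypothetical Flags struct standing in for ISD::ArgFlagsTy).
    #include <cassert>

    struct Flags {
      unsigned IsByVal : 1;
      Flags() : IsByVal(0) {}          // default-constructed flags start cleared
      void setByVal() { IsByVal = 1; } // setters only ever turn bits on
      bool isByVal() const { return IsByVal; }
    };

    static void addFlagsIfAttr(Flags &F, bool HasByValAttr) {
      if (HasByValAttr) // the path above assumes every attribute check is false (steps 4-27)
        F.setByVal();
    }

    int main() {
      Flags F;                  // caller starts from a freshly constructed flags object
      addFlagsIfAttr(F, false); // returns without writing IsByVal (this step)
      assert(!F.isByVal());     // so a later byval-dependent branch would stay untaken
    }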
79
80bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
81 ArrayRef<Register> ResRegs,
82 ArrayRef<ArrayRef<Register>> ArgRegs,
83 Register SwiftErrorVReg,
84 std::function<unsigned()> GetCalleeReg) const {
85 CallLoweringInfo Info;
86 const DataLayout &DL = MIRBuilder.getDataLayout();
87 MachineFunction &MF = MIRBuilder.getMF();
88 bool CanBeTailCalled = CB.isTailCall() &&
89 isInTailCallPosition(CB, MF.getTarget()) &&
90 (MF.getFunction()
91 .getFnAttribute("disable-tail-calls")
92 .getValueAsString() != "true");
93
94 CallingConv::ID CallConv = CB.getCallingConv();
95 Type *RetTy = CB.getType();
96 bool IsVarArg = CB.getFunctionType()->isVarArg();
97
98 SmallVector<BaseArgInfo, 4> SplitArgs;
99 getReturnInfo(CallConv, RetTy, CB.getAttributes(), SplitArgs, DL);
100 Info.CanLowerReturn = canLowerReturn(MF, CallConv, SplitArgs, IsVarArg);
101
102 if (!Info.CanLowerReturn) {
103 // Callee requires sret demotion.
104 insertSRetOutgoingArgument(MIRBuilder, CB, Info);
105
106 // The sret demotion isn't compatible with tail-calls, since the sret
107 // argument points into the caller's stack frame.
108 CanBeTailCalled = false;
109 }
110
111 // First step is to marshall all the function's parameters into the correct
112 // physregs and memory locations. Gather the sequence of argument types that
113 // we'll pass to the assigner function.
114 unsigned i = 0;
115 unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
116 for (auto &Arg : CB.args()) {
117 ArgInfo OrigArg{ArgRegs[i], *Arg.get(), i, getAttributesForArgIdx(CB, i),
118 i < NumFixedArgs};
119 setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
120
121 // If we have an explicit sret argument that is an Instruction, (i.e., it
122 // might point to function-local memory), we can't meaningfully tail-call.
123 if (OrigArg.Flags[0].isSRet() && isa<Instruction>(&Arg))
124 CanBeTailCalled = false;
125
126 Info.OrigArgs.push_back(OrigArg);
127 ++i;
128 }
129
130 // Try looking through a bitcast from one function type to another.
131 // Commonly happens with calls to objc_msgSend().
132 const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
133 if (const Function *F = dyn_cast<Function>(CalleeV))
134 Info.Callee = MachineOperand::CreateGA(F, 0);
135 else
136 Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);
137
138 Info.OrigRet = ArgInfo{ResRegs, RetTy, 0, ISD::ArgFlagsTy{}};
139 if (!Info.OrigRet.Ty->isVoidTy())
140 setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);
141
142 Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
143 Info.CallConv = CallConv;
144 Info.SwiftErrorVReg = SwiftErrorVReg;
145 Info.IsMustTailCall = CB.isMustTailCall();
146 Info.IsTailCall = CanBeTailCalled;
147 Info.IsVarArg = IsVarArg;
148 return lowerCall(MIRBuilder, Info);
149}
150
151template <typename FuncInfoTy>
152void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
153 const DataLayout &DL,
154 const FuncInfoTy &FuncInfo) const {
155 auto &Flags = Arg.Flags[0];
156 const AttributeList &Attrs = FuncInfo.getAttributes();
157 addArgFlagsFromAttributes(Flags, Attrs, OpIdx);
2. Calling 'CallLowering::addArgFlagsFromAttributes'
31. Returning from 'CallLowering::addArgFlagsFromAttributes'
158
159 PointerType *PtrTy = dyn_cast<PointerType>(Arg.Ty->getScalarType());
32. Assuming the object is not a 'PointerType'
33. 'PtrTy' initialized to a null pointer value
160 if (PtrTy) {
33.1. 'PtrTy' is null
34. Taking false branch
161 Flags.setPointer();
162 Flags.setPointerAddrSpace(PtrTy->getPointerAddressSpace());
163 }
164
165 Align MemAlign = DL.getABITypeAlign(Arg.Ty);
166 if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
35. Assuming the condition is true
167 assert(OpIdx >= AttributeList::FirstArgIndex);
168 Type *ElementTy = PtrTy->getElementType();
36. Called C++ object pointer is null
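The warning combines two assumptions made earlier on this path: PtrTy is null (step 33, because Arg.Ty->getScalarType() was taken not to be a PointerType) and the byval/inalloca/preallocated test at line 166 is true (step 35). Under both, PtrTy->getElementType() at line 168 dereferences null. Since lines 170-171 already prefer the byval attribute's own type and use ElementTy only as a fallback, one way to keep the null path unreachable is to guard that fallback. The sketch below uses hypothetical stand-in types and is a possible local mitigation only, not the upstream fix.

    // Sketch of guarding the fallback (hypothetical stand-in types, not the LLVM ones).
    #include <cassert>
    #include <cstdint>

    struct Type { uint64_t AllocSize = 8; };
    struct PointerType {
      Type *Element = nullptr;
      Type *getElementType() const { return Element; }
    };

    static uint64_t byValSize(const PointerType *PtrTy, const Type *AttrTy) {
      if (AttrTy)                 // analogue of lines 170-171: prefer the attribute's type
        return AttrTy->AllocSize;
      // Fallback analogue of line 168: only dereference once PtrTy is known non-null.
      assert(PtrTy && "byval/inalloca/preallocated argument should have a pointer type");
      return PtrTy->getElementType()->AllocSize;
    }

    int main() {
      Type Elem;
      PointerType P;
      P.Element = &Elem;
      return byValSize(&P, nullptr) == 8 ? 0 : 1; // exercises the guarded fallback path
    }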
169
170 auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
171 Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));
172
173 // For ByVal, alignment should be passed from FE. BE will guess if
174 // this info is not there but there are cases it cannot get right.
175 if (auto ParamAlign =
176 FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex))
177 MemAlign = *ParamAlign;
178 else if ((ParamAlign =
179 FuncInfo.getParamAlign(OpIdx - AttributeList::FirstArgIndex)))
180 MemAlign = *ParamAlign;
181 else
182 MemAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
183 } else if (OpIdx >= AttributeList::FirstArgIndex) {
184 if (auto ParamAlign =
185 FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex))
186 MemAlign = *ParamAlign;
187 }
188 Flags.setMemAlign(MemAlign);
189 Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
190
191 // Don't try to use the returned attribute if the argument is marked as
192 // swiftself, since it won't be passed in x0.
193 if (Flags.isSwiftSelf())
194 Flags.setReturned(false);
195}
196
197template void
198CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
199 const DataLayout &DL,
200 const Function &FuncInfo) const;
201
202template void
203CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
204 const DataLayout &DL,
205 const CallBase &FuncInfo) const;
206
207void CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
208 SmallVectorImpl<ArgInfo> &SplitArgs,
209 const DataLayout &DL,
210 CallingConv::ID CallConv,
211 SmallVectorImpl<uint64_t> *Offsets) const {
212 LLVMContext &Ctx = OrigArg.Ty->getContext();
213
214 SmallVector<EVT, 4> SplitVTs;
215 ComputeValueVTs(*TLI, DL, OrigArg.Ty, SplitVTs, Offsets, 0);
216
217 if (SplitVTs.size() == 0)
218 return;
219
220 if (SplitVTs.size() == 1) {
221 // No splitting to do, but we want to replace the original type (e.g. [1 x
222 // double] -> double).
223 SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
224 OrigArg.OrigArgIndex, OrigArg.Flags[0],
225 OrigArg.IsFixed, OrigArg.OrigValue);
226 return;
227 }
228
229 // Create one ArgInfo for each virtual register in the original ArgInfo.
230 assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");
231
232 bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
233 OrigArg.Ty, CallConv, false, DL);
234 for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
235 Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
236 SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.OrigArgIndex,
237 OrigArg.Flags[0], OrigArg.IsFixed);
238 if (NeedsRegBlock)
239 SplitArgs.back().Flags[0].setInConsecutiveRegs();
240 }
241
242 SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
243}
244
245/// Pack values \p SrcRegs to cover the vector type result \p DstRegs.
246static MachineInstrBuilder
247mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
248 ArrayRef<Register> SrcRegs) {
249 MachineRegisterInfo &MRI = *B.getMRI();
250 LLT LLTy = MRI.getType(DstRegs[0]);
251 LLT PartLLT = MRI.getType(SrcRegs[0]);
252
253 // Deal with v3s16 split into v2s16
254 LLT LCMTy = getLCMType(LLTy, PartLLT);
255 if (LCMTy == LLTy) {
256 // Common case where no padding is needed.
257 assert(DstRegs.size() == 1);
258 return B.buildConcatVectors(DstRegs[0], SrcRegs);
259 }
260
261 // We need to create an unmerge to the result registers, which may require
262 // widening the original value.
263 Register UnmergeSrcReg;
264 if (LCMTy != PartLLT) {
265 // e.g. A <3 x s16> value was split to <2 x s16>
266 // %register_value0:_(<2 x s16>)
267 // %register_value1:_(<2 x s16>)
268 // %undef:_(<2 x s16>) = G_IMPLICIT_DEF
269 // %concat:_<6 x s16>) = G_CONCAT_VECTORS %reg_value0, %reg_value1, %undef
270 // %dst_reg:_(<3 x s16>), %dead:_(<3 x s16>) = G_UNMERGE_VALUES %concat
271 const int NumWide = LCMTy.getSizeInBits() / PartLLT.getSizeInBits();
272 Register Undef = B.buildUndef(PartLLT).getReg(0);
273
274 // Build vector of undefs.
275 SmallVector<Register, 8> WidenedSrcs(NumWide, Undef);
276
277 // Replace the first sources with the real registers.
278 std::copy(SrcRegs.begin(), SrcRegs.end(), WidenedSrcs.begin());
279 UnmergeSrcReg = B.buildConcatVectors(LCMTy, WidenedSrcs).getReg(0);
280 } else {
281 // We don't need to widen anything if we're extracting a scalar which was
282 // promoted to a vector e.g. s8 -> v4s8 -> s8
283 assert(SrcRegs.size() == 1);
284 UnmergeSrcReg = SrcRegs[0];
285 }
286
287 int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();
288
289 SmallVector<Register, 8> PadDstRegs(NumDst);
290 std::copy(DstRegs.begin(), DstRegs.end(), PadDstRegs.begin());
291
292 // Create the excess dead defs for the unmerge.
293 for (int I = DstRegs.size(); I != NumDst; ++I)
294 PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);
295
296 return B.buildUnmerge(PadDstRegs, UnmergeSrcReg);
297}
298
299/// Create a sequence of instructions to combine pieces split into register
300/// typed values to the original IR value. \p OrigRegs contains the destination
301/// value registers of type \p LLTy, and \p Regs contains the legalized pieces
302/// with type \p PartLLT. This is used for incoming values (physregs to vregs).
303static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
304 ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT,
305 const ISD::ArgFlagsTy Flags) {
306 MachineRegisterInfo &MRI = *B.getMRI();
307
308 if (PartLLT == LLTy) {
309 // We should have avoided introducing a new virtual register, and just
310 // directly assigned here.
311 assert(OrigRegs[0] == Regs[0]);
312 return;
313 }
314
315 if (PartLLT.getSizeInBits() == LLTy.getSizeInBits() && OrigRegs.size() == 1 &&
316 Regs.size() == 1) {
317 B.buildBitcast(OrigRegs[0], Regs[0]);
318 return;
319 }
320
321 // A vector PartLLT needs extending to LLTy's element size.
322 // E.g. <2 x s64> = G_SEXT <2 x s32>.
323 if (PartLLT.isVector() == LLTy.isVector() &&
324 PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits() &&
325 (!PartLLT.isVector() ||
326 PartLLT.getNumElements() == LLTy.getNumElements()) &&
327 OrigRegs.size() == 1 && Regs.size() == 1) {
328 Register SrcReg = Regs[0];
329
330 LLT LocTy = MRI.getType(SrcReg);
331
332 if (Flags.isSExt()) {
333 SrcReg = B.buildAssertSExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
334 .getReg(0);
335 } else if (Flags.isZExt()) {
336 SrcReg = B.buildAssertZExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
337 .getReg(0);
338 }
339
340 // Sometimes pointers are passed zero extended.
341 LLT OrigTy = MRI.getType(OrigRegs[0]);
342 if (OrigTy.isPointer()) {
343 LLT IntPtrTy = LLT::scalar(OrigTy.getSizeInBits());
344 B.buildIntToPtr(OrigRegs[0], B.buildTrunc(IntPtrTy, SrcReg));
345 return;
346 }
347
348 B.buildTrunc(OrigRegs[0], SrcReg);
349 return;
350 }
351
352 if (!LLTy.isVector() && !PartLLT.isVector()) {
353 assert(OrigRegs.size() == 1);
354 LLT OrigTy = MRI.getType(OrigRegs[0]);
355
356 unsigned SrcSize = PartLLT.getSizeInBits().getFixedSize() * Regs.size();
357 if (SrcSize == OrigTy.getSizeInBits())
358 B.buildMerge(OrigRegs[0], Regs);
359 else {
360 auto Widened = B.buildMerge(LLT::scalar(SrcSize), Regs);
361 B.buildTrunc(OrigRegs[0], Widened);
362 }
363
364 return;
365 }
366
367 if (PartLLT.isVector()) {
368 assert(OrigRegs.size() == 1);
369 SmallVector<Register> CastRegs(Regs.begin(), Regs.end());
370
371 // If PartLLT is a mismatched vector in both number of elements and element
372 // size, e.g. PartLLT == v2s64 and LLTy is v3s32, then first coerce it to
373 // have the same elt type, i.e. v4s32.
374 if (PartLLT.getSizeInBits() > LLTy.getSizeInBits() &&
375 PartLLT.getScalarSizeInBits() == LLTy.getScalarSizeInBits() * 2 &&
376 Regs.size() == 1) {
377 LLT NewTy = PartLLT.changeElementType(LLTy.getElementType())
378 .changeElementCount(PartLLT.getElementCount() * 2);
379 CastRegs[0] = B.buildBitcast(NewTy, Regs[0]).getReg(0);
380 PartLLT = NewTy;
381 }
382
383 if (LLTy.getScalarType() == PartLLT.getElementType()) {
384 mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
385 } else {
386 unsigned I = 0;
387 LLT GCDTy = getGCDType(LLTy, PartLLT);
388
389 // We are both splitting a vector, and bitcasting its element types. Cast
390 // the source pieces into the appropriate number of pieces with the result
391 // element type.
392 for (Register SrcReg : CastRegs)
393 CastRegs[I++] = B.buildBitcast(GCDTy, SrcReg).getReg(0);
394 mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
395 }
396
397 return;
398 }
399
400 assert(LLTy.isVector() && !PartLLT.isVector());
401
402 LLT DstEltTy = LLTy.getElementType();
403
404 // Pointer information was discarded. We'll need to coerce some register types
405 // to avoid violating type constraints.
406 LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();
407
408 assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());
409
410 if (DstEltTy == PartLLT) {
411 // Vector was trivially scalarized.
412
413 if (RealDstEltTy.isPointer()) {
414 for (Register Reg : Regs)
415 MRI.setType(Reg, RealDstEltTy);
416 }
417
418 B.buildBuildVector(OrigRegs[0], Regs);
419 } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
420 // Deal with vector with 64-bit elements decomposed to 32-bit
421 // registers. Need to create intermediate 64-bit elements.
422 SmallVector<Register, 8> EltMerges;
423 int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();
424
425 assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);
426
427 for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
428 auto Merge = B.buildMerge(RealDstEltTy, Regs.take_front(PartsPerElt));
429 // Fix the type in case this is really a vector of pointers.
430 MRI.setType(Merge.getReg(0), RealDstEltTy);
431 EltMerges.push_back(Merge.getReg(0));
432 Regs = Regs.drop_front(PartsPerElt);
433 }
434
435 B.buildBuildVector(OrigRegs[0], EltMerges);
436 } else {
437 // Vector was split, and elements promoted to a wider type.
438 // FIXME: Should handle floating point promotions.
439 LLT BVType = LLT::fixed_vector(LLTy.getNumElements(), PartLLT);
440 auto BV = B.buildBuildVector(BVType, Regs);
441 B.buildTrunc(OrigRegs[0], BV);
442 }
443}
444
445/// Create a sequence of instructions to expand the value in \p SrcReg (of type
446/// \p SrcTy) to the types in \p DstRegs (of type \p PartTy). \p ExtendOp should
447/// contain the type of scalar value extension if necessary.
448///
449/// This is used for outgoing values (vregs to physregs)
450static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
451 Register SrcReg, LLT SrcTy, LLT PartTy,
452 unsigned ExtendOp = TargetOpcode::G_ANYEXT) {
453 // We could just insert a regular copy, but this is unreachable at the moment.
454 assert(SrcTy != PartTy && "identical part types shouldn't reach here");
455
456 const unsigned PartSize = PartTy.getSizeInBits();
457
458 if (PartTy.isVector() == SrcTy.isVector() &&
459 PartTy.getScalarSizeInBits() > SrcTy.getScalarSizeInBits()) {
460 assert(DstRegs.size() == 1);
461 B.buildInstr(ExtendOp, {DstRegs[0]}, {SrcReg});
462 return;
463 }
464
465 if (SrcTy.isVector() && !PartTy.isVector() &&
466 PartSize > SrcTy.getElementType().getSizeInBits()) {
467 // Vector was scalarized, and the elements extended.
468 auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
469 for (int i = 0, e = DstRegs.size(); i != e; ++i)
470 B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
471 return;
472 }
473
474 LLT GCDTy = getGCDType(SrcTy, PartTy);
475 if (GCDTy == PartTy) {
476 // If this already evenly divisible, we can create a simple unmerge.
477 B.buildUnmerge(DstRegs, SrcReg);
478 return;
479 }
480
481 MachineRegisterInfo &MRI = *B.getMRI();
482 LLT DstTy = MRI.getType(DstRegs[0]);
483 LLT LCMTy = getLCMType(SrcTy, PartTy);
484
485 const unsigned DstSize = DstTy.getSizeInBits();
486 const unsigned SrcSize = SrcTy.getSizeInBits();
487 unsigned CoveringSize = LCMTy.getSizeInBits();
488
489 Register UnmergeSrc = SrcReg;
490
491 if (CoveringSize != SrcSize) {
492 // For scalars, it's common to be able to use a simple extension.
493 if (SrcTy.isScalar() && DstTy.isScalar()) {
494 CoveringSize = alignTo(SrcSize, DstSize);
495 LLT CoverTy = LLT::scalar(CoveringSize);
496 UnmergeSrc = B.buildInstr(ExtendOp, {CoverTy}, {SrcReg}).getReg(0);
497 } else {
498 // Widen to the common type.
499 // FIXME: This should respect the extend type
500 Register Undef = B.buildUndef(SrcTy).getReg(0);
501 SmallVector<Register, 8> MergeParts(1, SrcReg);
502 for (unsigned Size = SrcSize; Size != CoveringSize; Size += SrcSize)
503 MergeParts.push_back(Undef);
504 UnmergeSrc = B.buildMerge(LCMTy, MergeParts).getReg(0);
505 }
506 }
507
508 // Unmerge to the original registers and pad with dead defs.
509 SmallVector<Register, 8> UnmergeResults(DstRegs.begin(), DstRegs.end());
510 for (unsigned Size = DstSize * DstRegs.size(); Size != CoveringSize;
511 Size += DstSize) {
512 UnmergeResults.push_back(MRI.createGenericVirtualRegister(DstTy));
513 }
514
515 B.buildUnmerge(UnmergeResults, UnmergeSrc);
516}
517
518bool CallLowering::determineAndHandleAssignments(
519 ValueHandler &Handler, ValueAssigner &Assigner,
520 SmallVectorImpl<ArgInfo> &Args, MachineIRBuilder &MIRBuilder,
521 CallingConv::ID CallConv, bool IsVarArg, Register ThisReturnReg) const {
522 MachineFunction &MF = MIRBuilder.getMF();
523 const Function &F = MF.getFunction();
524 SmallVector<CCValAssign, 16> ArgLocs;
525
526 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, F.getContext());
527 if (!determineAssignments(Assigner, Args, CCInfo))
528 return false;
529
530 return handleAssignments(Handler, Args, CCInfo, ArgLocs, MIRBuilder,
531 ThisReturnReg);
532}
533
534static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags) {
535 if (Flags.isSExt())
536 return TargetOpcode::G_SEXT;
537 if (Flags.isZExt())
538 return TargetOpcode::G_ZEXT;
539 return TargetOpcode::G_ANYEXT;
540}
541
542bool CallLowering::determineAssignments(ValueAssigner &Assigner,
543 SmallVectorImpl<ArgInfo> &Args,
544 CCState &CCInfo) const {
545 LLVMContext &Ctx = CCInfo.getContext();
546 const CallingConv::ID CallConv = CCInfo.getCallingConv();
547
548 unsigned NumArgs = Args.size();
549 for (unsigned i = 0; i != NumArgs; ++i) {
550 EVT CurVT = EVT::getEVT(Args[i].Ty);
551
552 MVT NewVT = TLI->getRegisterTypeForCallingConv(Ctx, CallConv, CurVT);
553
554 // If we need to split the type over multiple regs, check it's a scenario
555 // we currently support.
556 unsigned NumParts =
557 TLI->getNumRegistersForCallingConv(Ctx, CallConv, CurVT);
558
559 if (NumParts == 1) {
560 // Try to use the register type if we couldn't assign the VT.
561 if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
562 Args[i].Flags[0], CCInfo))
563 return false;
564 continue;
565 }
566
567 // For incoming arguments (physregs to vregs), we could have values in
568 // physregs (or memlocs) which we want to extract and copy to vregs.
569 // During this, we might have to deal with the LLT being split across
570 // multiple regs, so we have to record this information for later.
571 //
572 // If we have outgoing args, then we have the opposite case. We have a
573 // vreg with an LLT which we want to assign to a physical location, and
574 // we might have to record that the value has to be split later.
575
576 // We're handling an incoming arg which is split over multiple regs.
577 // E.g. passing an s128 on AArch64.
578 ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
579 Args[i].Flags.clear();
580
581 for (unsigned Part = 0; Part < NumParts; ++Part) {
582 ISD::ArgFlagsTy Flags = OrigFlags;
583 if (Part == 0) {
584 Flags.setSplit();
585 } else {
586 Flags.setOrigAlign(Align(1));
587 if (Part == NumParts - 1)
588 Flags.setSplitEnd();
589 }
590
591 Args[i].Flags.push_back(Flags);
592 if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
593 Args[i].Flags[Part], CCInfo)) {
594 // Still couldn't assign this smaller part type for some reason.
595 return false;
596 }
597 }
598 }
599
600 return true;
601}
602
603bool CallLowering::handleAssignments(ValueHandler &Handler,
604 SmallVectorImpl<ArgInfo> &Args,
605 CCState &CCInfo,
606 SmallVectorImpl<CCValAssign> &ArgLocs,
607 MachineIRBuilder &MIRBuilder,
608 Register ThisReturnReg) const {
609 MachineFunction &MF = MIRBuilder.getMF();
610 MachineRegisterInfo &MRI = MF.getRegInfo();
611 const Function &F = MF.getFunction();
612 const DataLayout &DL = F.getParent()->getDataLayout();
613
614 const unsigned NumArgs = Args.size();
615
616 for (unsigned i = 0, j = 0; i != NumArgs; ++i, ++j) {
617 assert(j < ArgLocs.size() && "Skipped too many arg locs");
618 CCValAssign &VA = ArgLocs[j];
619 assert(VA.getValNo() == i && "Location doesn't correspond to current arg");
620
621 if (VA.needsCustom()) {
622 unsigned NumArgRegs =
623 Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
624 if (!NumArgRegs)
625 return false;
626 j += NumArgRegs;
627 continue;
628 }
629
630 const MVT ValVT = VA.getValVT();
631 const MVT LocVT = VA.getLocVT();
632
633 const LLT LocTy(LocVT);
634 const LLT ValTy(ValVT);
635 const LLT NewLLT = Handler.isIncomingArgumentHandler() ? LocTy : ValTy;
636 const EVT OrigVT = EVT::getEVT(Args[i].Ty);
637 const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);
638
639 // Expected to be multiple regs for a single incoming arg.
640 // There should be Regs.size() ArgLocs per argument.
641 // This should be the same as getNumRegistersForCallingConv
642 const unsigned NumParts = Args[i].Flags.size();
643
644 // Now split the registers into the assigned types.
645 Args[i].OrigRegs.assign(Args[i].Regs.begin(), Args[i].Regs.end());
646
647 if (NumParts != 1 || NewLLT != OrigTy) {
648 // If we can't directly assign the register, we need one or more
649 // intermediate values.
650 Args[i].Regs.resize(NumParts);
651
652 // For each split register, create and assign a vreg that will store
653 // the incoming component of the larger value. These will later be
654 // merged to form the final vreg.
655 for (unsigned Part = 0; Part < NumParts; ++Part)
656 Args[i].Regs[Part] = MRI.createGenericVirtualRegister(NewLLT);
657 }
658
659 assert((j + (NumParts - 1)) < ArgLocs.size() &&
660 "Too many regs for number of args");
661
662 // Coerce into outgoing value types before register assignment.
663 if (!Handler.isIncomingArgumentHandler() && OrigTy != ValTy) {
664 assert(Args[i].OrigRegs.size() == 1);
665 buildCopyToRegs(MIRBuilder, Args[i].Regs, Args[i].OrigRegs[0], OrigTy,
666 ValTy, extendOpFromFlags(Args[i].Flags[0]));
667 }
668
669 for (unsigned Part = 0; Part < NumParts; ++Part) {
670 Register ArgReg = Args[i].Regs[Part];
671 // There should be Regs.size() ArgLocs per argument.
672 VA = ArgLocs[j + Part];
673 const ISD::ArgFlagsTy Flags = Args[i].Flags[Part];
674
675 if (VA.isMemLoc() && !Flags.isByVal()) {
676 // Individual pieces may have been spilled to the stack and others
677 // passed in registers.
678
679 // TODO: The memory size may be larger than the value we need to
680 // store. We may need to adjust the offset for big endian targets.
681 LLT MemTy = Handler.getStackValueStoreType(DL, VA, Flags);
682
683 MachinePointerInfo MPO;
684 Register StackAddr = Handler.getStackAddress(
685 MemTy.getSizeInBytes(), VA.getLocMemOffset(), MPO, Flags);
686
687 Handler.assignValueToAddress(Args[i], Part, StackAddr, MemTy, MPO, VA);
688 continue;
689 }
690
691 if (VA.isMemLoc() && Flags.isByVal()) {
692 assert(Args[i].Regs.size() == 1 &&
693 "didn't expect split byval pointer");
694
695 if (Handler.isIncomingArgumentHandler()) {
696 // We just need to copy the frame index value to the pointer.
697 MachinePointerInfo MPO;
698 Register StackAddr = Handler.getStackAddress(
699 Flags.getByValSize(), VA.getLocMemOffset(), MPO, Flags);
700 MIRBuilder.buildCopy(Args[i].Regs[0], StackAddr);
701 } else {
702 // For outgoing byval arguments, insert the implicit copy byval
703 // implies, such that writes in the callee do not modify the caller's
704 // value.
705 uint64_t MemSize = Flags.getByValSize();
706 int64_t Offset = VA.getLocMemOffset();
707
708 MachinePointerInfo DstMPO;
709 Register StackAddr =
710 Handler.getStackAddress(MemSize, Offset, DstMPO, Flags);
711
712 MachinePointerInfo SrcMPO(Args[i].OrigValue);
713 if (!Args[i].OrigValue) {
714 // We still need to accurately track the stack address space if we
715 // don't know the underlying value.
716 const LLT PtrTy = MRI.getType(StackAddr);
717 SrcMPO = MachinePointerInfo(PtrTy.getAddressSpace());
718 }
719
720 Align DstAlign = std::max(Flags.getNonZeroByValAlign(),
721 inferAlignFromPtrInfo(MF, DstMPO));
722
723 Align SrcAlign = std::max(Flags.getNonZeroByValAlign(),
724 inferAlignFromPtrInfo(MF, SrcMPO));
725
726 Handler.copyArgumentMemory(Args[i], StackAddr, Args[i].Regs[0],
727 DstMPO, DstAlign, SrcMPO, SrcAlign,
728 MemSize, VA);
729 }
730 continue;
731 }
732
733 assert(!VA.needsCustom() && "custom loc should have been handled already");
734
735 if (i == 0 && ThisReturnReg.isValid() &&
736 Handler.isIncomingArgumentHandler() &&
737 isTypeIsValidForThisReturn(ValVT)) {
738 Handler.assignValueToReg(Args[i].Regs[i], ThisReturnReg, VA);
739 continue;
740 }
741
742 Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
743 }
744
745 // Now that all pieces have been assigned, re-pack the register typed values
746 // into the original value typed registers.
747 if (Handler.isIncomingArgumentHandler() && OrigVT != LocVT) {
748 // Merge the split registers into the expected larger result vregs of
749 // the original call.
750 buildCopyFromRegs(MIRBuilder, Args[i].OrigRegs, Args[i].Regs, OrigTy,
751 LocTy, Args[i].Flags[0]);
752 }
753
754 j += NumParts - 1;
755 }
756
757 return true;
758}
759
760void CallLowering::insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
761 ArrayRef<Register> VRegs, Register DemoteReg,
762 int FI) const {
763 MachineFunction &MF = MIRBuilder.getMF();
764 MachineRegisterInfo &MRI = MF.getRegInfo();
765 const DataLayout &DL = MF.getDataLayout();
766
767 SmallVector<EVT, 4> SplitVTs;
768 SmallVector<uint64_t, 4> Offsets;
769 ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
770
771 assert(VRegs.size() == SplitVTs.size());
772
773 unsigned NumValues = SplitVTs.size();
774 Align BaseAlign = DL.getPrefTypeAlign(RetTy);
775 Type *RetPtrTy = RetTy->getPointerTo(DL.getAllocaAddrSpace());
776 LLT OffsetLLTy = getLLTForType(*DL.getIntPtrType(RetPtrTy), DL);
777
778 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
779
780 for (unsigned I = 0; I < NumValues; ++I) {
781 Register Addr;
782 MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
783 auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
784 MRI.getType(VRegs[I]),
785 commonAlignment(BaseAlign, Offsets[I]));
786 MIRBuilder.buildLoad(VRegs[I], Addr, *MMO);
787 }
788}
789
790void CallLowering::insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
791 ArrayRef<Register> VRegs,
792 Register DemoteReg) const {
793 MachineFunction &MF = MIRBuilder.getMF();
794 MachineRegisterInfo &MRI = MF.getRegInfo();
795 const DataLayout &DL = MF.getDataLayout();
796
797 SmallVector<EVT, 4> SplitVTs;
798 SmallVector<uint64_t, 4> Offsets;
799 ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
800
801 assert(VRegs.size() == SplitVTs.size());
802
803 unsigned NumValues = SplitVTs.size();
804 Align BaseAlign = DL.getPrefTypeAlign(RetTy);
805 unsigned AS = DL.getAllocaAddrSpace();
806 LLT OffsetLLTy =
807 getLLTForType(*DL.getIntPtrType(RetTy->getPointerTo(AS)), DL);
808
809 MachinePointerInfo PtrInfo(AS);
810
811 for (unsigned I = 0; I < NumValues; ++I) {
812 Register Addr;
813 MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
814 auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
815 MRI.getType(VRegs[I]),
816 commonAlignment(BaseAlign, Offsets[I]));
817 MIRBuilder.buildStore(VRegs[I], Addr, *MMO);
818 }
819}
820
821void CallLowering::insertSRetIncomingArgument(
822 const Function &F, SmallVectorImpl<ArgInfo> &SplitArgs, Register &DemoteReg,
823 MachineRegisterInfo &MRI, const DataLayout &DL) const {
824 unsigned AS = DL.getAllocaAddrSpace();
825 DemoteReg = MRI.createGenericVirtualRegister(
826 LLT::pointer(AS, DL.getPointerSizeInBits(AS)));
827
828 Type *PtrTy = PointerType::get(F.getReturnType(), AS);
829
830 SmallVector<EVT, 1> ValueVTs;
831 ComputeValueVTs(*TLI, DL, PtrTy, ValueVTs);
832
833 // NOTE: Assume that a pointer won't get split into more than one VT.
834 assert(ValueVTs.size() == 1);
835
836 ArgInfo DemoteArg(DemoteReg, ValueVTs[0].getTypeForEVT(PtrTy->getContext()),
837 ArgInfo::NoArgIndex);
838 setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, F);
1. Calling 'CallLowering::setArgFlags'
839 DemoteArg.Flags[0].setSRet();
840 SplitArgs.insert(SplitArgs.begin(), DemoteArg);
841}
842
843void CallLowering::insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
844 const CallBase &CB,
845 CallLoweringInfo &Info) const {
846 const DataLayout &DL = MIRBuilder.getDataLayout();
847 Type *RetTy = CB.getType();
848 unsigned AS = DL.getAllocaAddrSpace();
849 LLT FramePtrTy = LLT::pointer(AS, DL.getPointerSizeInBits(AS));
850
851 int FI = MIRBuilder.getMF().getFrameInfo().CreateStackObject(
852 DL.getTypeAllocSize(RetTy), DL.getPrefTypeAlign(RetTy), false);
853
854 Register DemoteReg = MIRBuilder.buildFrameIndex(FramePtrTy, FI).getReg(0);
855 ArgInfo DemoteArg(DemoteReg, PointerType::get(RetTy, AS),
856 ArgInfo::NoArgIndex);
857 setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, CB);
858 DemoteArg.Flags[0].setSRet();
859
860 Info.OrigArgs.insert(Info.OrigArgs.begin(), DemoteArg);
861 Info.DemoteStackIndex = FI;
862 Info.DemoteRegister = DemoteReg;
863}
864
865bool CallLowering::checkReturn(CCState &CCInfo,
866 SmallVectorImpl<BaseArgInfo> &Outs,
867 CCAssignFn *Fn) const {
868 for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
869 MVT VT = MVT::getVT(Outs[I].Ty);
870 if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo))
871 return false;
872 }
873 return true;
874}
875
876void CallLowering::getReturnInfo(CallingConv::ID CallConv, Type *RetTy,
877 AttributeList Attrs,
878 SmallVectorImpl<BaseArgInfo> &Outs,
879 const DataLayout &DL) const {
880 LLVMContext &Context = RetTy->getContext();
881 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
882
883 SmallVector<EVT, 4> SplitVTs;
884 ComputeValueVTs(*TLI, DL, RetTy, SplitVTs);
885 addArgFlagsFromAttributes(Flags, Attrs, AttributeList::ReturnIndex);
886
887 for (EVT VT : SplitVTs) {
888 unsigned NumParts =
889 TLI->getNumRegistersForCallingConv(Context, CallConv, VT);
890 MVT RegVT = TLI->getRegisterTypeForCallingConv(Context, CallConv, VT);
891 Type *PartTy = EVT(RegVT).getTypeForEVT(Context);
892
893 for (unsigned I = 0; I < NumParts; ++I) {
894 Outs.emplace_back(PartTy, Flags);
895 }
896 }
897}
898
899bool CallLowering::checkReturnTypeForCallConv(MachineFunction &MF) const {
900 const auto &F = MF.getFunction();
901 Type *ReturnType = F.getReturnType();
902 CallingConv::ID CallConv = F.getCallingConv();
903
904 SmallVector<BaseArgInfo, 4> SplitArgs;
905 getReturnInfo(CallConv, ReturnType, F.getAttributes(), SplitArgs,
906 MF.getDataLayout());
907 return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg());
908}
909
910bool CallLowering::parametersInCSRMatch(
911 const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
912 const SmallVectorImpl<CCValAssign> &OutLocs,
913 const SmallVectorImpl<ArgInfo> &OutArgs) const {
914 for (unsigned i = 0; i < OutLocs.size(); ++i) {
915 auto &ArgLoc = OutLocs[i];
916 // If it's not a register, it's fine.
917 if (!ArgLoc.isRegLoc())
918 continue;
919
920 MCRegister PhysReg = ArgLoc.getLocReg();
921
922 // Only look at callee-saved registers.
923 if (MachineOperand::clobbersPhysReg(CallerPreservedMask, PhysReg))
924 continue;
925
926 LLVM_DEBUG(
927 dbgs()
928 << "... Call has an argument passed in a callee-saved register.\n");
929
930 // Check if it was copied from.
931 const ArgInfo &OutInfo = OutArgs[i];
932
933 if (OutInfo.Regs.size() > 1) {
934 LLVM_DEBUG(
935 dbgs() << "... Cannot handle arguments in multiple registers.\n");
936 return false;
937 }
938
939 // Check if we copy the register, walking through copies from virtual
940 // registers. Note that getDefIgnoringCopies does not ignore copies from
941 // physical registers.
942 MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
943 if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
944 LLVM_DEBUG(
945 dbgs()
946 << "... Parameter was not copied into a VReg, cannot tail call.\n");
947 return false;
948 }
949
950 // Got a copy. Verify that it's the same as the register we want.
951 Register CopyRHS = RegDef->getOperand(1).getReg();
952 if (CopyRHS != PhysReg) {
953 LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
954 "VReg, cannot tail call.\n");
955 return false;
956 }
957 }
958
959 return true;
960}
961
962bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
963 MachineFunction &MF,
964 SmallVectorImpl<ArgInfo> &InArgs,
965 ValueAssigner &CalleeAssigner,
966 ValueAssigner &CallerAssigner) const {
967 const Function &F = MF.getFunction();
968 CallingConv::ID CalleeCC = Info.CallConv;
969 CallingConv::ID CallerCC = F.getCallingConv();
970
971 if (CallerCC == CalleeCC)
972 return true;
973
974 SmallVector<CCValAssign, 16> ArgLocs1;
975 CCState CCInfo1(CalleeCC, Info.IsVarArg, MF, ArgLocs1, F.getContext());
976 if (!determineAssignments(CalleeAssigner, InArgs, CCInfo1))
977 return false;
978
979 SmallVector<CCValAssign, 16> ArgLocs2;
980 CCState CCInfo2(CallerCC, F.isVarArg(), MF, ArgLocs2, F.getContext());
981 if (!determineAssignments(CallerAssigner, InArgs, CCInfo2))
982 return false;
983
984 // We need the argument locations to match up exactly. If there's more in
985 // one than the other, then we are done.
986 if (ArgLocs1.size() != ArgLocs2.size())
987 return false;
988
989 // Make sure that each location is passed in exactly the same way.
990 for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
991 const CCValAssign &Loc1 = ArgLocs1[i];
992 const CCValAssign &Loc2 = ArgLocs2[i];
993
994 // We need both of them to be the same. So if one is a register and one
995 // isn't, we're done.
996 if (Loc1.isRegLoc() != Loc2.isRegLoc())
997 return false;
998
999 if (Loc1.isRegLoc()) {
1000 // If they don't have the same register location, we're done.
1001 if (Loc1.getLocReg() != Loc2.getLocReg())
1002 return false;
1003
1004 // They matched, so we can move to the next ArgLoc.
1005 continue;
1006 }
1007
1008 // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
1009 if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
1010 return false;
1011 }
1012
1013 return true;
1014}
1015
1016LLT CallLowering::ValueHandler::getStackValueStoreType(
1017 const DataLayout &DL, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const {
1018 const MVT ValVT = VA.getValVT();
1019 if (ValVT != MVT::iPTR) {
1020 LLT ValTy(ValVT);
1021
1022 // We lost the pointeriness going through CCValAssign, so try to restore it
1023 // based on the flags.
1024 if (Flags.isPointer()) {
1025 LLT PtrTy = LLT::pointer(Flags.getPointerAddrSpace(),
1026 ValTy.getScalarSizeInBits());
1027 if (ValVT.isVector())
1028 return LLT::vector(ValTy.getElementCount(), PtrTy);
1029 return PtrTy;
1030 }
1031
1032 return ValTy;
1033 }
1034
1035 unsigned AddrSpace = Flags.getPointerAddrSpace();
1036 return LLT::pointer(AddrSpace, DL.getPointerSize(AddrSpace));
1037}
1038
1039void CallLowering::ValueHandler::copyArgumentMemory(
1040 const ArgInfo &Arg, Register DstPtr, Register SrcPtr,
1041 const MachinePointerInfo &DstPtrInfo, Align DstAlign,
1042 const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize,
1043 CCValAssign &VA) const {
1044 MachineFunction &MF = MIRBuilder.getMF();
1045 MachineMemOperand *SrcMMO = MF.getMachineMemOperand(
1046 SrcPtrInfo,
1047 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable, MemSize,
1048 SrcAlign);
1049
1050 MachineMemOperand *DstMMO = MF.getMachineMemOperand(
1051 DstPtrInfo,
1052 MachineMemOperand::MOStore | MachineMemOperand::MODereferenceable,
1053 MemSize, DstAlign);
1054
1055 const LLT PtrTy = MRI.getType(DstPtr);
1056 const LLT SizeTy = LLT::scalar(PtrTy.getSizeInBits());
1057
1058 auto SizeConst = MIRBuilder.buildConstant(SizeTy, MemSize);
1059 MIRBuilder.buildMemCpy(DstPtr, SrcPtr, SizeConst, *DstMMO, *SrcMMO);
1060}
1061
1062Register CallLowering::ValueHandler::extendRegister(Register ValReg,
1063 CCValAssign &VA,
1064 unsigned MaxSizeBits) {
1065 LLT LocTy{VA.getLocVT()};
1066 LLT ValTy{VA.getValVT()};
1067
1068 if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
1069 return ValReg;
1070
1071 if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
1072 if (MaxSizeBits <= ValTy.getSizeInBits())
1073 return ValReg;
1074 LocTy = LLT::scalar(MaxSizeBits);
1075 }
1076
1077 const LLT ValRegTy = MRI.getType(ValReg);
1078 if (ValRegTy.isPointer()) {
1079 // The x32 ABI wants to zero extend 32-bit pointers to 64-bit registers, so
1080 // we have to cast to do the extension.
1081 LLT IntPtrTy = LLT::scalar(ValRegTy.getSizeInBits());
1082 ValReg = MIRBuilder.buildPtrToInt(IntPtrTy, ValReg).getReg(0);
1083 }
1084
1085 switch (VA.getLocInfo()) {
1086 default: break;
1087 case CCValAssign::Full:
1088 case CCValAssign::BCvt:
1089 // FIXME: bitconverting between vector types may or may not be a
1090 // nop in big-endian situations.
1091 return ValReg;
1092 case CCValAssign::AExt: {
1093 auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
1094 return MIB.getReg(0);
1095 }
1096 case CCValAssign::SExt: {
1097 Register NewReg = MRI.createGenericVirtualRegister(LocTy);
1098 MIRBuilder.buildSExt(NewReg, ValReg);
1099 return NewReg;
1100 }
1101 case CCValAssign::ZExt: {
1102 Register NewReg = MRI.createGenericVirtualRegister(LocTy);
1103 MIRBuilder.buildZExt(NewReg, ValReg);
1104 return NewReg;
1105 }
1106 }
1107 llvm_unreachable("unable to extend register");
1108}
1109
1110void CallLowering::ValueAssigner::anchor() {}
1111
1112Register CallLowering::IncomingValueHandler::buildExtensionHint(CCValAssign &VA,
1113 Register SrcReg,
1114 LLT NarrowTy) {
1115 switch (VA.getLocInfo()) {
1116 case CCValAssign::LocInfo::ZExt: {
1117 return MIRBuilder
1118 .buildAssertZExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
1119 NarrowTy.getScalarSizeInBits())
1120 .getReg(0);
1121 }
1122 case CCValAssign::LocInfo::SExt: {
1123 return MIRBuilder
1124 .buildAssertSExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
1125 NarrowTy.getScalarSizeInBits())
1126 .getReg(0);
1127 break;
1128 }
1129 default:
1130 return SrcReg;
1131 }
1132}
1133
1134/// Check if we can use a basic COPY instruction between the two types.
1135///
1136/// We're currently building on top of the infrastructure using MVT, which loses
1137/// pointer information in the CCValAssign. We accept copies from physical
1138/// registers that have been reported as integers if it's to an equivalent sized
1139/// pointer LLT.
1140static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy) {
1141 if (SrcTy == DstTy)
1142 return true;
1143
1144 if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1145 return false;
1146
1147 SrcTy = SrcTy.getScalarType();
1148 DstTy = DstTy.getScalarType();
1149
1150 return (SrcTy.isPointer() && DstTy.isScalar()) ||
1151 (DstTy.isScalar() && SrcTy.isPointer());
1152}
1153
1154void CallLowering::IncomingValueHandler::assignValueToReg(Register ValVReg,
1155 Register PhysReg,
1156 CCValAssign &VA) {
1157 const MVT LocVT = VA.getLocVT();
1158 const LLT LocTy(LocVT);
1159 const LLT RegTy = MRI.getType(ValVReg);
1160
1161 if (isCopyCompatibleType(RegTy, LocTy)) {
1162 MIRBuilder.buildCopy(ValVReg, PhysReg);
1163 return;
1164 }
1165
1166 auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);
1167 auto Hint = buildExtensionHint(VA, Copy.getReg(0), RegTy);
1168 MIRBuilder.buildTrunc(ValVReg, Hint);
1169}