clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CallLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Analysis -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ASMParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/BinaryFormat -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitstream -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /include/llvm/CodeGen -I /include/llvm/CodeGen/PBQP -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Coroutines -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData/Coverage -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/CodeView -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/DWARF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/MSF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/PDB -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Demangle -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/JITLink -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/Orc -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenACC -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenMP -I /include/llvm/CodeGen/GlobalISel -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IRReader -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/LTO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Linker -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC/MCParser -I /include/llvm/CodeGen/MIRParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Object -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Option -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Passes -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Scalar -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ADT -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/Symbolize -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Target -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Utils -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Vectorize -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/IPO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libLLVM/../include -I /usr/src/gnu/usr.bin/clang/libLLVM/obj -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup 
-fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
| 1 | |
| 2 | |
| 3 | |
| 4 | |
| 5 | |
| 6 | |
| 7 | |
| 8 | |
| 9 | |
| 10 | |
| 11 | |
| 12 | |
| 13 | |
| 14 | #include "llvm/CodeGen/Analysis.h" |
| 15 | #include "llvm/CodeGen/GlobalISel/CallLowering.h" |
| 16 | #include "llvm/CodeGen/GlobalISel/Utils.h" |
| 17 | #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" |
| 18 | #include "llvm/CodeGen/MachineOperand.h" |
| 19 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
| 20 | #include "llvm/CodeGen/TargetLowering.h" |
| 21 | #include "llvm/IR/DataLayout.h" |
| 22 | #include "llvm/IR/Instructions.h" |
| 23 | #include "llvm/IR/LLVMContext.h" |
| 24 | #include "llvm/IR/Module.h" |
| 25 | #include "llvm/Target/TargetMachine.h" |
| 26 | |
| 27 | #define DEBUG_TYPE "call-lowering" |
| 28 | |
| 29 | using namespace llvm; |
| 30 | |
| 31 | void CallLowering::anchor() {} |
| 32 | |
| 33 | |
| 34 | static void |
| 35 | addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags, |
| 36 | const std::function<bool(Attribute::AttrKind)> &AttrFn) { |
| 37 | if (AttrFn(Attribute::SExt)) |
| 4 | | Assuming the condition is false | |
|
| |
| 38 | Flags.setSExt(); |
| 39 | if (AttrFn(Attribute::ZExt)) |
| 6 | | Assuming the condition is false | |
|
| |
| 40 | Flags.setZExt(); |
| 41 | if (AttrFn(Attribute::InReg)) |
| 8 | | Assuming the condition is false | |
|
| |
| 42 | Flags.setInReg(); |
| 43 | if (AttrFn(Attribute::StructRet)) |
| 10 | | Assuming the condition is false | |
|
| |
| 44 | Flags.setSRet(); |
| 45 | if (AttrFn(Attribute::Nest)) |
| 12 | | Assuming the condition is false | |
|
| |
| 46 | Flags.setNest(); |
| 47 | if (AttrFn(Attribute::ByVal)) |
| 14 | | Assuming the condition is false | |
|
| |
| 48 | Flags.setByVal(); |
| 49 | if (AttrFn(Attribute::Preallocated)) |
| 16 | | Assuming the condition is false | |
|
| |
| 50 | Flags.setPreallocated(); |
| 51 | if (AttrFn(Attribute::InAlloca)) |
| 18 | | Assuming the condition is false | |
|
| |
| 52 | Flags.setInAlloca(); |
| 53 | if (AttrFn(Attribute::Returned)) |
| 20 | | Assuming the condition is false | |
|
| |
| 54 | Flags.setReturned(); |
| 55 | if (AttrFn(Attribute::SwiftSelf)) |
| 22 | | Assuming the condition is false | |
|
| |
| 56 | Flags.setSwiftSelf(); |
| 57 | if (AttrFn(Attribute::SwiftAsync)) |
| 24 | | Assuming the condition is false | |
|
| |
| 58 | Flags.setSwiftAsync(); |
| 59 | if (AttrFn(Attribute::SwiftError)) |
| 26 | | Assuming the condition is false | |
|
| |
| 60 | Flags.setSwiftError(); |
| 61 | } |
| 28 | | Returning without writing to 'Flags.IsByVal', which participates in a condition later | |
|
| 62 | |
| 63 | ISD::ArgFlagsTy CallLowering::getAttributesForArgIdx(const CallBase &Call, |
| 64 | unsigned ArgIdx) const { |
| 65 | ISD::ArgFlagsTy Flags; |
| 66 | addFlagsUsingAttrFn(Flags, [&Call, &ArgIdx](Attribute::AttrKind Attr) { |
| 67 | return Call.paramHasAttr(ArgIdx, Attr); |
| 68 | }); |
| 69 | return Flags; |
| 70 | } |
| 71 | |
| 72 | void CallLowering::addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags, |
| 73 | const AttributeList &Attrs, |
| 74 | unsigned OpIdx) const { |
| 75 | addFlagsUsingAttrFn(Flags, [&Attrs, &OpIdx](Attribute::AttrKind Attr) { |
| 3 | | Calling 'addFlagsUsingAttrFn' | |
|
| 29 | | Returning from 'addFlagsUsingAttrFn' | |
|
| 76 | return Attrs.hasAttribute(OpIdx, Attr); |
| 77 | }); |
| 78 | } |
| 30 | | Returning without writing to 'Flags.IsByVal', which participates in a condition later | |
|
| 79 | |
| 80 | bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB, |
| 81 | ArrayRef<Register> ResRegs, |
| 82 | ArrayRef<ArrayRef<Register>> ArgRegs, |
| 83 | Register SwiftErrorVReg, |
| 84 | std::function<unsigned()> GetCalleeReg) const { |
| 85 | CallLoweringInfo Info; |
| 86 | const DataLayout &DL = MIRBuilder.getDataLayout(); |
| 87 | MachineFunction &MF = MIRBuilder.getMF(); |
| 88 | bool CanBeTailCalled = CB.isTailCall() && |
| 89 | isInTailCallPosition(CB, MF.getTarget()) && |
| 90 | (MF.getFunction() |
| 91 | .getFnAttribute("disable-tail-calls") |
| 92 | .getValueAsString() != "true"); |
| 93 | |
| 94 | CallingConv::ID CallConv = CB.getCallingConv(); |
| 95 | Type *RetTy = CB.getType(); |
| 96 | bool IsVarArg = CB.getFunctionType()->isVarArg(); |
| 97 | |
| 98 | SmallVector<BaseArgInfo, 4> SplitArgs; |
| 99 | getReturnInfo(CallConv, RetTy, CB.getAttributes(), SplitArgs, DL); |
| 100 | Info.CanLowerReturn = canLowerReturn(MF, CallConv, SplitArgs, IsVarArg); |
| 101 | |
| 102 | if (!Info.CanLowerReturn) { |
| 103 | |
| 104 | insertSRetOutgoingArgument(MIRBuilder, CB, Info); |
| 105 | |
| 106 | |
| 107 | |
| 108 | CanBeTailCalled = false; |
| 109 | } |
| 110 | |
| 111 | |
| 112 | |
| 113 | |
| 114 | unsigned i = 0; |
| 115 | unsigned NumFixedArgs = CB.getFunctionType()->getNumParams(); |
| 116 | for (auto &Arg : CB.args()) { |
| 117 | ArgInfo OrigArg{ArgRegs[i], *Arg.get(), i, getAttributesForArgIdx(CB, i), |
| 118 | i < NumFixedArgs}; |
| 119 | setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB); |
| 120 | |
| 121 | |
| 122 | |
| 123 | if (OrigArg.Flags[0].isSRet() && isa<Instruction>(&Arg)) |
| 124 | CanBeTailCalled = false; |
| 125 | |
| 126 | Info.OrigArgs.push_back(OrigArg); |
| 127 | ++i; |
| 128 | } |
| 129 | |
| 130 | |
| 131 | |
| 132 | const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts(); |
| 133 | if (const Function *F = dyn_cast<Function>(CalleeV)) |
| 134 | Info.Callee = MachineOperand::CreateGA(F, 0); |
| 135 | else |
| 136 | Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false); |
| 137 | |
| 138 | Info.OrigRet = ArgInfo{ResRegs, RetTy, 0, ISD::ArgFlagsTy{}}; |
| 139 | if (!Info.OrigRet.Ty->isVoidTy()) |
| 140 | setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB); |
| 141 | |
| 142 | Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees); |
| 143 | Info.CallConv = CallConv; |
| 144 | Info.SwiftErrorVReg = SwiftErrorVReg; |
| 145 | Info.IsMustTailCall = CB.isMustTailCall(); |
| 146 | Info.IsTailCall = CanBeTailCalled; |
| 147 | Info.IsVarArg = IsVarArg; |
| 148 | return lowerCall(MIRBuilder, Info); |
| 149 | } |
| 150 | |
| 151 | template <typename FuncInfoTy> |
| 152 | void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx, |
| 153 | const DataLayout &DL, |
| 154 | const FuncInfoTy &FuncInfo) const { |
| 155 | auto &Flags = Arg.Flags[0]; |
| 156 | const AttributeList &Attrs = FuncInfo.getAttributes(); |
| 157 | addArgFlagsFromAttributes(Flags, Attrs, OpIdx); |
| 2 | | Calling 'CallLowering::addArgFlagsFromAttributes' | |
|
| 31 | | Returning from 'CallLowering::addArgFlagsFromAttributes' | |
|
| 158 | |
| 159 | PointerType *PtrTy = dyn_cast<PointerType>(Arg.Ty->getScalarType()); |
| 32 | | Assuming the object is not a 'PointerType' | |
|
| 33 | | 'PtrTy' initialized to a null pointer value | |
|
| 160 | if (PtrTy) { |
| |
| 161 | Flags.setPointer(); |
| 162 | Flags.setPointerAddrSpace(PtrTy->getPointerAddressSpace()); |
| 163 | } |
| 164 | |
| 165 | Align MemAlign = DL.getABITypeAlign(Arg.Ty); |
| 166 | if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) { |
| 35 | | Assuming the condition is true | |
|
| 167 | assert(OpIdx >= AttributeList::FirstArgIndex); |
| 168 | Type *ElementTy = PtrTy->getElementType(); |
| 36 | | Called C++ object pointer is null |
|
| 169 | |
| 170 | auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType(); |
| 171 | Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy)); |
| 172 | |
| 173 | |
| 174 | |
| 175 | if (auto ParamAlign = |
| 176 | FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex)) |
| 177 | MemAlign = *ParamAlign; |
| 178 | else if ((ParamAlign = |
| 179 | FuncInfo.getParamAlign(OpIdx - AttributeList::FirstArgIndex))) |
| 180 | MemAlign = *ParamAlign; |
| 181 | else |
| 182 | MemAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL)); |
| 183 | } else if (OpIdx >= AttributeList::FirstArgIndex) { |
| 184 | if (auto ParamAlign = |
| 185 | FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex)) |
| 186 | MemAlign = *ParamAlign; |
| 187 | } |
| 188 | Flags.setMemAlign(MemAlign); |
| 189 | Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty)); |
| 190 | |
| 191 | |
| 192 | |
| 193 | if (Flags.isSwiftSelf()) |
| 194 | Flags.setReturned(false); |
| 195 | } |
| 196 | |
| 197 | template void |
| 198 | CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx, |
| 199 | const DataLayout &DL, |
| 200 | const Function &FuncInfo) const; |
| 201 | |
| 202 | template void |
| 203 | CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx, |
| 204 | const DataLayout &DL, |
| 205 | const CallBase &FuncInfo) const; |
| 206 | |
| 207 | void CallLowering::splitToValueTypes(const ArgInfo &OrigArg, |
| 208 | SmallVectorImpl<ArgInfo> &SplitArgs, |
| 209 | const DataLayout &DL, |
| 210 | CallingConv::ID CallConv, |
| 211 | SmallVectorImpl<uint64_t> *Offsets) const { |
| 212 | LLVMContext &Ctx = OrigArg.Ty->getContext(); |
| 213 | |
| 214 | SmallVector<EVT, 4> SplitVTs; |
| 215 | ComputeValueVTs(*TLI, DL, OrigArg.Ty, SplitVTs, Offsets, 0); |
| 216 | |
| 217 | if (SplitVTs.size() == 0) |
| 218 | return; |
| 219 | |
| 220 | if (SplitVTs.size() == 1) { |
| 221 | |
| 222 | |
| 223 | SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx), |
| 224 | OrigArg.OrigArgIndex, OrigArg.Flags[0], |
| 225 | OrigArg.IsFixed, OrigArg.OrigValue); |
| 226 | return; |
| 227 | } |
| 228 | |
| 229 | |
| 230 | assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch"); |
| 231 | |
| 232 | bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters( |
| 233 | OrigArg.Ty, CallConv, false, DL); |
| 234 | for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) { |
| 235 | Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx); |
| 236 | SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.OrigArgIndex, |
| 237 | OrigArg.Flags[0], OrigArg.IsFixed); |
| 238 | if (NeedsRegBlock) |
| 239 | SplitArgs.back().Flags[0].setInConsecutiveRegs(); |
| 240 | } |
| 241 | |
| 242 | SplitArgs.back().Flags[0].setInConsecutiveRegsLast(); |
| 243 | } |
| 244 | |
| 245 | |
| 246 | static MachineInstrBuilder |
| 247 | mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs, |
| 248 | ArrayRef<Register> SrcRegs) { |
| 249 | MachineRegisterInfo &MRI = *B.getMRI(); |
| 250 | LLT LLTy = MRI.getType(DstRegs[0]); |
| 251 | LLT PartLLT = MRI.getType(SrcRegs[0]); |
| 252 | |
| 253 | |
| 254 | LLT LCMTy = getLCMType(LLTy, PartLLT); |
| 255 | if (LCMTy == LLTy) { |
| 256 | |
| 257 | assert(DstRegs.size() == 1); |
| 258 | return B.buildConcatVectors(DstRegs[0], SrcRegs); |
| 259 | } |
| 260 | |
| 261 | |
| 262 | |
| 263 | Register UnmergeSrcReg; |
| 264 | if (LCMTy != PartLLT) { |
| 265 | |
| 266 | |
| 267 | |
| 268 | |
| 269 | |
| 270 | |
| 271 | const int NumWide = LCMTy.getSizeInBits() / PartLLT.getSizeInBits(); |
| 272 | Register Undef = B.buildUndef(PartLLT).getReg(0); |
| 273 | |
| 274 | |
| 275 | SmallVector<Register, 8> WidenedSrcs(NumWide, Undef); |
| 276 | |
| 277 | |
| 278 | std::copy(SrcRegs.begin(), SrcRegs.end(), WidenedSrcs.begin()); |
| 279 | UnmergeSrcReg = B.buildConcatVectors(LCMTy, WidenedSrcs).getReg(0); |
| 280 | } else { |
| 281 | |
| 282 | |
| 283 | assert(SrcRegs.size() == 1); |
| 284 | UnmergeSrcReg = SrcRegs[0]; |
| 285 | } |
| 286 | |
| 287 | int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits(); |
| 288 | |
| 289 | SmallVector<Register, 8> PadDstRegs(NumDst); |
| 290 | std::copy(DstRegs.begin(), DstRegs.end(), PadDstRegs.begin()); |
| 291 | |
| 292 | |
| 293 | for (int I = DstRegs.size(); I != NumDst; ++I) |
| 294 | PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy); |
| 295 | |
| 296 | return B.buildUnmerge(PadDstRegs, UnmergeSrcReg); |
| 297 | } |
| 298 | |
| 299 | |
| 300 | |
| 301 | |
| 302 | |
| 303 | static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs, |
| 304 | ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT, |
| 305 | const ISD::ArgFlagsTy Flags) { |
| 306 | MachineRegisterInfo &MRI = *B.getMRI(); |
| 307 | |
| 308 | if (PartLLT == LLTy) { |
| 309 | |
| 310 | |
| 311 | assert(OrigRegs[0] == Regs[0]); |
| 312 | return; |
| 313 | } |
| 314 | |
| 315 | if (PartLLT.getSizeInBits() == LLTy.getSizeInBits() && OrigRegs.size() == 1 && |
| 316 | Regs.size() == 1) { |
| 317 | B.buildBitcast(OrigRegs[0], Regs[0]); |
| 318 | return; |
| 319 | } |
| 320 | |
| 321 | |
| 322 | |
| 323 | if (PartLLT.isVector() == LLTy.isVector() && |
| 324 | PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits() && |
| 325 | (!PartLLT.isVector() || |
| 326 | PartLLT.getNumElements() == LLTy.getNumElements()) && |
| 327 | OrigRegs.size() == 1 && Regs.size() == 1) { |
| 328 | Register SrcReg = Regs[0]; |
| 329 | |
| 330 | LLT LocTy = MRI.getType(SrcReg); |
| 331 | |
| 332 | if (Flags.isSExt()) { |
| 333 | SrcReg = B.buildAssertSExt(LocTy, SrcReg, LLTy.getScalarSizeInBits()) |
| 334 | .getReg(0); |
| 335 | } else if (Flags.isZExt()) { |
| 336 | SrcReg = B.buildAssertZExt(LocTy, SrcReg, LLTy.getScalarSizeInBits()) |
| 337 | .getReg(0); |
| 338 | } |
| 339 | |
| 340 | |
| 341 | LLT OrigTy = MRI.getType(OrigRegs[0]); |
| 342 | if (OrigTy.isPointer()) { |
| 343 | LLT IntPtrTy = LLT::scalar(OrigTy.getSizeInBits()); |
| 344 | B.buildIntToPtr(OrigRegs[0], B.buildTrunc(IntPtrTy, SrcReg)); |
| 345 | return; |
| 346 | } |
| 347 | |
| 348 | B.buildTrunc(OrigRegs[0], SrcReg); |
| 349 | return; |
| 350 | } |
| 351 | |
| 352 | if (!LLTy.isVector() && !PartLLT.isVector()) { |
| 353 | assert(OrigRegs.size() == 1); |
| 354 | LLT OrigTy = MRI.getType(OrigRegs[0]); |
| 355 | |
| 356 | unsigned SrcSize = PartLLT.getSizeInBits().getFixedSize() * Regs.size(); |
| 357 | if (SrcSize == OrigTy.getSizeInBits()) |
| 358 | B.buildMerge(OrigRegs[0], Regs); |
| 359 | else { |
| 360 | auto Widened = B.buildMerge(LLT::scalar(SrcSize), Regs); |
| 361 | B.buildTrunc(OrigRegs[0], Widened); |
| 362 | } |
| 363 | |
| 364 | return; |
| 365 | } |
| 366 | |
| 367 | if (PartLLT.isVector()) { |
| 368 | assert(OrigRegs.size() == 1); |
| 369 | SmallVector<Register> CastRegs(Regs.begin(), Regs.end()); |
| 370 | |
| 371 | |
| 372 | |
| 373 | |
| 374 | if (PartLLT.getSizeInBits() > LLTy.getSizeInBits() && |
| 375 | PartLLT.getScalarSizeInBits() == LLTy.getScalarSizeInBits() * 2 && |
| 376 | Regs.size() == 1) { |
| 377 | LLT NewTy = PartLLT.changeElementType(LLTy.getElementType()) |
| 378 | .changeElementCount(PartLLT.getElementCount() * 2); |
| 379 | CastRegs[0] = B.buildBitcast(NewTy, Regs[0]).getReg(0); |
| 380 | PartLLT = NewTy; |
| 381 | } |
| 382 | |
| 383 | if (LLTy.getScalarType() == PartLLT.getElementType()) { |
| 384 | mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs); |
| 385 | } else { |
| 386 | unsigned I = 0; |
| 387 | LLT GCDTy = getGCDType(LLTy, PartLLT); |
| 388 | |
| 389 | |
| 390 | |
| 391 | |
| 392 | for (Register SrcReg : CastRegs) |
| 393 | CastRegs[I++] = B.buildBitcast(GCDTy, SrcReg).getReg(0); |
| 394 | mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs); |
| 395 | } |
| 396 | |
| 397 | return; |
| 398 | } |
| 399 | |
| 400 | assert(LLTy.isVector() && !PartLLT.isVector()); |
| 401 | |
| 402 | LLT DstEltTy = LLTy.getElementType(); |
| 403 | |
| 404 | |
| 405 | |
| 406 | LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType(); |
| 407 | |
| 408 | assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits()); |
| 409 | |
| 410 | if (DstEltTy == PartLLT) { |
| 411 | |
| 412 | |
| 413 | if (RealDstEltTy.isPointer()) { |
| 414 | for (Register Reg : Regs) |
| 415 | MRI.setType(Reg, RealDstEltTy); |
| 416 | } |
| 417 | |
| 418 | B.buildBuildVector(OrigRegs[0], Regs); |
| 419 | } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) { |
| 420 | |
| 421 | |
| 422 | SmallVector<Register, 8> EltMerges; |
| 423 | int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits(); |
| 424 | |
| 425 | assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0); |
| 426 | |
| 427 | for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) { |
| 428 | auto Merge = B.buildMerge(RealDstEltTy, Regs.take_front(PartsPerElt)); |
| 429 | |
| 430 | MRI.setType(Merge.getReg(0), RealDstEltTy); |
| 431 | EltMerges.push_back(Merge.getReg(0)); |
| 432 | Regs = Regs.drop_front(PartsPerElt); |
| 433 | } |
| 434 | |
| 435 | B.buildBuildVector(OrigRegs[0], EltMerges); |
| 436 | } else { |
| 437 | |
| 438 | |
| 439 | LLT BVType = LLT::fixed_vector(LLTy.getNumElements(), PartLLT); |
| 440 | auto BV = B.buildBuildVector(BVType, Regs); |
| 441 | B.buildTrunc(OrigRegs[0], BV); |
| 442 | } |
| 443 | } |
| 444 | |
| 445 | |
| 446 | |
| 447 | |
| 448 | |
| 449 | |
| 450 | static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs, |
| 451 | Register SrcReg, LLT SrcTy, LLT PartTy, |
| 452 | unsigned ExtendOp = TargetOpcode::G_ANYEXT) { |
| 453 | |
| 454 | assert(SrcTy != PartTy && "identical part types shouldn't reach here"); |
| 455 | |
| 456 | const unsigned PartSize = PartTy.getSizeInBits(); |
| 457 | |
| 458 | if (PartTy.isVector() == SrcTy.isVector() && |
| 459 | PartTy.getScalarSizeInBits() > SrcTy.getScalarSizeInBits()) { |
| 460 | assert(DstRegs.size() == 1); |
| 461 | B.buildInstr(ExtendOp, {DstRegs[0]}, {SrcReg}); |
| 462 | return; |
| 463 | } |
| 464 | |
| 465 | if (SrcTy.isVector() && !PartTy.isVector() && |
| 466 | PartSize > SrcTy.getElementType().getSizeInBits()) { |
| 467 | |
| 468 | auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg); |
| 469 | for (int i = 0, e = DstRegs.size(); i != e; ++i) |
| 470 | B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i)); |
| 471 | return; |
| 472 | } |
| 473 | |
| 474 | LLT GCDTy = getGCDType(SrcTy, PartTy); |
| 475 | if (GCDTy == PartTy) { |
| 476 | |
| 477 | B.buildUnmerge(DstRegs, SrcReg); |
| 478 | return; |
| 479 | } |
| 480 | |
| 481 | MachineRegisterInfo &MRI = *B.getMRI(); |
| 482 | LLT DstTy = MRI.getType(DstRegs[0]); |
| 483 | LLT LCMTy = getLCMType(SrcTy, PartTy); |
| 484 | |
| 485 | const unsigned DstSize = DstTy.getSizeInBits(); |
| 486 | const unsigned SrcSize = SrcTy.getSizeInBits(); |
| 487 | unsigned CoveringSize = LCMTy.getSizeInBits(); |
| 488 | |
| 489 | Register UnmergeSrc = SrcReg; |
| 490 | |
| 491 | if (CoveringSize != SrcSize) { |
| 492 | |
| 493 | if (SrcTy.isScalar() && DstTy.isScalar()) { |
| 494 | CoveringSize = alignTo(SrcSize, DstSize); |
| 495 | LLT CoverTy = LLT::scalar(CoveringSize); |
| 496 | UnmergeSrc = B.buildInstr(ExtendOp, {CoverTy}, {SrcReg}).getReg(0); |
| 497 | } else { |
| 498 | |
| 499 | |
| 500 | Register Undef = B.buildUndef(SrcTy).getReg(0); |
| 501 | SmallVector<Register, 8> MergeParts(1, SrcReg); |
| 502 | for (unsigned Size = SrcSize; Size != CoveringSize; Size += SrcSize) |
| 503 | MergeParts.push_back(Undef); |
| 504 | UnmergeSrc = B.buildMerge(LCMTy, MergeParts).getReg(0); |
| 505 | } |
| 506 | } |
| 507 | |
| 508 | |
| 509 | SmallVector<Register, 8> UnmergeResults(DstRegs.begin(), DstRegs.end()); |
| 510 | for (unsigned Size = DstSize * DstRegs.size(); Size != CoveringSize; |
| 511 | Size += DstSize) { |
| 512 | UnmergeResults.push_back(MRI.createGenericVirtualRegister(DstTy)); |
| 513 | } |
| 514 | |
| 515 | B.buildUnmerge(UnmergeResults, UnmergeSrc); |
| 516 | } |
| 517 | |
| 518 | bool CallLowering::determineAndHandleAssignments( |
| 519 | ValueHandler &Handler, ValueAssigner &Assigner, |
| 520 | SmallVectorImpl<ArgInfo> &Args, MachineIRBuilder &MIRBuilder, |
| 521 | CallingConv::ID CallConv, bool IsVarArg, Register ThisReturnReg) const { |
| 522 | MachineFunction &MF = MIRBuilder.getMF(); |
| 523 | const Function &F = MF.getFunction(); |
| 524 | SmallVector<CCValAssign, 16> ArgLocs; |
| 525 | |
| 526 | CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, F.getContext()); |
| 527 | if (!determineAssignments(Assigner, Args, CCInfo)) |
| 528 | return false; |
| 529 | |
| 530 | return handleAssignments(Handler, Args, CCInfo, ArgLocs, MIRBuilder, |
| 531 | ThisReturnReg); |
| 532 | } |
| 533 | |
| 534 | static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags) { |
| 535 | if (Flags.isSExt()) |
| 536 | return TargetOpcode::G_SEXT; |
| 537 | if (Flags.isZExt()) |
| 538 | return TargetOpcode::G_ZEXT; |
| 539 | return TargetOpcode::G_ANYEXT; |
| 540 | } |
| 541 | |
| 542 | bool CallLowering::determineAssignments(ValueAssigner &Assigner, |
| 543 | SmallVectorImpl<ArgInfo> &Args, |
| 544 | CCState &CCInfo) const { |
| 545 | LLVMContext &Ctx = CCInfo.getContext(); |
| 546 | const CallingConv::ID CallConv = CCInfo.getCallingConv(); |
| 547 | |
| 548 | unsigned NumArgs = Args.size(); |
| 549 | for (unsigned i = 0; i != NumArgs; ++i) { |
| 550 | EVT CurVT = EVT::getEVT(Args[i].Ty); |
| 551 | |
| 552 | MVT NewVT = TLI->getRegisterTypeForCallingConv(Ctx, CallConv, CurVT); |
| 553 | |
| 554 | |
| 555 | |
| 556 | unsigned NumParts = |
| 557 | TLI->getNumRegistersForCallingConv(Ctx, CallConv, CurVT); |
| 558 | |
| 559 | if (NumParts == 1) { |
| 560 | |
| 561 | if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i], |
| 562 | Args[i].Flags[0], CCInfo)) |
| 563 | return false; |
| 564 | continue; |
| 565 | } |
| 566 | |
| 567 | |
| 568 | |
| 569 | |
| 570 | |
| 571 | |
| 572 | |
| 573 | |
| 574 | |
| 575 | |
| 576 | |
| 577 | |
| 578 | ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0]; |
| 579 | Args[i].Flags.clear(); |
| 580 | |
| 581 | for (unsigned Part = 0; Part < NumParts; ++Part) { |
| 582 | ISD::ArgFlagsTy Flags = OrigFlags; |
| 583 | if (Part == 0) { |
| 584 | Flags.setSplit(); |
| 585 | } else { |
| 586 | Flags.setOrigAlign(Align(1)); |
| 587 | if (Part == NumParts - 1) |
| 588 | Flags.setSplitEnd(); |
| 589 | } |
| 590 | |
| 591 | Args[i].Flags.push_back(Flags); |
| 592 | if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i], |
| 593 | Args[i].Flags[Part], CCInfo)) { |
| 594 | |
| 595 | return false; |
| 596 | } |
| 597 | } |
| 598 | } |
| 599 | |
| 600 | return true; |
| 601 | } |
| 602 | |
| 603 | bool CallLowering::handleAssignments(ValueHandler &Handler, |
| 604 | SmallVectorImpl<ArgInfo> &Args, |
| 605 | CCState &CCInfo, |
| 606 | SmallVectorImpl<CCValAssign> &ArgLocs, |
| 607 | MachineIRBuilder &MIRBuilder, |
| 608 | Register ThisReturnReg) const { |
| 609 | MachineFunction &MF = MIRBuilder.getMF(); |
| 610 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
| 611 | const Function &F = MF.getFunction(); |
| 612 | const DataLayout &DL = F.getParent()->getDataLayout(); |
| 613 | |
| 614 | const unsigned NumArgs = Args.size(); |
| 615 | |
| 616 | for (unsigned i = 0, j = 0; i != NumArgs; ++i, ++j) { |
| 617 | assert(j < ArgLocs.size() && "Skipped too many arg locs"); |
| 618 | CCValAssign &VA = ArgLocs[j]; |
| 619 | assert(VA.getValNo() == i && "Location doesn't correspond to current arg"); |
| 620 | |
| 621 | if (VA.needsCustom()) { |
| 622 | unsigned NumArgRegs = |
| 623 | Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j)); |
| 624 | if (!NumArgRegs) |
| 625 | return false; |
| 626 | j += NumArgRegs; |
| 627 | continue; |
| 628 | } |
| 629 | |
| 630 | const MVT ValVT = VA.getValVT(); |
| 631 | const MVT LocVT = VA.getLocVT(); |
| 632 | |
| 633 | const LLT LocTy(LocVT); |
| 634 | const LLT ValTy(ValVT); |
| 635 | const LLT NewLLT = Handler.isIncomingArgumentHandler() ? LocTy : ValTy; |
| 636 | const EVT OrigVT = EVT::getEVT(Args[i].Ty); |
| 637 | const LLT OrigTy = getLLTForType(*Args[i].Ty, DL); |
| 638 | |
| 639 | |
| 640 | |
| 641 | |
| 642 | const unsigned NumParts = Args[i].Flags.size(); |
| 643 | |
| 644 | |
| 645 | Args[i].OrigRegs.assign(Args[i].Regs.begin(), Args[i].Regs.end()); |
| 646 | |
| 647 | if (NumParts != 1 || NewLLT != OrigTy) { |
| 648 | |
| 649 | |
| 650 | Args[i].Regs.resize(NumParts); |
| 651 | |
| 652 | |
| 653 | |
| 654 | |
| 655 | for (unsigned Part = 0; Part < NumParts; ++Part) |
| 656 | Args[i].Regs[Part] = MRI.createGenericVirtualRegister(NewLLT); |
| 657 | } |
| 658 | |
| 659 | assert((j + (NumParts - 1)) < ArgLocs.size() && |
| 660 | "Too many regs for number of args"); |
| 661 | |
| 662 | |
| 663 | if (!Handler.isIncomingArgumentHandler() && OrigTy != ValTy) { |
| 664 | assert(Args[i].OrigRegs.size() == 1); |
| 665 | buildCopyToRegs(MIRBuilder, Args[i].Regs, Args[i].OrigRegs[0], OrigTy, |
| 666 | ValTy, extendOpFromFlags(Args[i].Flags[0])); |
| 667 | } |
| 668 | |
| 669 | for (unsigned Part = 0; Part < NumParts; ++Part) { |
| 670 | Register ArgReg = Args[i].Regs[Part]; |
| 671 | |
| 672 | VA = ArgLocs[j + Part]; |
| 673 | const ISD::ArgFlagsTy Flags = Args[i].Flags[Part]; |
| 674 | |
| 675 | if (VA.isMemLoc() && !Flags.isByVal()) { |
| 676 | |
| 677 | |
| 678 | |
| 679 | |
| 680 | |
| 681 | LLT MemTy = Handler.getStackValueStoreType(DL, VA, Flags); |
| 682 | |
| 683 | MachinePointerInfo MPO; |
| 684 | Register StackAddr = Handler.getStackAddress( |
| 685 | MemTy.getSizeInBytes(), VA.getLocMemOffset(), MPO, Flags); |
| 686 | |
| 687 | Handler.assignValueToAddress(Args[i], Part, StackAddr, MemTy, MPO, VA); |
| 688 | continue; |
| 689 | } |
| 690 | |
| 691 | if (VA.isMemLoc() && Flags.isByVal()) { |
| 692 | assert(Args[i].Regs.size() == 1 && |
| 693 | "didn't expect split byval pointer"); |
| 694 | |
| 695 | if (Handler.isIncomingArgumentHandler()) { |
| 696 | |
| 697 | MachinePointerInfo MPO; |
| 698 | Register StackAddr = Handler.getStackAddress( |
| 699 | Flags.getByValSize(), VA.getLocMemOffset(), MPO, Flags); |
| 700 | MIRBuilder.buildCopy(Args[i].Regs[0], StackAddr); |
| 701 | } else { |
| 702 | |
| 703 | |
| 704 | |
| 705 | uint64_t MemSize = Flags.getByValSize(); |
| 706 | int64_t Offset = VA.getLocMemOffset(); |
| 707 | |
| 708 | MachinePointerInfo DstMPO; |
| 709 | Register StackAddr = |
| 710 | Handler.getStackAddress(MemSize, Offset, DstMPO, Flags); |
| 711 | |
| 712 | MachinePointerInfo SrcMPO(Args[i].OrigValue); |
| 713 | if (!Args[i].OrigValue) { |
| 714 | |
| 715 | |
| 716 | const LLT PtrTy = MRI.getType(StackAddr); |
| 717 | SrcMPO = MachinePointerInfo(PtrTy.getAddressSpace()); |
| 718 | } |
| 719 | |
| 720 | Align DstAlign = std::max(Flags.getNonZeroByValAlign(), |
| 721 | inferAlignFromPtrInfo(MF, DstMPO)); |
| 722 | |
| 723 | Align SrcAlign = std::max(Flags.getNonZeroByValAlign(), |
| 724 | inferAlignFromPtrInfo(MF, SrcMPO)); |
| 725 | |
| 726 | Handler.copyArgumentMemory(Args[i], StackAddr, Args[i].Regs[0], |
| 727 | DstMPO, DstAlign, SrcMPO, SrcAlign, |
| 728 | MemSize, VA); |
| 729 | } |
| 730 | continue; |
| 731 | } |
| 732 | |
| 733 | assert(!VA.needsCustom() && "custom loc should have been handled already"); |
| 734 | |
| 735 | if (i == 0 && ThisReturnReg.isValid() && |
| 736 | Handler.isIncomingArgumentHandler() && |
| 737 | isTypeIsValidForThisReturn(ValVT)) { |
| 738 | Handler.assignValueToReg(Args[i].Regs[i], ThisReturnReg, VA); |
| 739 | continue; |
| 740 | } |
| 741 | |
| 742 | Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA); |
| 743 | } |
| 744 | |
| 745 | |
| 746 | |
| 747 | if (Handler.isIncomingArgumentHandler() && OrigVT != LocVT) { |
| 748 | |
| 749 | |
| 750 | buildCopyFromRegs(MIRBuilder, Args[i].OrigRegs, Args[i].Regs, OrigTy, |
| 751 | LocTy, Args[i].Flags[0]); |
| 752 | } |
| 753 | |
| 754 | j += NumParts - 1; |
| 755 | } |
| 756 | |
| 757 | return true; |
| 758 | } |
| 759 | |
| 760 | void CallLowering::insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy, |
| 761 | ArrayRef<Register> VRegs, Register DemoteReg, |
| 762 | int FI) const { |
| 763 | MachineFunction &MF = MIRBuilder.getMF(); |
| 764 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
| 765 | const DataLayout &DL = MF.getDataLayout(); |
| 766 | |
| 767 | SmallVector<EVT, 4> SplitVTs; |
| 768 | SmallVector<uint64_t, 4> Offsets; |
| 769 | ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0); |
| 770 | |
| 771 | assert(VRegs.size() == SplitVTs.size()); |
| 772 | |
| 773 | unsigned NumValues = SplitVTs.size(); |
| 774 | Align BaseAlign = DL.getPrefTypeAlign(RetTy); |
| 775 | Type *RetPtrTy = RetTy->getPointerTo(DL.getAllocaAddrSpace()); |
| 776 | LLT OffsetLLTy = getLLTForType(*DL.getIntPtrType(RetPtrTy), DL); |
| 777 | |
| 778 | MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI); |
| 779 | |
| 780 | for (unsigned I = 0; I < NumValues; ++I) { |
| 781 | Register Addr; |
| 782 | MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]); |
| 783 | auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad, |
| 784 | MRI.getType(VRegs[I]), |
| 785 | commonAlignment(BaseAlign, Offsets[I])); |
| 786 | MIRBuilder.buildLoad(VRegs[I], Addr, *MMO); |
| 787 | } |
| 788 | } |
| 789 | |
| 790 | void CallLowering::insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy, |
| 791 | ArrayRef<Register> VRegs, |
| 792 | Register DemoteReg) const { |
| 793 | MachineFunction &MF = MIRBuilder.getMF(); |
| 794 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
| 795 | const DataLayout &DL = MF.getDataLayout(); |
| 796 | |
| 797 | SmallVector<EVT, 4> SplitVTs; |
| 798 | SmallVector<uint64_t, 4> Offsets; |
| 799 | ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0); |
| 800 | |
| 801 | assert(VRegs.size() == SplitVTs.size()); |
| 802 | |
| 803 | unsigned NumValues = SplitVTs.size(); |
| 804 | Align BaseAlign = DL.getPrefTypeAlign(RetTy); |
| 805 | unsigned AS = DL.getAllocaAddrSpace(); |
| 806 | LLT OffsetLLTy = |
| 807 | getLLTForType(*DL.getIntPtrType(RetTy->getPointerTo(AS)), DL); |
| 808 | |
| 809 | MachinePointerInfo PtrInfo(AS); |
| 810 | |
| 811 | for (unsigned I = 0; I < NumValues; ++I) { |
| 812 | Register Addr; |
| 813 | MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]); |
| 814 | auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore, |
| 815 | MRI.getType(VRegs[I]), |
| 816 | commonAlignment(BaseAlign, Offsets[I])); |
| 817 | MIRBuilder.buildStore(VRegs[I], Addr, *MMO); |
| 818 | } |
| 819 | } |
| 820 | |
| 821 | void CallLowering::insertSRetIncomingArgument( |
| 822 | const Function &F, SmallVectorImpl<ArgInfo> &SplitArgs, Register &DemoteReg, |
| 823 | MachineRegisterInfo &MRI, const DataLayout &DL) const { |
| 824 | unsigned AS = DL.getAllocaAddrSpace(); |
| 825 | DemoteReg = MRI.createGenericVirtualRegister( |
| 826 | LLT::pointer(AS, DL.getPointerSizeInBits(AS))); |
| 827 | |
| 828 | Type *PtrTy = PointerType::get(F.getReturnType(), AS); |
| 829 | |
| 830 | SmallVector<EVT, 1> ValueVTs; |
| 831 | ComputeValueVTs(*TLI, DL, PtrTy, ValueVTs); |
| 832 | |
| 833 | |
| 834 | assert(ValueVTs.size() == 1); |
| 835 | |
| 836 | ArgInfo DemoteArg(DemoteReg, ValueVTs[0].getTypeForEVT(PtrTy->getContext()), |
| 837 | ArgInfo::NoArgIndex); |
| 838 | setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, F); |
| 1 | Calling 'CallLowering::setArgFlags' | |
|
| 839 | DemoteArg.Flags[0].setSRet(); |
| 840 | SplitArgs.insert(SplitArgs.begin(), DemoteArg); |
| 841 | } |
| 842 | |
| 843 | void CallLowering::insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder, |
| 844 | const CallBase &CB, |
| 845 | CallLoweringInfo &Info) const { |
| 846 | const DataLayout &DL = MIRBuilder.getDataLayout(); |
| 847 | Type *RetTy = CB.getType(); |
| 848 | unsigned AS = DL.getAllocaAddrSpace(); |
| 849 | LLT FramePtrTy = LLT::pointer(AS, DL.getPointerSizeInBits(AS)); |
| 850 | |
| 851 | int FI = MIRBuilder.getMF().getFrameInfo().CreateStackObject( |
| 852 | DL.getTypeAllocSize(RetTy), DL.getPrefTypeAlign(RetTy), false); |
| 853 | |
| 854 | Register DemoteReg = MIRBuilder.buildFrameIndex(FramePtrTy, FI).getReg(0); |
| 855 | ArgInfo DemoteArg(DemoteReg, PointerType::get(RetTy, AS), |
| 856 | ArgInfo::NoArgIndex); |
| 857 | setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, CB); |
| 858 | DemoteArg.Flags[0].setSRet(); |
| 859 | |
| 860 | Info.OrigArgs.insert(Info.OrigArgs.begin(), DemoteArg); |
| 861 | Info.DemoteStackIndex = FI; |
| 862 | Info.DemoteRegister = DemoteReg; |
| 863 | } |
| 864 | |
| 865 | bool CallLowering::checkReturn(CCState &CCInfo, |
| 866 | SmallVectorImpl<BaseArgInfo> &Outs, |
| 867 | CCAssignFn *Fn) const { |
| 868 | for (unsigned I = 0, E = Outs.size(); I < E; ++I) { |
| 869 | MVT VT = MVT::getVT(Outs[I].Ty); |
| 870 | if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo)) |
| 871 | return false; |
| 872 | } |
| 873 | return true; |
| 874 | } |
| 875 | |
| 876 | void CallLowering::getReturnInfo(CallingConv::ID CallConv, Type *RetTy, |
| 877 | AttributeList Attrs, |
| 878 | SmallVectorImpl<BaseArgInfo> &Outs, |
| 879 | const DataLayout &DL) const { |
| 880 | LLVMContext &Context = RetTy->getContext(); |
| 881 | ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy(); |
| 882 | |
| 883 | SmallVector<EVT, 4> SplitVTs; |
| 884 | ComputeValueVTs(*TLI, DL, RetTy, SplitVTs); |
| 885 | addArgFlagsFromAttributes(Flags, Attrs, AttributeList::ReturnIndex); |
| 886 | |
| 887 | for (EVT VT : SplitVTs) { |
| 888 | unsigned NumParts = |
| 889 | TLI->getNumRegistersForCallingConv(Context, CallConv, VT); |
| 890 | MVT RegVT = TLI->getRegisterTypeForCallingConv(Context, CallConv, VT); |
| 891 | Type *PartTy = EVT(RegVT).getTypeForEVT(Context); |
| 892 | |
| 893 | for (unsigned I = 0; I < NumParts; ++I) { |
| 894 | Outs.emplace_back(PartTy, Flags); |
| 895 | } |
| 896 | } |
| 897 | } |
| 898 | |
| 899 | bool CallLowering::checkReturnTypeForCallConv(MachineFunction &MF) const { |
| 900 | const auto &F = MF.getFunction(); |
| 901 | Type *ReturnType = F.getReturnType(); |
| 902 | CallingConv::ID CallConv = F.getCallingConv(); |
| 903 | |
| 904 | SmallVector<BaseArgInfo, 4> SplitArgs; |
| 905 | getReturnInfo(CallConv, ReturnType, F.getAttributes(), SplitArgs, |
| 906 | MF.getDataLayout()); |
| 907 | return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg()); |
| 908 | } |
| 909 | |
/// Check that every outgoing argument assigned to a callee-saved register is
/// merely forwarding the same physical register (via a single COPY), which is
/// a precondition for tail calling without clobbering the caller's CSRs.
bool CallLowering::parametersInCSRMatch(
    const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &OutLocs,
    const SmallVectorImpl<ArgInfo> &OutArgs) const {
  for (unsigned i = 0; i < OutLocs.size(); ++i) {
    auto &ArgLoc = OutLocs[i];

    // Stack-passed arguments cannot clobber a callee-saved register.
    if (!ArgLoc.isRegLoc())
      continue;

    MCRegister PhysReg = ArgLoc.getLocReg();

    // Registers that the call clobbers are not callee-saved; nothing to check.
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, PhysReg))
      continue;

    LLVM_DEBUG(
        dbgs()
        << "... Call has an argument passed in a callee-saved register.\n");

    // This location is a callee-saved register; verify the value is just a
    // copy of that same register.
    const ArgInfo &OutInfo = OutArgs[i];

    if (OutInfo.Regs.size() > 1) {
      LLVM_DEBUG(
          dbgs() << "... Cannot handle arguments in multiple registers.\n");
      return false;
    }

    // Walk back through copies of virtual registers to the defining
    // instruction; it must itself be a COPY (from a physical register,
    // which getDefIgnoringCopies does not look through).
    MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
    if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
      LLVM_DEBUG(
          dbgs()
          << "... Parameter was not copied into a VReg, cannot tail call.\n");
      return false;
    }

    // The copy's source must be exactly the callee-saved register the value
    // is being passed in.
    Register CopyRHS = RegDef->getOperand(1).getReg();
    if (CopyRHS != PhysReg) {
      LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
                           "VReg, cannot tail call.\n");
      return false;
    }
  }

  return true;
}
| 961 | |
/// Determine whether the callee's and the caller's calling conventions assign
/// the values in \p InArgs to identical locations, i.e. the two conventions
/// are result-compatible for this call.
bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
                                     MachineFunction &MF,
                                     SmallVectorImpl<ArgInfo> &InArgs,
                                     ValueAssigner &CalleeAssigner,
                                     ValueAssigner &CallerAssigner) const {
  const Function &F = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = F.getCallingConv();

  // Identical conventions trivially agree.
  if (CallerCC == CalleeCC)
    return true;

  // Compute the assignments once under the callee's convention...
  SmallVector<CCValAssign, 16> ArgLocs1;
  CCState CCInfo1(CalleeCC, Info.IsVarArg, MF, ArgLocs1, F.getContext());
  if (!determineAssignments(CalleeAssigner, InArgs, CCInfo1))
    return false;

  // ...and once under the caller's convention.
  SmallVector<CCValAssign, 16> ArgLocs2;
  CCState CCInfo2(CallerCC, F.isVarArg(), MF, ArgLocs2, F.getContext());
  if (!determineAssignments(CallerAssigner, InArgs, CCInfo2))
    return false;

  // Different numbers of locations cannot match up.
  if (ArgLocs1.size() != ArgLocs2.size())
    return false;

  // Every location must be passed in exactly the same way.
  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
    const CCValAssign &Loc1 = ArgLocs1[i];
    const CCValAssign &Loc2 = ArgLocs2[i];

    // One in a register and the other in memory is a mismatch.
    if (Loc1.isRegLoc() != Loc2.isRegLoc())
      return false;

    if (Loc1.isRegLoc()) {
      // Both register locations: they must be the same physical register.
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;

      // Matched; move on to the next location.
      continue;
    }

    // Both memory locations: they must sit at the same stack offset.
    if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
      return false;
  }

  return true;
}
| 1015 | |
| 1016 | LLT CallLowering::ValueHandler::getStackValueStoreType( |
| 1017 | const DataLayout &DL, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const { |
| 1018 | const MVT ValVT = VA.getValVT(); |
| 1019 | if (ValVT != MVT::iPTR) { |
| 1020 | LLT ValTy(ValVT); |
| 1021 | |
| 1022 | |
| 1023 | |
| 1024 | if (Flags.isPointer()) { |
| 1025 | LLT PtrTy = LLT::pointer(Flags.getPointerAddrSpace(), |
| 1026 | ValTy.getScalarSizeInBits()); |
| 1027 | if (ValVT.isVector()) |
| 1028 | return LLT::vector(ValTy.getElementCount(), PtrTy); |
| 1029 | return PtrTy; |
| 1030 | } |
| 1031 | |
| 1032 | return ValTy; |
| 1033 | } |
| 1034 | |
| 1035 | unsigned AddrSpace = Flags.getPointerAddrSpace(); |
| 1036 | return LLT::pointer(AddrSpace, DL.getPointerSize(AddrSpace)); |
| 1037 | } |
| 1038 | |
| 1039 | void CallLowering::ValueHandler::copyArgumentMemory( |
| 1040 | const ArgInfo &Arg, Register DstPtr, Register SrcPtr, |
| 1041 | const MachinePointerInfo &DstPtrInfo, Align DstAlign, |
| 1042 | const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize, |
| 1043 | CCValAssign &VA) const { |
| 1044 | MachineFunction &MF = MIRBuilder.getMF(); |
| 1045 | MachineMemOperand *SrcMMO = MF.getMachineMemOperand( |
| 1046 | SrcPtrInfo, |
| 1047 | MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable, MemSize, |
| 1048 | SrcAlign); |
| 1049 | |
| 1050 | MachineMemOperand *DstMMO = MF.getMachineMemOperand( |
| 1051 | DstPtrInfo, |
| 1052 | MachineMemOperand::MOStore | MachineMemOperand::MODereferenceable, |
| 1053 | MemSize, DstAlign); |
| 1054 | |
| 1055 | const LLT PtrTy = MRI.getType(DstPtr); |
| 1056 | const LLT SizeTy = LLT::scalar(PtrTy.getSizeInBits()); |
| 1057 | |
| 1058 | auto SizeConst = MIRBuilder.buildConstant(SizeTy, MemSize); |
| 1059 | MIRBuilder.buildMemCpy(DstPtr, SrcPtr, SizeConst, *DstMMO, *SrcMMO); |
| 1060 | } |
| 1061 | |
/// Extend \p ValReg to the location type recorded in \p VA, using the
/// extension kind (AExt/SExt/ZExt) the assignment requested. A non-zero
/// \p MaxSizeBits caps the width of the extension for scalar locations.
Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA,
                                                    unsigned MaxSizeBits) {
  LLT LocTy{VA.getLocVT()};
  LLT ValTy{VA.getValVT()};

  // Same size already: nothing to extend.
  if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
    return ValReg;

  // Clamp the destination width to MaxSizeBits when requested; if the cap is
  // no wider than the value itself there is nothing to do.
  if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
    if (MaxSizeBits <= ValTy.getSizeInBits())
      return ValReg;
    LocTy = LLT::scalar(MaxSizeBits);
  }

  const LLT ValRegTy = MRI.getType(ValReg);
  if (ValRegTy.isPointer()) {
    // Pointers cannot be extended directly; cast to an integer of the same
    // width first.
    LLT IntPtrTy = LLT::scalar(ValRegTy.getSizeInBits());
    ValReg = MIRBuilder.buildPtrToInt(IntPtrTy, ValReg).getReg(0);
  }

  switch (VA.getLocInfo()) {
  default: break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // Full/BCvt locations are returned unchanged here.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB.getReg(0);
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  // Any other LocInfo kind falls out of the switch and is unsupported.
  llvm_unreachable("unable to extend register");
}
| 1109 | |
| 1110 | void CallLowering::ValueAssigner::anchor() {} |
| 1111 | |
| 1112 | Register CallLowering::IncomingValueHandler::buildExtensionHint(CCValAssign &VA, |
| 1113 | Register SrcReg, |
| 1114 | LLT NarrowTy) { |
| 1115 | switch (VA.getLocInfo()) { |
| 1116 | case CCValAssign::LocInfo::ZExt: { |
| 1117 | return MIRBuilder |
| 1118 | .buildAssertZExt(MRI.cloneVirtualRegister(SrcReg), SrcReg, |
| 1119 | NarrowTy.getScalarSizeInBits()) |
| 1120 | .getReg(0); |
| 1121 | } |
| 1122 | case CCValAssign::LocInfo::SExt: { |
| 1123 | return MIRBuilder |
| 1124 | .buildAssertSExt(MRI.cloneVirtualRegister(SrcReg), SrcReg, |
| 1125 | NarrowTy.getScalarSizeInBits()) |
| 1126 | .getReg(0); |
| 1127 | break; |
| 1128 | } |
| 1129 | default: |
| 1130 | return SrcReg; |
| 1131 | } |
| 1132 | } |
| 1133 | |
| 1134 | |
| 1135 | |
| 1136 | |
| 1137 | |
| 1138 | |
| 1139 | |
| 1140 | static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy) { |
| 1141 | if (SrcTy == DstTy) |
| 1142 | return true; |
| 1143 | |
| 1144 | if (SrcTy.getSizeInBits() != DstTy.getSizeInBits()) |
| 1145 | return false; |
| 1146 | |
| 1147 | SrcTy = SrcTy.getScalarType(); |
| 1148 | DstTy = DstTy.getScalarType(); |
| 1149 | |
| 1150 | return (SrcTy.isPointer() && DstTy.isScalar()) || |
| 1151 | (DstTy.isScalar() && SrcTy.isPointer()); |
| 1152 | } |
| 1153 | |
| 1154 | void CallLowering::IncomingValueHandler::assignValueToReg(Register ValVReg, |
| 1155 | Register PhysReg, |
| 1156 | CCValAssign &VA) { |
| 1157 | const MVT LocVT = VA.getLocVT(); |
| 1158 | const LLT LocTy(LocVT); |
| 1159 | const LLT RegTy = MRI.getType(ValVReg); |
| 1160 | |
| 1161 | if (isCopyCompatibleType(RegTy, LocTy)) { |
| 1162 | MIRBuilder.buildCopy(ValVReg, PhysReg); |
| 1163 | return; |
| 1164 | } |
| 1165 | |
| 1166 | auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg); |
| 1167 | auto Hint = buildExtensionHint(VA, Copy.getReg(0), RegTy); |
| 1168 | MIRBuilder.buildTrunc(ValVReg, Hint); |
| 1169 | } |