Bug Summary

File: src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/clang/lib/CodeGen/TargetInfo.cpp
Warning: line 876, column 26
Called C++ object pointer is null
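
The flagged dereference is reached in WebAssemblyABIInfo::classifyArgumentType (analysis steps 1 through 23 in the listing below): the argument type satisfies isAggregateTypeForABI(), for example because it is a member function pointer type (see line 82), yet it is not a RecordType, so the RT initialized on line 873 is null. With -D NDEBUG the assert(RT) on line 874 expands to ((void)0), and line 876 then calls RT->getDecl() on the null pointer. A minimal sketch of one possible guard, shown for illustration only and not taken from upstream:

    const RecordType *RT = Ty->getAs<RecordType>();
    if (!RT)
      return defaultInfo.classifyArgumentType(Ty);  // hypothetical fallback to the default classification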

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name TargetInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangCodeGen/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/clang/include -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/../include -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/obj -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangCodeGen/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/clang/lib/CodeGen/TargetInfo.cpp
1//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// These classes wrap the information about a call or function
10// definition used to handle ABI compliancy.
11//
12//===----------------------------------------------------------------------===//
13
14#include "TargetInfo.h"
15#include "ABIInfo.h"
16#include "CGBlocks.h"
17#include "CGCXXABI.h"
18#include "CGValue.h"
19#include "CodeGenFunction.h"
20#include "clang/AST/Attr.h"
21#include "clang/AST/RecordLayout.h"
22#include "clang/Basic/CodeGenOptions.h"
23#include "clang/Basic/DiagnosticFrontend.h"
24#include "clang/Basic/Builtins.h"
25#include "clang/CodeGen/CGFunctionInfo.h"
26#include "clang/CodeGen/SwiftCallingConv.h"
27#include "llvm/ADT/SmallBitVector.h"
28#include "llvm/ADT/StringExtras.h"
29#include "llvm/ADT/StringSwitch.h"
30#include "llvm/ADT/Triple.h"
31#include "llvm/ADT/Twine.h"
32#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/IntrinsicsNVPTX.h"
34#include "llvm/IR/IntrinsicsS390.h"
35#include "llvm/IR/Type.h"
36#include "llvm/Support/raw_ostream.h"
37#include <algorithm> // std::sort
38
39using namespace clang;
40using namespace CodeGen;
41
42// Helper for coercing an aggregate argument or return value into an integer
43// array of the same size (including padding) and alignment. This alternate
44// coercion happens only for the RenderScript ABI and can be removed after
45// runtimes that rely on it are no longer supported.
46//
47// RenderScript assumes that the size of the argument / return value in the IR
48// is the same as the size of the corresponding qualified type. This helper
49// coerces the aggregate type into an array of the same size (including
50// padding). This coercion is used in lieu of expansion of struct members or
51// other canonical coercions that return a coerced-type of larger size.
52//
53// Ty - The argument / return value type
54// Context - The associated ASTContext
55// LLVMContext - The associated LLVMContext
56static ABIArgInfo coerceToIntArray(QualType Ty,
57 ASTContext &Context,
58 llvm::LLVMContext &LLVMContext) {
59 // Alignment and Size are measured in bits.
60 const uint64_t Size = Context.getTypeSize(Ty);
61 const uint64_t Alignment = Context.getTypeAlign(Ty);
62 llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
63 const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
64 return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
65}
66
67static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
68 llvm::Value *Array,
69 llvm::Value *Value,
70 unsigned FirstIndex,
71 unsigned LastIndex) {
72 // Alternatively, we could emit this as a loop in the source.
73 for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
74 llvm::Value *Cell =
75 Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
76 Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
77 }
78}
79
80static bool isAggregateTypeForABI(QualType T) {
81 return !CodeGenFunction::hasScalarEvaluationKind(T) ||
[6] Returning the value 1, which participates in a condition later
82 T->isMemberFunctionPointerType();
83}
84
85ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByVal,
86 bool Realign,
87 llvm::Type *Padding) const {
88 return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), ByVal,
89 Realign, Padding);
90}
91
92ABIArgInfo
93ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
94 return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
95 /*ByVal*/ false, Realign);
96}
97
98Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
99 QualType Ty) const {
100 return Address::invalid();
101}
102
103bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
104 if (Ty->isPromotableIntegerType())
105 return true;
106
107 if (const auto *EIT = Ty->getAs<ExtIntType>())
108 if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
109 return true;
110
111 return false;
112}
113
114ABIInfo::~ABIInfo() {}
115
116/// Does the given lowering require more than the given number of
117/// registers when expanded?
118///
119/// This is intended to be the basis of a reasonable basic implementation
120/// of should{Pass,Return}IndirectlyForSwift.
121///
122/// For most targets, a limit of four total registers is reasonable; this
123/// limits the amount of code required in order to move around the value
124/// in case it wasn't produced immediately prior to the call by the caller
125/// (or wasn't produced in exactly the right registers) or isn't used
126/// immediately within the callee. But some targets may need to further
127/// limit the register count due to an inability to support that many
128/// return registers.
129static bool occupiesMoreThan(CodeGenTypes &cgt,
130 ArrayRef<llvm::Type*> scalarTypes,
131 unsigned maxAllRegisters) {
132 unsigned intCount = 0, fpCount = 0;
133 for (llvm::Type *type : scalarTypes) {
134 if (type->isPointerTy()) {
135 intCount++;
136 } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
137 auto ptrWidth = cgt.getTarget().getPointerWidth(0);
138 intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
139 } else {
140 assert(type->isVectorTy() || type->isFloatingPointTy())((void)0);
141 fpCount++;
142 }
143 }
144
145 return (intCount + fpCount > maxAllRegisters);
146}
147
148bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
149 llvm::Type *eltTy,
150 unsigned numElts) const {
151 // The default implementation of this assumes that the target guarantees
152 // 128-bit SIMD support but nothing more.
153 return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
154}
155
156static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
157 CGCXXABI &CXXABI) {
158 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
159 if (!RD) {
160 if (!RT->getDecl()->canPassInRegisters())
161 return CGCXXABI::RAA_Indirect;
162 return CGCXXABI::RAA_Default;
163 }
164 return CXXABI.getRecordArgABI(RD);
165}
166
167static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
168 CGCXXABI &CXXABI) {
169 const RecordType *RT = T->getAs<RecordType>();
[10] Assuming the object is not a 'RecordType'
170 if (!RT)
[10.1] 'RT' is null
[11] Taking true branch
171 return CGCXXABI::RAA_Default;
[12] Returning zero, which participates in a condition later
172 return getRecordArgABI(RT, CXXABI);
173}
174
175static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
176 const ABIInfo &Info) {
177 QualType Ty = FI.getReturnType();
178
179 if (const auto *RT = Ty->getAs<RecordType>())
180 if (!isa<CXXRecordDecl>(RT->getDecl()) &&
181 !RT->getDecl()->canPassInRegisters()) {
182 FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
183 return true;
184 }
185
186 return CXXABI.classifyReturnType(FI);
187}
188
189/// Pass transparent unions as if they were the type of the first element. Sema
190/// should ensure that all elements of the union have the same "machine type".
191static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
192 if (const RecordType *UT = Ty->getAsUnionType()) {
193 const RecordDecl *UD = UT->getDecl();
194 if (UD->hasAttr<TransparentUnionAttr>()) {
195 assert(!UD->field_empty() && "sema created an empty transparent union")((void)0);
196 return UD->field_begin()->getType();
197 }
198 }
199 return Ty;
200}
201
202CGCXXABI &ABIInfo::getCXXABI() const {
203 return CGT.getCXXABI();
204}
205
206ASTContext &ABIInfo::getContext() const {
207 return CGT.getContext();
208}
209
210llvm::LLVMContext &ABIInfo::getVMContext() const {
211 return CGT.getLLVMContext();
212}
213
214const llvm::DataLayout &ABIInfo::getDataLayout() const {
215 return CGT.getDataLayout();
216}
217
218const TargetInfo &ABIInfo::getTarget() const {
219 return CGT.getTarget();
220}
221
222const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
223 return CGT.getCodeGenOpts();
224}
225
226bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }
227
228bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
229 return false;
230}
231
232bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
233 uint64_t Members) const {
234 return false;
235}
236
237LLVM_DUMP_METHOD__attribute__((noinline)) void ABIArgInfo::dump() const {
238 raw_ostream &OS = llvm::errs();
239 OS << "(ABIArgInfo Kind=";
240 switch (TheKind) {
241 case Direct:
242 OS << "Direct Type=";
243 if (llvm::Type *Ty = getCoerceToType())
244 Ty->print(OS);
245 else
246 OS << "null";
247 break;
248 case Extend:
249 OS << "Extend";
250 break;
251 case Ignore:
252 OS << "Ignore";
253 break;
254 case InAlloca:
255 OS << "InAlloca Offset=" << getInAllocaFieldIndex();
256 break;
257 case Indirect:
258 OS << "Indirect Align=" << getIndirectAlign().getQuantity()
259 << " ByVal=" << getIndirectByVal()
260 << " Realign=" << getIndirectRealign();
261 break;
262 case IndirectAliased:
263 OS << "Indirect Align=" << getIndirectAlign().getQuantity()
264 << " AadrSpace=" << getIndirectAddrSpace()
265 << " Realign=" << getIndirectRealign();
266 break;
267 case Expand:
268 OS << "Expand";
269 break;
270 case CoerceAndExpand:
271 OS << "CoerceAndExpand Type=";
272 getCoerceAndExpandType()->print(OS);
273 break;
274 }
275 OS << ")\n";
276}
277
278// Dynamically round a pointer up to a multiple of the given alignment.
279static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
280 llvm::Value *Ptr,
281 CharUnits Align) {
282 llvm::Value *PtrAsInt = Ptr;
283 // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
284 PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
285 PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
286 llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
287 PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
288 llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
289 PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
290 Ptr->getType(),
291 Ptr->getName() + ".aligned");
292 return PtrAsInt;
293}
294
295/// Emit va_arg for a platform using the common void* representation,
296/// where arguments are simply emitted in an array of slots on the stack.
297///
298/// This version implements the core direct-value passing rules.
299///
300/// \param SlotSize - The size and alignment of a stack slot.
301/// Each argument will be allocated to a multiple of this number of
302/// slots, and all the slots will be aligned to this value.
303/// \param AllowHigherAlign - The slot alignment is not a cap;
304/// an argument type with an alignment greater than the slot size
305/// will be emitted on a higher-alignment address, potentially
306/// leaving one or more empty slots behind as padding. If this
307/// is false, the returned address might be less-aligned than
308/// DirectAlign.
309static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
310 Address VAListAddr,
311 llvm::Type *DirectTy,
312 CharUnits DirectSize,
313 CharUnits DirectAlign,
314 CharUnits SlotSize,
315 bool AllowHigherAlign) {
316 // Cast the element type to i8* if necessary. Some platforms define
317 // va_list as a struct containing an i8* instead of just an i8*.
318 if (VAListAddr.getElementType() != CGF.Int8PtrTy)
319 VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);
320
321 llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");
322
323 // If the CC aligns values higher than the slot size, do so if needed.
324 Address Addr = Address::invalid();
325 if (AllowHigherAlign && DirectAlign > SlotSize) {
326 Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
327 DirectAlign);
328 } else {
329 Addr = Address(Ptr, SlotSize);
330 }
331
332 // Advance the pointer past the argument, then store that back.
333 CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
334 Address NextPtr =
335 CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
336 CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
337
338 // If the argument is smaller than a slot, and this is a big-endian
339 // target, the argument will be right-adjusted in its slot.
340 if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
341 !DirectTy->isStructTy()) {
342 Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
343 }
344
345 Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
346 return Addr;
347}
348
349/// Emit va_arg for a platform using the common void* representation,
350/// where arguments are simply emitted in an array of slots on the stack.
351///
352/// \param IsIndirect - Values of this type are passed indirectly.
353/// \param ValueInfo - The size and alignment of this type, generally
354/// computed with getContext().getTypeInfoInChars(ValueTy).
355/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
356/// Each argument will be allocated to a multiple of this number of
357/// slots, and all the slots will be aligned to this value.
358/// \param AllowHigherAlign - The slot alignment is not a cap;
359/// an argument type with an alignment greater than the slot size
360/// will be emitted on a higher-alignment address, potentially
361/// leaving one or more empty slots behind as padding.
362static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
363 QualType ValueTy, bool IsIndirect,
364 TypeInfoChars ValueInfo,
365 CharUnits SlotSizeAndAlign,
366 bool AllowHigherAlign) {
367 // The size and alignment of the value that was passed directly.
368 CharUnits DirectSize, DirectAlign;
369 if (IsIndirect) {
370 DirectSize = CGF.getPointerSize();
371 DirectAlign = CGF.getPointerAlign();
372 } else {
373 DirectSize = ValueInfo.Width;
374 DirectAlign = ValueInfo.Align;
375 }
376
377 // Cast the address we've calculated to the right type.
378 llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
379 if (IsIndirect)
380 DirectTy = DirectTy->getPointerTo(0);
381
382 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
383 DirectSize, DirectAlign,
384 SlotSizeAndAlign,
385 AllowHigherAlign);
386
387 if (IsIndirect) {
388 Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.Align);
389 }
390
391 return Addr;
392
393}
394
395static Address emitMergePHI(CodeGenFunction &CGF,
396 Address Addr1, llvm::BasicBlock *Block1,
397 Address Addr2, llvm::BasicBlock *Block2,
398 const llvm::Twine &Name = "") {
399 assert(Addr1.getType() == Addr2.getType())((void)0);
400 llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
401 PHI->addIncoming(Addr1.getPointer(), Block1);
402 PHI->addIncoming(Addr2.getPointer(), Block2);
403 CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
404 return Address(PHI, Align);
405}
406
407TargetCodeGenInfo::~TargetCodeGenInfo() = default;
408
409// If someone can figure out a general rule for this, that would be great.
410// It's probably just doomed to be platform-dependent, though.
411unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
412 // Verified for:
413 // x86-64 FreeBSD, Linux, Darwin
414 // x86-32 FreeBSD, Linux, Darwin
415 // PowerPC Linux, Darwin
416 // ARM Darwin (*not* EABI)
417 // AArch64 Linux
418 return 32;
419}
420
421bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
422 const FunctionNoProtoType *fnType) const {
423 // The following conventions are known to require this to be false:
424 // x86_stdcall
425 // MIPS
426 // For everything else, we just prefer false unless we opt out.
427 return false;
428}
429
430void
431TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
432 llvm::SmallString<24> &Opt) const {
433 // This assumes the user is passing a library name like "rt" instead of a
434 // filename like "librt.a/so", and that they don't care whether it's static or
435 // dynamic.
436 Opt = "-l";
437 Opt += Lib;
438}
439
440unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
441 // OpenCL kernels are called via an explicit runtime API with arguments
442 // set with clSetKernelArg(), not as normal sub-functions.
443 // Return SPIR_KERNEL by default as the kernel calling convention to
444 // ensure the fingerprint is fixed such way that each OpenCL argument
445 // gets one matching argument in the produced kernel function argument
446 // list to enable feasible implementation of clSetKernelArg() with
447 // aggregates etc. In case we would use the default C calling conv here,
448 // clSetKernelArg() might break depending on the target-specific
449 // conventions; different targets might split structs passed as values
450 // to multiple function arguments etc.
451 return llvm::CallingConv::SPIR_KERNEL;
452}
453
454llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
455 llvm::PointerType *T, QualType QT) const {
456 return llvm::ConstantPointerNull::get(T);
457}
458
459LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
460 const VarDecl *D) const {
461 assert(!CGM.getLangOpts().OpenCL &&((void)0)
462 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&((void)0)
463 "Address space agnostic languages only")((void)0);
464 return D ? D->getType().getAddressSpace() : LangAS::Default;
465}
466
467llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
468 CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr,
469 LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const {
470 // Since target may map different address spaces in AST to the same address
471 // space, an address space conversion may end up as a bitcast.
472 if (auto *C = dyn_cast<llvm::Constant>(Src))
473 return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);
474 // Try to preserve the source's name to make IR more readable.
475 return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
476 Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : "");
477}
478
479llvm::Constant *
480TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src,
481 LangAS SrcAddr, LangAS DestAddr,
482 llvm::Type *DestTy) const {
483 // Since target may map different address spaces in AST to the same address
484 // space, an address space conversion may end up as a bitcast.
485 return llvm::ConstantExpr::getPointerCast(Src, DestTy);
486}
487
488llvm::SyncScope::ID
489TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
490 SyncScope Scope,
491 llvm::AtomicOrdering Ordering,
492 llvm::LLVMContext &Ctx) const {
493 return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */
494}
495
496static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
497
498/// isEmptyField - Return true iff a the field is "empty", that is it
499/// is an unnamed bit-field or an (array of) empty record(s).
500static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
501 bool AllowArrays) {
502 if (FD->isUnnamedBitfield())
503 return true;
504
505 QualType FT = FD->getType();
506
507 // Constant arrays of empty records count as empty, strip them off.
508 // Constant arrays of zero length always count as empty.
509 bool WasArray = false;
510 if (AllowArrays)
511 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
512 if (AT->getSize() == 0)
513 return true;
514 FT = AT->getElementType();
515 // The [[no_unique_address]] special case below does not apply to
516 // arrays of C++ empty records, so we need to remember this fact.
517 WasArray = true;
518 }
519
520 const RecordType *RT = FT->getAs<RecordType>();
521 if (!RT)
522 return false;
523
524 // C++ record fields are never empty, at least in the Itanium ABI.
525 //
526 // FIXME: We should use a predicate for whether this behavior is true in the
527 // current ABI.
528 //
529 // The exception to the above rule are fields marked with the
530 // [[no_unique_address]] attribute (since C++20). Those do count as empty
531 // according to the Itanium ABI. The exception applies only to records,
532 // not arrays of records, so we must also check whether we stripped off an
533 // array type above.
534 if (isa<CXXRecordDecl>(RT->getDecl()) &&
535 (WasArray || !FD->hasAttr<NoUniqueAddressAttr>()))
536 return false;
537
538 return isEmptyRecord(Context, FT, AllowArrays);
539}
540
541/// isEmptyRecord - Return true iff a structure contains only empty
542/// fields. Note that a structure with a flexible array member is not
543/// considered empty.
544static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
545 const RecordType *RT = T->getAs<RecordType>();
546 if (!RT)
547 return false;
548 const RecordDecl *RD = RT->getDecl();
549 if (RD->hasFlexibleArrayMember())
550 return false;
551
552 // If this is a C++ record, check the bases first.
553 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
554 for (const auto &I : CXXRD->bases())
555 if (!isEmptyRecord(Context, I.getType(), true))
556 return false;
557
558 for (const auto *I : RD->fields())
559 if (!isEmptyField(Context, I, AllowArrays))
560 return false;
561 return true;
562}
563
564/// isSingleElementStruct - Determine if a structure is a "single
565/// element struct", i.e. it has exactly one non-empty field or
566/// exactly one field which is itself a single element
567/// struct. Structures with flexible array members are never
568/// considered single element structs.
569///
570/// \return The field declaration for the single non-empty field, if
571/// it exists.
572static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
573 const RecordType *RT = T->getAs<RecordType>();
574 if (!RT)
575 return nullptr;
576
577 const RecordDecl *RD = RT->getDecl();
578 if (RD->hasFlexibleArrayMember())
579 return nullptr;
580
581 const Type *Found = nullptr;
582
583 // If this is a C++ record, check the bases first.
584 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
585 for (const auto &I : CXXRD->bases()) {
586 // Ignore empty records.
587 if (isEmptyRecord(Context, I.getType(), true))
588 continue;
589
590 // If we already found an element then this isn't a single-element struct.
591 if (Found)
592 return nullptr;
593
594 // If this is non-empty and not a single element struct, the composite
595 // cannot be a single element struct.
596 Found = isSingleElementStruct(I.getType(), Context);
597 if (!Found)
598 return nullptr;
599 }
600 }
601
602 // Check for single element.
603 for (const auto *FD : RD->fields()) {
604 QualType FT = FD->getType();
605
606 // Ignore empty fields.
607 if (isEmptyField(Context, FD, true))
608 continue;
609
610 // If we already found an element then this isn't a single-element
611 // struct.
612 if (Found)
613 return nullptr;
614
615 // Treat single element arrays as the element.
616 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
617 if (AT->getSize().getZExtValue() != 1)
618 break;
619 FT = AT->getElementType();
620 }
621
622 if (!isAggregateTypeForABI(FT)) {
623 Found = FT.getTypePtr();
624 } else {
625 Found = isSingleElementStruct(FT, Context);
626 if (!Found)
627 return nullptr;
628 }
629 }
630
631 // We don't consider a struct a single-element struct if it has
632 // padding beyond the element type.
633 if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
634 return nullptr;
635
636 return Found;
637}
638
639namespace {
640Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
641 const ABIArgInfo &AI) {
642 // This default implementation defers to the llvm backend's va_arg
643 // instruction. It can handle only passing arguments directly
644 // (typically only handled in the backend for primitive types), or
645 // aggregates passed indirectly by pointer (NOTE: if the "byval"
646 // flag has ABI impact in the callee, this implementation cannot
647 // work.)
648
649 // Only a few cases are covered here at the moment -- those needed
650 // by the default abi.
651 llvm::Value *Val;
652
653 if (AI.isIndirect()) {
654 assert(!AI.getPaddingType() &&((void)0)
655 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!")((void)0);
656 assert(((void)0)
657 !AI.getIndirectRealign() &&((void)0)
658 "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!")((void)0);
659
660 auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
661 CharUnits TyAlignForABI = TyInfo.Align;
662
663 llvm::Type *BaseTy =
664 llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
665 llvm::Value *Addr =
666 CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
667 return Address(Addr, TyAlignForABI);
668 } else {
669 assert((AI.isDirect() || AI.isExtend()) &&((void)0)
670 "Unexpected ArgInfo Kind in generic VAArg emitter!")((void)0);
671
672 assert(!AI.getInReg() &&((void)0)
673 "Unexpected InReg seen in arginfo in generic VAArg emitter!")((void)0);
674 assert(!AI.getPaddingType() &&((void)0)
675 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!")((void)0);
676 assert(!AI.getDirectOffset() &&((void)0)
677 "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!")((void)0);
678 assert(!AI.getCoerceToType() &&((void)0)
679 "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!")((void)0);
680
681 Address Temp = CGF.CreateMemTemp(Ty, "varet");
682 Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
683 CGF.Builder.CreateStore(Val, Temp);
684 return Temp;
685 }
686}
687
688/// DefaultABIInfo - The default implementation for ABI specific
689/// details. This implementation provides information which results in
690/// self-consistent and sensible LLVM IR generation, but does not
691/// conform to any particular ABI.
692class DefaultABIInfo : public ABIInfo {
693public:
694 DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
695
696 ABIArgInfo classifyReturnType(QualType RetTy) const;
697 ABIArgInfo classifyArgumentType(QualType RetTy) const;
698
699 void computeInfo(CGFunctionInfo &FI) const override {
700 if (!getCXXABI().classifyReturnType(FI))
701 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
702 for (auto &I : FI.arguments())
703 I.info = classifyArgumentType(I.type);
704 }
705
706 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
707 QualType Ty) const override {
708 return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
709 }
710};
711
712class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
713public:
714 DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
715 : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
716};
717
718ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
719 Ty = useFirstFieldIfTransparentUnion(Ty);
720
721 if (isAggregateTypeForABI(Ty)) {
722 // Records with non-trivial destructors/copy-constructors should not be
723 // passed by value.
724 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
725 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
726
727 return getNaturalAlignIndirect(Ty);
728 }
729
730 // Treat an enum type as its underlying type.
731 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
732 Ty = EnumTy->getDecl()->getIntegerType();
733
734 ASTContext &Context = getContext();
735 if (const auto *EIT = Ty->getAs<ExtIntType>())
736 if (EIT->getNumBits() >
737 Context.getTypeSize(Context.getTargetInfo().hasInt128Type()
738 ? Context.Int128Ty
739 : Context.LongLongTy))
740 return getNaturalAlignIndirect(Ty);
741
742 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
743 : ABIArgInfo::getDirect());
744}
745
746ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
747 if (RetTy->isVoidType())
748 return ABIArgInfo::getIgnore();
749
750 if (isAggregateTypeForABI(RetTy))
751 return getNaturalAlignIndirect(RetTy);
752
753 // Treat an enum type as its underlying type.
754 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
755 RetTy = EnumTy->getDecl()->getIntegerType();
756
757 if (const auto *EIT = RetTy->getAs<ExtIntType>())
758 if (EIT->getNumBits() >
759 getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
760 ? getContext().Int128Ty
761 : getContext().LongLongTy))
762 return getNaturalAlignIndirect(RetTy);
763
764 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
765 : ABIArgInfo::getDirect());
766}
767
768//===----------------------------------------------------------------------===//
769// WebAssembly ABI Implementation
770//
771// This is a very simple ABI that relies a lot on DefaultABIInfo.
772//===----------------------------------------------------------------------===//
773
774class WebAssemblyABIInfo final : public SwiftABIInfo {
775public:
776 enum ABIKind {
777 MVP = 0,
778 ExperimentalMV = 1,
779 };
780
781private:
782 DefaultABIInfo defaultInfo;
783 ABIKind Kind;
784
785public:
786 explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
787 : SwiftABIInfo(CGT), defaultInfo(CGT), Kind(Kind) {}
788
789private:
790 ABIArgInfo classifyReturnType(QualType RetTy) const;
791 ABIArgInfo classifyArgumentType(QualType Ty) const;
792
793 // DefaultABIInfo's classifyReturnType and classifyArgumentType are
794 // non-virtual, but computeInfo and EmitVAArg are virtual, so we
795 // overload them.
796 void computeInfo(CGFunctionInfo &FI) const override {
797 if (!getCXXABI().classifyReturnType(FI))
[1] Assuming the condition is false
[2] Taking false branch
798 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
799 for (auto &Arg : FI.arguments())
[3] Assuming '__begin2' is not equal to '__end2'
800 Arg.info = classifyArgumentType(Arg.type);
[4] Calling 'WebAssemblyABIInfo::classifyArgumentType'
801 }
802
803 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
804 QualType Ty) const override;
805
806 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
807 bool asReturnValue) const override {
808 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
809 }
810
811 bool isSwiftErrorInRegister() const override {
812 return false;
813 }
814};
815
816class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
817public:
818 explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
819 WebAssemblyABIInfo::ABIKind K)
820 : TargetCodeGenInfo(std::make_unique<WebAssemblyABIInfo>(CGT, K)) {}
821
822 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
823 CodeGen::CodeGenModule &CGM) const override {
824 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
825 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
826 if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
827 llvm::Function *Fn = cast<llvm::Function>(GV);
828 llvm::AttrBuilder B;
829 B.addAttribute("wasm-import-module", Attr->getImportModule());
830 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
831 }
832 if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
833 llvm::Function *Fn = cast<llvm::Function>(GV);
834 llvm::AttrBuilder B;
835 B.addAttribute("wasm-import-name", Attr->getImportName());
836 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
837 }
838 if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) {
839 llvm::Function *Fn = cast<llvm::Function>(GV);
840 llvm::AttrBuilder B;
841 B.addAttribute("wasm-export-name", Attr->getExportName());
842 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
843 }
844 }
845
846 if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
847 llvm::Function *Fn = cast<llvm::Function>(GV);
848 if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
849 Fn->addFnAttr("no-prototype");
850 }
851 }
852};
853
854/// Classify argument of given type \p Ty.
855ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
856 Ty = useFirstFieldIfTransparentUnion(Ty);
857
858 if (isAggregateTypeForABI(Ty)) {
[5] Calling 'isAggregateTypeForABI'
[7] Returning from 'isAggregateTypeForABI'
[8] Taking true branch
859 // Records with non-trivial destructors/copy-constructors should not be
860 // passed by value.
861 if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
[9] Calling 'getRecordArgABI'
[13] Returning from 'getRecordArgABI'
[13.1] 'RAA' is 0
[14] Taking false branch
862 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
863 // Ignore empty structs/unions.
864 if (isEmptyRecord(getContext(), Ty, true))
[15] Assuming the condition is false
[16] Taking false branch
865 return ABIArgInfo::getIgnore();
866 // Lower single-element structs to just pass a regular value. TODO: We
867 // could do reasonable-size multiple-element structs too, using getExpand(),
868 // though watch out for things like bitfields.
869 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
[17] Assuming 'SeltTy' is null
[18] Taking false branch
870 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
871 // For the experimental multivalue ABI, fully expand all other aggregates
872 if (Kind == ABIKind::ExperimentalMV) {
[19] Assuming field 'Kind' is equal to ExperimentalMV
[20] Taking true branch
873 const RecordType *RT = Ty->getAs<RecordType>();
[21] Assuming the object is not a 'RecordType'
[22] 'RT' initialized to a null pointer value
874 assert(RT)((void)0);
875 bool HasBitField = false;
876 for (auto *Field : RT->getDecl()->fields()) {
[23] Called C++ object pointer is null
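// Note: with -D NDEBUG the assert(RT) on line 874 expands to ((void)0), so this
// loop can execute with RT == nullptr and RT->getDecl() dereferences a null pointer.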
877 if (Field->isBitField()) {
878 HasBitField = true;
879 break;
880 }
881 }
882 if (!HasBitField)
883 return ABIArgInfo::getExpand();
884 }
885 }
886
887 // Otherwise just do the default thing.
888 return defaultInfo.classifyArgumentType(Ty);
889}
890
891ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
892 if (isAggregateTypeForABI(RetTy)) {
893 // Records with non-trivial destructors/copy-constructors should not be
894 // returned by value.
895 if (!getRecordArgABI(RetTy, getCXXABI())) {
896 // Ignore empty structs/unions.
897 if (isEmptyRecord(getContext(), RetTy, true))
898 return ABIArgInfo::getIgnore();
899 // Lower single-element structs to just return a regular value. TODO: We
900 // could do reasonable-size multiple-element structs too, using
901 // ABIArgInfo::getDirect().
902 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
903 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
904 // For the experimental multivalue ABI, return all other aggregates
905 if (Kind == ABIKind::ExperimentalMV)
906 return ABIArgInfo::getDirect();
907 }
908 }
909
910 // Otherwise just do the default thing.
911 return defaultInfo.classifyReturnType(RetTy);
912}
913
914Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
915 QualType Ty) const {
916 bool IsIndirect = isAggregateTypeForABI(Ty) &&
917 !isEmptyRecord(getContext(), Ty, true) &&
918 !isSingleElementStruct(Ty, getContext());
919 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
920 getContext().getTypeInfoInChars(Ty),
921 CharUnits::fromQuantity(4),
922 /*AllowHigherAlign=*/true);
923}
924
925//===----------------------------------------------------------------------===//
926// le32/PNaCl bitcode ABI Implementation
927//
928// This is a simplified version of the x86_32 ABI. Arguments and return values
929// are always passed on the stack.
930//===----------------------------------------------------------------------===//
931
932class PNaClABIInfo : public ABIInfo {
933 public:
934 PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
935
936 ABIArgInfo classifyReturnType(QualType RetTy) const;
937 ABIArgInfo classifyArgumentType(QualType RetTy) const;
938
939 void computeInfo(CGFunctionInfo &FI) const override;
940 Address EmitVAArg(CodeGenFunction &CGF,
941 Address VAListAddr, QualType Ty) const override;
942};
943
944class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
945 public:
946 PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
947 : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {}
948};
949
950void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
951 if (!getCXXABI().classifyReturnType(FI))
952 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
953
954 for (auto &I : FI.arguments())
955 I.info = classifyArgumentType(I.type);
956}
957
958Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
959 QualType Ty) const {
960 // The PNaCL ABI is a bit odd, in that varargs don't use normal
961 // function classification. Structs get passed directly for varargs
962 // functions, through a rewriting transform in
963 // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
964 // this target to actually support a va_arg instructions with an
965 // aggregate type, unlike other targets.
966 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
967}
968
969/// Classify argument of given type \p Ty.
970ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
971 if (isAggregateTypeForABI(Ty)) {
972 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
973 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
974 return getNaturalAlignIndirect(Ty);
975 } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
976 // Treat an enum type as its underlying type.
977 Ty = EnumTy->getDecl()->getIntegerType();
978 } else if (Ty->isFloatingType()) {
979 // Floating-point types don't go inreg.
980 return ABIArgInfo::getDirect();
981 } else if (const auto *EIT = Ty->getAs<ExtIntType>()) {
982 // Treat extended integers as integers if <=64, otherwise pass indirectly.
983 if (EIT->getNumBits() > 64)
984 return getNaturalAlignIndirect(Ty);
985 return ABIArgInfo::getDirect();
986 }
987
988 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
989 : ABIArgInfo::getDirect());
990}
991
992ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
993 if (RetTy->isVoidType())
994 return ABIArgInfo::getIgnore();
995
996 // In the PNaCl ABI we always return records/structures on the stack.
997 if (isAggregateTypeForABI(RetTy))
998 return getNaturalAlignIndirect(RetTy);
999
1000 // Treat extended integers as integers if <=64, otherwise pass indirectly.
1001 if (const auto *EIT = RetTy->getAs<ExtIntType>()) {
1002 if (EIT->getNumBits() > 64)
1003 return getNaturalAlignIndirect(RetTy);
1004 return ABIArgInfo::getDirect();
1005 }
1006
1007 // Treat an enum type as its underlying type.
1008 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
1009 RetTy = EnumTy->getDecl()->getIntegerType();
1010
1011 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
1012 : ABIArgInfo::getDirect());
1013}
1014
1015/// IsX86_MMXType - Return true if this is an MMX type.
1016bool IsX86_MMXType(llvm::Type *IRType) {
1017 // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
1018 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
1019 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
1020 IRType->getScalarSizeInBits() != 64;
1021}
1022
1023static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
1024 StringRef Constraint,
1025 llvm::Type* Ty) {
1026 bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
1027 .Cases("y", "&y", "^Ym", true)
1028 .Default(false);
1029 if (IsMMXCons && Ty->isVectorTy()) {
1030 if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedSize() !=
1031 64) {
1032 // Invalid MMX constraint
1033 return nullptr;
1034 }
1035
1036 return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
1037 }
1038
1039 // No operation needed
1040 return Ty;
1041}
1042
1043/// Returns true if this type can be passed in SSE registers with the
1044/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
1045static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
1046 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
1047 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
1048 if (BT->getKind() == BuiltinType::LongDouble) {
1049 if (&Context.getTargetInfo().getLongDoubleFormat() ==
1050 &llvm::APFloat::x87DoubleExtended())
1051 return false;
1052 }
1053 return true;
1054 }
1055 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
1056 // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
1057 // registers specially.
1058 unsigned VecSize = Context.getTypeSize(VT);
1059 if (VecSize == 128 || VecSize == 256 || VecSize == 512)
1060 return true;
1061 }
1062 return false;
1063}
1064
1065/// Returns true if this aggregate is small enough to be passed in SSE registers
1066/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
1067static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
1068 return NumMembers <= 4;
1069}
1070
1071/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
1072static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
1073 auto AI = ABIArgInfo::getDirect(T);
1074 AI.setInReg(true);
1075 AI.setCanBeFlattened(false);
1076 return AI;
1077}
1078
1079//===----------------------------------------------------------------------===//
1080// X86-32 ABI Implementation
1081//===----------------------------------------------------------------------===//
1082
1083/// Similar to llvm::CCState, but for Clang.
1084struct CCState {
1085 CCState(CGFunctionInfo &FI)
1086 : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()) {}
1087
1088 llvm::SmallBitVector IsPreassigned;
1089 unsigned CC = CallingConv::CC_C;
1090 unsigned FreeRegs = 0;
1091 unsigned FreeSSERegs = 0;
1092};
1093
1094/// X86_32ABIInfo - The X86-32 ABI information.
1095class X86_32ABIInfo : public SwiftABIInfo {
1096 enum Class {
1097 Integer,
1098 Float
1099 };
1100
1101 static const unsigned MinABIStackAlignInBytes = 4;
1102
1103 bool IsDarwinVectorABI;
1104 bool IsRetSmallStructInRegABI;
1105 bool IsWin32StructABI;
1106 bool IsSoftFloatABI;
1107 bool IsMCUABI;
1108 bool IsLinuxABI;
1109 unsigned DefaultNumRegisterParameters;
1110
1111 static bool isRegisterSize(unsigned Size) {
1112 return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
1113 }
1114
1115 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
1116 // FIXME: Assumes vectorcall is in use.
1117 return isX86VectorTypeForVectorCall(getContext(), Ty);
1118 }
1119
1120 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
1121 uint64_t NumMembers) const override {
1122 // FIXME: Assumes vectorcall is in use.
1123 return isX86VectorCallAggregateSmallEnough(NumMembers);
1124 }
1125
1126 bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;
1127
1128 /// getIndirectResult - Give a source type \arg Ty, return a suitable result
1129 /// such that the argument will be passed in memory.
1130 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
1131
1132 ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;
1133
1134 /// Return the alignment to use for the given type on the stack.
1135 unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
1136
1137 Class classify(QualType Ty) const;
1138 ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
1139 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
1140
1141 /// Updates the number of available free registers, returns
1142 /// true if any registers were allocated.
1143 bool updateFreeRegs(QualType Ty, CCState &State) const;
1144
1145 bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
1146 bool &NeedsPadding) const;
1147 bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;
1148
1149 bool canExpandIndirectArgument(QualType Ty) const;
1150
1151 /// Rewrite the function info so that all memory arguments use
1152 /// inalloca.
1153 void rewriteWithInAlloca(CGFunctionInfo &FI) const;
1154
1155 void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1156 CharUnits &StackOffset, ABIArgInfo &Info,
1157 QualType Type) const;
1158 void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;
1159
1160public:
1161
1162 void computeInfo(CGFunctionInfo &FI) const override;
1163 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
1164 QualType Ty) const override;
1165
1166 X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
1167 bool RetSmallStructInRegABI, bool Win32StructABI,
1168 unsigned NumRegisterParameters, bool SoftFloatABI)
1169 : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
1170 IsRetSmallStructInRegABI(RetSmallStructInRegABI),
1171 IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
1172 IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
1173 IsLinuxABI(CGT.getTarget().getTriple().isOSLinux()),
1174 DefaultNumRegisterParameters(NumRegisterParameters) {}
1175
1176 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
1177 bool asReturnValue) const override {
1178 // LLVM's x86-32 lowering currently only assigns up to three
1179 // integer registers and three fp registers. Oddly, it'll use up to
1180 // four vector registers for vectors, but those can overlap with the
1181 // scalar registers.
1182 return occupiesMoreThan(CGT, scalars, /*total*/ 3);
1183 }
1184
1185 bool isSwiftErrorInRegister() const override {
1186 // x86-32 lowering does not support passing swifterror in a register.
1187 return false;
1188 }
1189};
1190
1191class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
1192public:
1193 X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
1194 bool RetSmallStructInRegABI, bool Win32StructABI,
1195 unsigned NumRegisterParameters, bool SoftFloatABI)
1196 : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
1197 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
1198 NumRegisterParameters, SoftFloatABI)) {}
1199
1200 static bool isStructReturnInRegABI(
1201 const llvm::Triple &Triple, const CodeGenOptions &Opts);
1202
1203 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
1204 CodeGen::CodeGenModule &CGM) const override;
1205
1206 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
1207 // Darwin uses different dwarf register numbers for EH.
1208 if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
1209 return 4;
1210 }
1211
1212 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1213 llvm::Value *Address) const override;
1214
1215 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
1216 StringRef Constraint,
1217 llvm::Type* Ty) const override {
1218 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1219 }
1220
1221 void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
1222 std::string &Constraints,
1223 std::vector<llvm::Type *> &ResultRegTypes,
1224 std::vector<llvm::Type *> &ResultTruncRegTypes,
1225 std::vector<LValue> &ResultRegDests,
1226 std::string &AsmString,
1227 unsigned NumOutputs) const override;
1228
1229 llvm::Constant *
1230 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
1231 unsigned Sig = (0xeb << 0) | // jmp rel8
1232 (0x06 << 8) | // .+0x08
1233 ('v' << 16) |
1234 ('2' << 24);
1235 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
1236 }
1237
1238 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
1239 return "movl\t%ebp, %ebp"
1240 "\t\t// marker for objc_retainAutoreleaseReturnValue";
1241 }
1242};
1243
1244}
1245
1246/// Rewrite input constraint references after adding some output constraints.
1247/// In the case where there is one output and one input and we add one output,
1248/// we need to replace all operand references greater than or equal to 1:
1249/// mov $0, $1
1250/// mov eax, $1
1251/// The result will be:
1252/// mov $0, $2
1253/// mov eax, $2
1254static void rewriteInputConstraintReferences(unsigned FirstIn,
1255 unsigned NumNewOuts,
1256 std::string &AsmString) {
1257 std::string Buf;
1258 llvm::raw_string_ostream OS(Buf);
1259 size_t Pos = 0;
1260 while (Pos < AsmString.size()) {
1261 size_t DollarStart = AsmString.find('$', Pos);
1262 if (DollarStart == std::string::npos)
1263 DollarStart = AsmString.size();
1264 size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
1265 if (DollarEnd == std::string::npos)
1266 DollarEnd = AsmString.size();
1267 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
1268 Pos = DollarEnd;
1269 size_t NumDollars = DollarEnd - DollarStart;
1270 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
1271 // We have an operand reference.
1272 size_t DigitStart = Pos;
1273 if (AsmString[DigitStart] == '{') {
1274 OS << '{';
1275 ++DigitStart;
1276 }
1277 size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
1278 if (DigitEnd == std::string::npos)
1279 DigitEnd = AsmString.size();
1280 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
1281 unsigned OperandIndex;
1282 if (!OperandStr.getAsInteger(10, OperandIndex)) {
1283 if (OperandIndex >= FirstIn)
1284 OperandIndex += NumNewOuts;
1285 OS << OperandIndex;
1286 } else {
1287 OS << OperandStr;
1288 }
1289 Pos = DigitEnd;
1290 }
1291 }
1292 AsmString = std::move(OS.str());
1293}
1294
1295/// Add output constraints for EAX:EDX because they are return registers.
1296void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
1297 CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
1298 std::vector<llvm::Type *> &ResultRegTypes,
1299 std::vector<llvm::Type *> &ResultTruncRegTypes,
1300 std::vector<LValue> &ResultRegDests, std::string &AsmString,
1301 unsigned NumOutputs) const {
1302 uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());
1303
1304 // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
1305 // larger.
1306 if (!Constraints.empty())
1307 Constraints += ',';
1308 if (RetWidth <= 32) {
1309 Constraints += "={eax}";
1310 ResultRegTypes.push_back(CGF.Int32Ty);
1311 } else {
1312 // Use the 'A' constraint for EAX:EDX.
1313 Constraints += "=A";
1314 ResultRegTypes.push_back(CGF.Int64Ty);
1315 }
1316
1317 // Truncate EAX or EAX:EDX to an integer of the appropriate size.
1318 llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
1319 ResultTruncRegTypes.push_back(CoerceTy);
1320
1321 // Coerce the integer by bitcasting the return slot pointer.
1322 ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(CGF),
1323 CoerceTy->getPointerTo()));
1324 ResultRegDests.push_back(ReturnSlot);
1325
1326 rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
1327}
1328
1329/// shouldReturnTypeInRegister - Determine if the given type should be
1330/// returned in a register (for the Darwin and MCU ABI).
1331bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
1332 ASTContext &Context) const {
1333 uint64_t Size = Context.getTypeSize(Ty);
1334
1335 // For i386, type must be register sized.
1336 // For the MCU ABI, it only needs to be <= 8-byte
1337 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
1338 return false;
1339
1340 if (Ty->isVectorType()) {
1341 // 64- and 128- bit vectors inside structures are not returned in
1342 // registers.
1343 if (Size == 64 || Size == 128)
1344 return false;
1345
1346 return true;
1347 }
1348
1349 // If this is a builtin, pointer, enum, complex type, member pointer, or
1350 // member function pointer it is ok.
1351 if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
1352 Ty->isAnyComplexType() || Ty->isEnumeralType() ||
1353 Ty->isBlockPointerType() || Ty->isMemberPointerType())
1354 return true;
1355
1356 // Arrays are treated like records.
1357 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
1358 return shouldReturnTypeInRegister(AT->getElementType(), Context);
1359
1360 // Otherwise, it must be a record type.
1361 const RecordType *RT = Ty->getAs<RecordType>();
1362 if (!RT) return false;
1363
1364 // FIXME: Traverse bases here too.
1365
1366 // Structure types are passed in register if all fields would be
1367 // passed in a register.
1368 for (const auto *FD : RT->getDecl()->fields()) {
1369 // Empty fields are ignored.
1370 if (isEmptyField(Context, FD, true))
1371 continue;
1372
1373 // Check fields recursively.
1374 if (!shouldReturnTypeInRegister(FD->getType(), Context))
1375 return false;
1376 }
1377 return true;
1378}
1379
1380static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
1381 // Treat complex types as the element type.
1382 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
1383 Ty = CTy->getElementType();
1384
1385 // Check for a type which we know has a simple scalar argument-passing
1386 // convention without any padding. (We're specifically looking for 32
1387 // and 64-bit integer and integer-equivalents, float, and double.)
1388 if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
1389 !Ty->isEnumeralType() && !Ty->isBlockPointerType())
1390 return false;
1391
1392 uint64_t Size = Context.getTypeSize(Ty);
1393 return Size == 32 || Size == 64;
1394}
1395
1396static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
1397 uint64_t &Size) {
1398 for (const auto *FD : RD->fields()) {
1399 // Scalar arguments on the stack get 4 byte alignment on x86. If the
1400 // argument is smaller than 32-bits, expanding the struct will create
1401 // alignment padding.
1402 if (!is32Or64BitBasicType(FD->getType(), Context))
1403 return false;
1404
1405 // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
1406 // how to expand them yet, and the predicate for telling if a bitfield still
1407 // counts as "basic" is more complicated than what we were doing previously.
1408 if (FD->isBitField())
1409 return false;
1410
1411 Size += Context.getTypeSize(FD->getType());
1412 }
1413 return true;
1414}
1415
1416static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
1417 uint64_t &Size) {
1418 // Don't do this if there are any non-empty bases.
1419 for (const CXXBaseSpecifier &Base : RD->bases()) {
1420 if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
1421 Size))
1422 return false;
1423 }
1424 if (!addFieldSizes(Context, RD, Size))
1425 return false;
1426 return true;
1427}
1428
1429/// Test whether an argument type which is to be passed indirectly (on the
1430/// stack) would have the equivalent layout if it was expanded into separate
1431/// arguments. If so, we prefer to do the latter to avoid inhibiting
1432/// optimizations.
1433bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
1434 // We can only expand structure types.
1435 const RecordType *RT = Ty->getAs<RecordType>();
1436 if (!RT)
1437 return false;
1438 const RecordDecl *RD = RT->getDecl();
1439 uint64_t Size = 0;
1440 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1441 if (!IsWin32StructABI) {
1442 // On non-Windows, we have to conservatively match our old bitcode
1443 // prototypes in order to be ABI-compatible at the bitcode level.
1444 if (!CXXRD->isCLike())
1445 return false;
1446 } else {
1447 // Don't do this for dynamic classes.
1448 if (CXXRD->isDynamicClass())
1449 return false;
1450 }
1451 if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
1452 return false;
1453 } else {
1454 if (!addFieldSizes(getContext(), RD, Size))
1455 return false;
1456 }
1457
1458 // We can do this if there was no alignment padding.
1459 return Size == getContext().getTypeSize(Ty);
1460}
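A sketch of how the padding check plays out, using hypothetical i386 types:

  struct S { int a; float b; };   // field sizes 32 + 32 == 64 == 8 * sizeof(S): expandable
  struct T { char c; int i; };    // 'char' is not a 32/64-bit basic type: not expandable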
1461
1462ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const {
1463 // If the return value is indirect, then the hidden argument is consuming one
1464 // integer register.
1465 if (State.FreeRegs) {
1466 --State.FreeRegs;
1467 if (!IsMCUABI)
1468 return getNaturalAlignIndirectInReg(RetTy);
1469 }
1470 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
1471}
1472
1473ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
1474 CCState &State) const {
1475 if (RetTy->isVoidType())
1476 return ABIArgInfo::getIgnore();
1477
1478 const Type *Base = nullptr;
1479 uint64_t NumElts = 0;
1480 if ((State.CC == llvm::CallingConv::X86_VectorCall ||
1481 State.CC == llvm::CallingConv::X86_RegCall) &&
1482 isHomogeneousAggregate(RetTy, Base, NumElts)) {
1483 // The LLVM struct type for such an aggregate should lower properly.
1484 return ABIArgInfo::getDirect();
1485 }
1486
1487 if (const VectorType *VT = RetTy->getAs<VectorType>()) {
1488 // On Darwin, some vectors are returned in registers.
1489 if (IsDarwinVectorABI) {
1490 uint64_t Size = getContext().getTypeSize(RetTy);
1491
1492 // 128-bit vectors are a special case; they are returned in
1493 // registers and we need to make sure to pick a type the LLVM
1494 // backend will like.
1495 if (Size == 128)
1496 return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
1497 llvm::Type::getInt64Ty(getVMContext()), 2));
1498
1499 // Always return in register if it fits in a general purpose
1500 // register, or if it is 64 bits and has a single element.
1501 if ((Size == 8 || Size == 16 || Size == 32) ||
1502 (Size == 64 && VT->getNumElements() == 1))
1503 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1504 Size));
1505
1506 return getIndirectReturnResult(RetTy, State);
1507 }
1508
1509 return ABIArgInfo::getDirect();
1510 }
1511
1512 if (isAggregateTypeForABI(RetTy)) {
1513 if (const RecordType *RT = RetTy->getAs<RecordType>()) {
1514 // Structures with flexible arrays are always indirect.
1515 if (RT->getDecl()->hasFlexibleArrayMember())
1516 return getIndirectReturnResult(RetTy, State);
1517 }
1518
1519 // If specified, structs and unions are always indirect.
1520 if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
1521 return getIndirectReturnResult(RetTy, State);
1522
1523 // Ignore empty structs/unions.
1524 if (isEmptyRecord(getContext(), RetTy, true))
1525 return ABIArgInfo::getIgnore();
1526
1527 // Small structures which are register sized are generally returned
1528 // in a register.
1529 if (shouldReturnTypeInRegister(RetTy, getContext())) {
1530 uint64_t Size = getContext().getTypeSize(RetTy);
1531
1532 // As a special-case, if the struct is a "single-element" struct, and
1533 // the field is of type "float" or "double", return it in a
1534 // floating-point register. (MSVC does not apply this special case.)
1535 // We apply a similar transformation for pointer types to improve the
1536 // quality of the generated IR.
1537 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
1538 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
1539 || SeltTy->hasPointerRepresentation())
1540 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
1541
1542 // FIXME: We should be able to narrow this integer in cases with dead
1543 // padding.
1544 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
1545 }
1546
1547 return getIndirectReturnResult(RetTy, State);
1548 }
1549
1550 // Treat an enum type as its underlying type.
1551 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
1552 RetTy = EnumTy->getDecl()->getIntegerType();
1553
1554 if (const auto *EIT = RetTy->getAs<ExtIntType>())
1555 if (EIT->getNumBits() > 64)
1556 return getIndirectReturnResult(RetTy, State);
1557
1558 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
1559 : ABIArgInfo::getDirect());
1560}
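Assuming small structs are returned in registers, two hedged examples of the rules above (hypothetical types):

  struct F1 { float f; };      // single-element float struct: returned directly as 'float'
                               // (not under the Win32 struct ABI)
  struct P  { float x, y; };   // 64 bits, not single-element: coerced to an i64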
1561
1562static bool isSIMDVectorType(ASTContext &Context, QualType Ty) {
1563 return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
1564}
1565
1566static bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
1567 const RecordType *RT = Ty->getAs<RecordType>();
1568 if (!RT)
1569    return false;
1570 const RecordDecl *RD = RT->getDecl();
1571
1572 // If this is a C++ record, check the bases first.
1573 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
1574 for (const auto &I : CXXRD->bases())
1575 if (!isRecordWithSIMDVectorType(Context, I.getType()))
1576 return false;
1577
1578 for (const auto *i : RD->fields()) {
1579 QualType FT = i->getType();
1580
1581 if (isSIMDVectorType(Context, FT))
1582 return true;
1583
1584 if (isRecordWithSIMDVectorType(Context, FT))
1585 return true;
1586 }
1587
1588 return false;
1589}
1590
1591unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
1592 unsigned Align) const {
1593  // If the alignment is less than or equal to the minimum ABI alignment,
1594  // just use the default; the backend will handle this.
1595 if (Align <= MinABIStackAlignInBytes)
1596 return 0; // Use default alignment.
1597
1598 if (IsLinuxABI) {
1599    // Exclude other System V OSes (e.g. Darwin, PS4 and FreeBSD) since we don't
1600 // want to spend any effort dealing with the ramifications of ABI breaks.
1601 //
1602 // If the vector type is __m128/__m256/__m512, return the default alignment.
1603 if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64))
1604 return Align;
1605 }
1606 // On non-Darwin, the stack type alignment is always 4.
1607 if (!IsDarwinVectorABI) {
1608 // Set explicit alignment, since we may need to realign the top.
1609 return MinABIStackAlignInBytes;
1610 }
1611
1612 // Otherwise, if the type contains an SSE vector type, the alignment is 16.
1613 if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) ||
1614 isRecordWithSIMDVectorType(getContext(), Ty)))
1615 return 16;
1616
1617 return MinABIStackAlignInBytes;
1618}
1619
1620ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
1621 CCState &State) const {
1622 if (!ByVal) {
1623 if (State.FreeRegs) {
1624 --State.FreeRegs; // Non-byval indirects just use one pointer.
1625 if (!IsMCUABI)
1626 return getNaturalAlignIndirectInReg(Ty);
1627 }
1628 return getNaturalAlignIndirect(Ty, false);
1629 }
1630
1631 // Compute the byval alignment.
1632 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
1633 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
1634 if (StackAlign == 0)
1635 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);
1636
1637 // If the stack alignment is less than the type alignment, realign the
1638 // argument.
1639 bool Realign = TypeAlign > StackAlign;
1640 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
1641 /*ByVal=*/true, Realign);
1642}
1643
1644X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
1645 const Type *T = isSingleElementStruct(Ty, getContext());
1646 if (!T)
1647 T = Ty.getTypePtr();
1648
1649 if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
1650 BuiltinType::Kind K = BT->getKind();
1651 if (K == BuiltinType::Float || K == BuiltinType::Double)
1652 return Float;
1653 }
1654 return Integer;
1655}
1656
1657bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
1658 if (!IsSoftFloatABI) {
1659 Class C = classify(Ty);
1660 if (C == Float)
1661 return false;
1662 }
1663
1664 unsigned Size = getContext().getTypeSize(Ty);
1665 unsigned SizeInRegs = (Size + 31) / 32;
1666
1667 if (SizeInRegs == 0)
1668 return false;
1669
1670 if (!IsMCUABI) {
1671 if (SizeInRegs > State.FreeRegs) {
1672 State.FreeRegs = 0;
1673 return false;
1674 }
1675 } else {
1676 // The MCU psABI allows passing parameters in-reg even if there are
1677 // earlier parameters that are passed on the stack. Also,
1678 // it does not allow passing >8-byte structs in-register,
1679 // even if there are 3 free registers available.
1680 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
1681 return false;
1682 }
1683
1684 State.FreeRegs -= SizeInRegs;
1685 return true;
1686}
1687
1688bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
1689 bool &InReg,
1690 bool &NeedsPadding) const {
1691 // On Windows, aggregates other than HFAs are never passed in registers, and
1692  // they do not consume register slots. Homogeneous floating-point aggregates
1693 // (HFAs) have already been dealt with at this point.
1694 if (IsWin32StructABI && isAggregateTypeForABI(Ty))
1695 return false;
1696
1697 NeedsPadding = false;
1698 InReg = !IsMCUABI;
1699
1700 if (!updateFreeRegs(Ty, State))
1701 return false;
1702
1703 if (IsMCUABI)
1704 return true;
1705
1706 if (State.CC == llvm::CallingConv::X86_FastCall ||
1707 State.CC == llvm::CallingConv::X86_VectorCall ||
1708 State.CC == llvm::CallingConv::X86_RegCall) {
1709 if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
1710 NeedsPadding = true;
1711
1712 return false;
1713 }
1714
1715 return true;
1716}
1717
1718bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
1719 if (!updateFreeRegs(Ty, State))
1720 return false;
1721
1722 if (IsMCUABI)
1723 return false;
1724
1725 if (State.CC == llvm::CallingConv::X86_FastCall ||
1726 State.CC == llvm::CallingConv::X86_VectorCall ||
1727 State.CC == llvm::CallingConv::X86_RegCall) {
1728 if (getContext().getTypeSize(Ty) > 32)
1729 return false;
1730
1731 return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
1732 Ty->isReferenceType());
1733 }
1734
1735 return true;
1736}
1737
1738void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const {
1739  // Vectorcall on x86 works subtly differently than on x64, so the format is
1740  // a bit different from the x64 version. First, all vector types (not HVAs)
1741  // are assigned, with the first 6 ending up in the [XYZ]MM0-5 registers.
1742  // This differs from the x64 implementation, where the first 6 arguments by
1743  // INDEX get registers.
1744 // In the second pass over the arguments, HVAs are passed in the remaining
1745 // vector registers if possible, or indirectly by address. The address will be
1746 // passed in ECX/EDX if available. Any other arguments are passed according to
1747 // the usual fastcall rules.
1748 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
1749 for (int I = 0, E = Args.size(); I < E; ++I) {
1750 const Type *Base = nullptr;
1751 uint64_t NumElts = 0;
1752 const QualType &Ty = Args[I].type;
1753 if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
1754 isHomogeneousAggregate(Ty, Base, NumElts)) {
1755 if (State.FreeSSERegs >= NumElts) {
1756 State.FreeSSERegs -= NumElts;
1757 Args[I].info = ABIArgInfo::getDirectInReg();
1758 State.IsPreassigned.set(I);
1759 }
1760 }
1761 }
1762}
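A hedged sketch of the two passes, with a hypothetical signature:

  // void __vectorcall f(__m128 a, HVA4 h, __m128 b);
  // First pass: 'a' and 'b' (plain vector/builtin types) are assigned XMM registers.
  // Second pass: the HVA 'h' takes whatever XMM registers remain, or is passed
  // indirectly by address if none are left.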
1763
1764ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
1765 CCState &State) const {
1766 // FIXME: Set alignment on indirect arguments.
1767 bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
1768 bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
1769 bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
1770
1771 Ty = useFirstFieldIfTransparentUnion(Ty);
1772 TypeInfo TI = getContext().getTypeInfo(Ty);
1773
1774 // Check with the C++ ABI first.
1775 const RecordType *RT = Ty->getAs<RecordType>();
1776 if (RT) {
1777 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
1778 if (RAA == CGCXXABI::RAA_Indirect) {
1779 return getIndirectResult(Ty, false, State);
1780 } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
1781 // The field index doesn't matter, we'll fix it up later.
1782 return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
1783 }
1784 }
1785
1786  // Regcall uses the concept of a homogeneous vector aggregate, similar
1787 // to other targets.
1788 const Type *Base = nullptr;
1789 uint64_t NumElts = 0;
1790 if ((IsRegCall || IsVectorCall) &&
1791 isHomogeneousAggregate(Ty, Base, NumElts)) {
1792 if (State.FreeSSERegs >= NumElts) {
1793 State.FreeSSERegs -= NumElts;
1794
1795 // Vectorcall passes HVAs directly and does not flatten them, but regcall
1796 // does.
1797 if (IsVectorCall)
1798 return getDirectX86Hva();
1799
1800 if (Ty->isBuiltinType() || Ty->isVectorType())
1801 return ABIArgInfo::getDirect();
1802 return ABIArgInfo::getExpand();
1803 }
1804 return getIndirectResult(Ty, /*ByVal=*/false, State);
1805 }
1806
1807 if (isAggregateTypeForABI(Ty)) {
1808 // Structures with flexible arrays are always indirect.
1809 // FIXME: This should not be byval!
1810 if (RT && RT->getDecl()->hasFlexibleArrayMember())
1811 return getIndirectResult(Ty, true, State);
1812
1813 // Ignore empty structs/unions on non-Windows.
1814 if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
1815 return ABIArgInfo::getIgnore();
1816
1817 llvm::LLVMContext &LLVMContext = getVMContext();
1818 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1819 bool NeedsPadding = false;
1820 bool InReg;
1821 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1822 unsigned SizeInRegs = (TI.Width + 31) / 32;
1823 SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
1824 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
1825 if (InReg)
1826 return ABIArgInfo::getDirectInReg(Result);
1827 else
1828 return ABIArgInfo::getDirect(Result);
1829 }
1830 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
1831
1832 // Pass over-aligned aggregates on Windows indirectly. This behavior was
1833 // added in MSVC 2015.
1834 if (IsWin32StructABI && TI.AlignIsRequired && TI.Align > 32)
1835 return getIndirectResult(Ty, /*ByVal=*/false, State);
1836
1837 // Expand small (<= 128-bit) record types when we know that the stack layout
1838 // of those arguments will match the struct. This is important because the
1839 // LLVM backend isn't smart enough to remove byval, which inhibits many
1840 // optimizations.
1841 // Don't do this for the MCU if there are still free integer registers
1842 // (see X86_64 ABI for full explanation).
1843 if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
1844 canExpandIndirectArgument(Ty))
1845 return ABIArgInfo::getExpandWithPadding(
1846 IsFastCall || IsVectorCall || IsRegCall, PaddingType);
1847
1848 return getIndirectResult(Ty, true, State);
1849 }
1850
1851 if (const VectorType *VT = Ty->getAs<VectorType>()) {
1852 // On Windows, vectors are passed directly if registers are available, or
1853 // indirectly if not. This avoids the need to align argument memory. Pass
1854 // user-defined vector types larger than 512 bits indirectly for simplicity.
1855 if (IsWin32StructABI) {
1856 if (TI.Width <= 512 && State.FreeSSERegs > 0) {
1857 --State.FreeSSERegs;
1858 return ABIArgInfo::getDirectInReg();
1859 }
1860 return getIndirectResult(Ty, /*ByVal=*/false, State);
1861 }
1862
1863    // On Darwin, some vectors are passed in memory; we handle this by passing
1864    // them as an i8/i16/i32/i64.
1865 if (IsDarwinVectorABI) {
1866 if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) ||
1867 (TI.Width == 64 && VT->getNumElements() == 1))
1868 return ABIArgInfo::getDirect(
1869 llvm::IntegerType::get(getVMContext(), TI.Width));
1870 }
1871
1872 if (IsX86_MMXType(CGT.ConvertType(Ty)))
1873 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
1874
1875 return ABIArgInfo::getDirect();
1876 }
1877
1878
1879 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1880 Ty = EnumTy->getDecl()->getIntegerType();
1881
1882 bool InReg = shouldPrimitiveUseInReg(Ty, State);
1883
1884 if (isPromotableIntegerTypeForABI(Ty)) {
1885 if (InReg)
1886 return ABIArgInfo::getExtendInReg(Ty);
1887 return ABIArgInfo::getExtend(Ty);
1888 }
1889
1890 if (const auto * EIT = Ty->getAs<ExtIntType>()) {
1891 if (EIT->getNumBits() <= 64) {
1892 if (InReg)
1893 return ABIArgInfo::getDirectInReg();
1894 return ABIArgInfo::getDirect();
1895 }
1896 return getIndirectResult(Ty, /*ByVal=*/false, State);
1897 }
1898
1899 if (InReg)
1900 return ABIArgInfo::getDirectInReg();
1901 return ABIArgInfo::getDirect();
1902}
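Two illustrative arguments under the default calling convention on i386 (hypothetical declarations):

  struct Flex { int n; int data[]; };   // flexible array member: always passed indirectly (byval)
  _ExtInt(128) wide;                    // wider than 64 bits: passed indirectly, not byval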
1903
1904void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
1905 CCState State(FI);
1906 if (IsMCUABI)
1907 State.FreeRegs = 3;
1908 else if (State.CC == llvm::CallingConv::X86_FastCall) {
1909 State.FreeRegs = 2;
1910 State.FreeSSERegs = 3;
1911 } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1912 State.FreeRegs = 2;
1913 State.FreeSSERegs = 6;
1914 } else if (FI.getHasRegParm())
1915 State.FreeRegs = FI.getRegParm();
1916 else if (State.CC == llvm::CallingConv::X86_RegCall) {
1917 State.FreeRegs = 5;
1918 State.FreeSSERegs = 8;
1919 } else if (IsWin32StructABI) {
1920 // Since MSVC 2015, the first three SSE vectors have been passed in
1921 // registers. The rest are passed indirectly.
1922 State.FreeRegs = DefaultNumRegisterParameters;
1923 State.FreeSSERegs = 3;
1924 } else
1925 State.FreeRegs = DefaultNumRegisterParameters;
1926
1927 if (!::classifyReturnType(getCXXABI(), FI, *this)) {
1928 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
1929 } else if (FI.getReturnInfo().isIndirect()) {
1930 // The C++ ABI is not aware of register usage, so we have to check if the
1931 // return value was sret and put it in a register ourselves if appropriate.
1932 if (State.FreeRegs) {
1933 --State.FreeRegs; // The sret parameter consumes a register.
1934 if (!IsMCUABI)
1935 FI.getReturnInfo().setInReg(true);
1936 }
1937 }
1938
1939 // The chain argument effectively gives us another free register.
1940 if (FI.isChainCall())
1941 ++State.FreeRegs;
1942
1943 // For vectorcall, do a first pass over the arguments, assigning FP and vector
1944 // arguments to XMM registers as available.
1945 if (State.CC == llvm::CallingConv::X86_VectorCall)
1946 runVectorCallFirstPass(FI, State);
1947
1948 bool UsedInAlloca = false;
1949 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
1950 for (int I = 0, E = Args.size(); I < E; ++I) {
1951 // Skip arguments that have already been assigned.
1952 if (State.IsPreassigned.test(I))
1953 continue;
1954
1955 Args[I].info = classifyArgumentType(Args[I].type, State);
1956 UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca);
1957 }
1958
1959 // If we needed to use inalloca for any argument, do a second pass and rewrite
1960 // all the memory arguments to use inalloca.
1961 if (UsedInAlloca)
1962 rewriteWithInAlloca(FI);
1963}
1964
1965void
1966X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1967 CharUnits &StackOffset, ABIArgInfo &Info,
1968 QualType Type) const {
1969 // Arguments are always 4-byte-aligned.
1970 CharUnits WordSize = CharUnits::fromQuantity(4);
1971  assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");
1972
1973 // sret pointers and indirect things will require an extra pointer
1974 // indirection, unless they are byval. Most things are byval, and will not
1975 // require this indirection.
1976 bool IsIndirect = false;
1977 if (Info.isIndirect() && !Info.getIndirectByVal())
1978 IsIndirect = true;
1979 Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect);
1980 llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
1981 if (IsIndirect)
1982 LLTy = LLTy->getPointerTo(0);
1983 FrameFields.push_back(LLTy);
1984 StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);
1985
1986 // Insert padding bytes to respect alignment.
1987 CharUnits FieldEnd = StackOffset;
1988 StackOffset = FieldEnd.alignTo(WordSize);
1989 if (StackOffset != FieldEnd) {
1990 CharUnits NumBytes = StackOffset - FieldEnd;
1991 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
1992 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
1993 FrameFields.push_back(Ty);
1994 }
1995}
1996
1997static bool isArgInAlloca(const ABIArgInfo &Info) {
1998 // Leave ignored and inreg arguments alone.
1999 switch (Info.getKind()) {
2000 case ABIArgInfo::InAlloca:
2001 return true;
2002 case ABIArgInfo::Ignore:
2003 case ABIArgInfo::IndirectAliased:
2004 return false;
2005 case ABIArgInfo::Indirect:
2006 case ABIArgInfo::Direct:
2007 case ABIArgInfo::Extend:
2008 return !Info.getInReg();
2009 case ABIArgInfo::Expand:
2010 case ABIArgInfo::CoerceAndExpand:
2011 // These are aggregate types which are never passed in registers when
2012 // inalloca is involved.
2013 return true;
2014 }
2015  llvm_unreachable("invalid enum");
2016}
2017
2018void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
2019  assert(IsWin32StructABI && "inalloca only supported on win32");
2020
2021 // Build a packed struct type for all of the arguments in memory.
2022 SmallVector<llvm::Type *, 6> FrameFields;
2023
2024 // The stack alignment is always 4.
2025 CharUnits StackAlign = CharUnits::fromQuantity(4);
2026
2027 CharUnits StackOffset;
2028 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
2029
2030 // Put 'this' into the struct before 'sret', if necessary.
2031 bool IsThisCall =
2032 FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
2033 ABIArgInfo &Ret = FI.getReturnInfo();
2034 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
2035 isArgInAlloca(I->info)) {
2036 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
2037 ++I;
2038 }
2039
2040 // Put the sret parameter into the inalloca struct if it's in memory.
2041 if (Ret.isIndirect() && !Ret.getInReg()) {
2042 addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
2043 // On Windows, the hidden sret parameter is always returned in eax.
2044 Ret.setInAllocaSRet(IsWin32StructABI);
2045 }
2046
2047 // Skip the 'this' parameter in ecx.
2048 if (IsThisCall)
2049 ++I;
2050
2051 // Put arguments passed in memory into the struct.
2052 for (; I != E; ++I) {
2053 if (isArgInAlloca(I->info))
2054 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
2055 }
2056
2057 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
2058 /*isPacked=*/true),
2059 StackAlign);
2060}
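A hedged example of the resulting frame, assuming an i686-windows-msvc target and hypothetical declarations:

  struct NonTrivial { NonTrivial(const NonTrivial &); int x; };
  void g(NonTrivial a, int b);
  // Both arguments land in a single packed frame struct, roughly
  // <{ %struct.NonTrivial, i32 }>, passed to g via the inalloca attribute.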
2061
2062Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
2063 Address VAListAddr, QualType Ty) const {
2064
2065 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
2066
2067 // x86-32 changes the alignment of certain arguments on the stack.
2068 //
2069 // Just messing with TypeInfo like this works because we never pass
2070 // anything indirectly.
2071 TypeInfo.Align = CharUnits::fromQuantity(
2072 getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
2073
2074 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
2075 TypeInfo, CharUnits::fromQuantity(4),
2076 /*AllowHigherAlign*/ true);
2077}
2078
2079bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
2080 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
2081  assert(Triple.getArch() == llvm::Triple::x86);
2082
2083 switch (Opts.getStructReturnConvention()) {
2084 case CodeGenOptions::SRCK_Default:
2085 break;
2086 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
2087 return false;
2088 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
2089 return true;
2090 }
2091
2092 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
2093 return true;
2094
2095 switch (Triple.getOS()) {
2096 case llvm::Triple::DragonFly:
2097 case llvm::Triple::FreeBSD:
2098 case llvm::Triple::OpenBSD:
2099 case llvm::Triple::Win32:
2100 return true;
2101 default:
2102 return false;
2103 }
2104}
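The SRCK_* values map to the -fpcc-struct-return and -freg-struct-return driver flags, so an invocation along the lines of

  clang --target=i386-unknown-linux-gnu -freg-struct-return -S t.c

should force small structs to be returned in registers even though Linux is not in the default-true OS list above (t.c is a placeholder filename).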
2105
2106static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV,
2107 CodeGen::CodeGenModule &CGM) {
2108 if (!FD->hasAttr<AnyX86InterruptAttr>())
2109 return;
2110
2111 llvm::Function *Fn = cast<llvm::Function>(GV);
2112 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2113 if (FD->getNumParams() == 0)
2114 return;
2115
2116 auto PtrTy = cast<PointerType>(FD->getParamDecl(0)->getType());
2117 llvm::Type *ByValTy = CGM.getTypes().ConvertType(PtrTy->getPointeeType());
2118 llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
2119 Fn->getContext(), ByValTy);
2120 Fn->addParamAttr(0, NewAttr);
2121}
2122
2123void X86_32TargetCodeGenInfo::setTargetAttributes(
2124 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2125 if (GV->isDeclaration())
2126 return;
2127 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2128 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2129 llvm::Function *Fn = cast<llvm::Function>(GV);
2130 Fn->addFnAttr("stackrealign");
2131 }
2132
2133 addX86InterruptAttrs(FD, GV, CGM);
2134 }
2135}
2136
2137bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
2138 CodeGen::CodeGenFunction &CGF,
2139 llvm::Value *Address) const {
2140 CodeGen::CGBuilderTy &Builder = CGF.Builder;
2141
2142 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
2143
2144 // 0-7 are the eight integer registers; the order is different
2145 // on Darwin (for EH), but the range is the same.
2146 // 8 is %eip.
2147 AssignToArrayRange(Builder, Address, Four8, 0, 8);
2148
2149 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
2150 // 12-16 are st(0..4). Not sure why we stop at 4.
2151 // These have size 16, which is sizeof(long double) on
2152 // platforms with 8-byte alignment for that type.
2153 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
2154 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
2155
2156 } else {
2157 // 9 is %eflags, which doesn't get a size on Darwin for some
2158 // reason.
2159 Builder.CreateAlignedStore(
2160 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
2161 CharUnits::One());
2162
2163 // 11-16 are st(0..5). Not sure why we stop at 5.
2164 // These have size 12, which is sizeof(long double) on
2165 // platforms with 4-byte alignment for that type.
2166 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
2167 AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
2168 }
2169
2170 return false;
2171}
2172
2173//===----------------------------------------------------------------------===//
2174// X86-64 ABI Implementation
2175//===----------------------------------------------------------------------===//
2176
2177
2178namespace {
2179/// The AVX ABI level for X86 targets.
2180enum class X86AVXABILevel {
2181 None,
2182 AVX,
2183 AVX512
2184};
2185
2186/// \returns the size in bits of the largest (native) vector for \p AVXLevel.
2187static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
2188 switch (AVXLevel) {
2189 case X86AVXABILevel::AVX512:
2190 return 512;
2191 case X86AVXABILevel::AVX:
2192 return 256;
2193 case X86AVXABILevel::None:
2194 return 128;
2195 }
2196  llvm_unreachable("Unknown AVXLevel");
2197}
2198
2199/// X86_64ABIInfo - The X86_64 ABI information.
2200class X86_64ABIInfo : public SwiftABIInfo {
2201 enum Class {
2202 Integer = 0,
2203 SSE,
2204 SSEUp,
2205 X87,
2206 X87Up,
2207 ComplexX87,
2208 NoClass,
2209 Memory
2210 };
2211
2212 /// merge - Implement the X86_64 ABI merging algorithm.
2213 ///
2214 /// Merge an accumulating classification \arg Accum with a field
2215 /// classification \arg Field.
2216 ///
2217 /// \param Accum - The accumulating classification. This should
2218 /// always be either NoClass or the result of a previous merge
2219 /// call. In addition, this should never be Memory (the caller
2220 /// should just return Memory for the aggregate).
2221 static Class merge(Class Accum, Class Field);
2222
2223 /// postMerge - Implement the X86_64 ABI post merging algorithm.
2224 ///
2225 /// Post merger cleanup, reduces a malformed Hi and Lo pair to
2226 /// final MEMORY or SSE classes when necessary.
2227 ///
2228 /// \param AggregateSize - The size of the current aggregate in
2229 /// the classification process.
2230 ///
2231 /// \param Lo - The classification for the parts of the type
2232 /// residing in the low word of the containing object.
2233 ///
2234 /// \param Hi - The classification for the parts of the type
2235 /// residing in the higher words of the containing object.
2236 ///
2237 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
2238
2239 /// classify - Determine the x86_64 register classes in which the
2240 /// given type T should be passed.
2241 ///
2242 /// \param Lo - The classification for the parts of the type
2243 /// residing in the low word of the containing object.
2244 ///
2245 /// \param Hi - The classification for the parts of the type
2246 /// residing in the high word of the containing object.
2247 ///
2248 /// \param OffsetBase - The bit offset of this type in the
2249 /// containing object. Some parameters are classified different
2250 /// depending on whether they straddle an eightbyte boundary.
2251 ///
2252 /// \param isNamedArg - Whether the argument in question is a "named"
2253 /// argument, as used in AMD64-ABI 3.5.7.
2254 ///
2255 /// If a word is unused its result will be NoClass; if a type should
2256 /// be passed in Memory then at least the classification of \arg Lo
2257 /// will be Memory.
2258 ///
2259 /// The \arg Lo class will be NoClass iff the argument is ignored.
2260 ///
2261 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
2262 /// also be ComplexX87.
2263 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2264 bool isNamedArg) const;
2265
2266 llvm::Type *GetByteVectorType(QualType Ty) const;
2267 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
2268 unsigned IROffset, QualType SourceTy,
2269 unsigned SourceOffset) const;
2270 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
2271 unsigned IROffset, QualType SourceTy,
2272 unsigned SourceOffset) const;
2273
2274  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
2275  /// result such that the argument will be returned in memory.
2276 ABIArgInfo getIndirectReturnResult(QualType Ty) const;
2277
2278  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
2279  /// such that the argument will be passed in memory.
2280 ///
2281 /// \param freeIntRegs - The number of free integer registers remaining
2282 /// available.
2283 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
2284
2285 ABIArgInfo classifyReturnType(QualType RetTy) const;
2286
2287 ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
2288 unsigned &neededInt, unsigned &neededSSE,
2289 bool isNamedArg) const;
2290
2291 ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
2292 unsigned &NeededSSE) const;
2293
2294 ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
2295 unsigned &NeededSSE) const;
2296
2297 bool IsIllegalVectorType(QualType Ty) const;
2298
2299 /// The 0.98 ABI revision clarified a lot of ambiguities,
2300 /// unfortunately in ways that were not always consistent with
2301 /// certain previous compilers. In particular, platforms which
2302 /// required strict binary compatibility with older versions of GCC
2303 /// may need to exempt themselves.
2304 bool honorsRevision0_98() const {
2305 return !getTarget().getTriple().isOSDarwin();
2306 }
2307
2308 /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
2309 /// classify it as INTEGER (for compatibility with older clang compilers).
2310 bool classifyIntegerMMXAsSSE() const {
2311 // Clang <= 3.8 did not do this.
2312 if (getContext().getLangOpts().getClangABICompat() <=
2313 LangOptions::ClangABI::Ver3_8)
2314 return false;
2315
2316 const llvm::Triple &Triple = getTarget().getTriple();
2317 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
2318 return false;
2319 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2320 return false;
2321 return true;
2322 }
2323
2324 // GCC classifies vectors of __int128 as memory.
2325 bool passInt128VectorsInMem() const {
2326 // Clang <= 9.0 did not do this.
2327 if (getContext().getLangOpts().getClangABICompat() <=
2328 LangOptions::ClangABI::Ver9)
2329 return false;
2330
2331 const llvm::Triple &T = getTarget().getTriple();
2332 return T.isOSLinux() || T.isOSNetBSD();
2333 }
2334
2335 X86AVXABILevel AVXLevel;
2336  // Some ABIs (e.g. the X32 ABI and Native Client OS) use 32-bit pointers on
2337 // 64-bit hardware.
2338 bool Has64BitPointers;
2339
2340public:
2341 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
2342 SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2343 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
2344 }
2345
2346 bool isPassedUsingAVXType(QualType type) const {
2347 unsigned neededInt, neededSSE;
2348 // The freeIntRegs argument doesn't matter here.
2349 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
2350 /*isNamedArg*/true);
2351 if (info.isDirect()) {
2352 llvm::Type *ty = info.getCoerceToType();
2353 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
2354 return vectorTy->getPrimitiveSizeInBits().getFixedSize() > 128;
2355 }
2356 return false;
2357 }
2358
2359 void computeInfo(CGFunctionInfo &FI) const override;
2360
2361 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2362 QualType Ty) const override;
2363 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
2364 QualType Ty) const override;
2365
2366 bool has64BitPointers() const {
2367 return Has64BitPointers;
2368 }
2369
2370 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
2371 bool asReturnValue) const override {
2372 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2373 }
2374 bool isSwiftErrorInRegister() const override {
2375 return true;
2376 }
2377};
2378
2379/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
2380class WinX86_64ABIInfo : public SwiftABIInfo {
2381public:
2382 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2383 : SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2384 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
2385
2386 void computeInfo(CGFunctionInfo &FI) const override;
2387
2388 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2389 QualType Ty) const override;
2390
2391 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
2392 // FIXME: Assumes vectorcall is in use.
2393 return isX86VectorTypeForVectorCall(getContext(), Ty);
2394 }
2395
2396 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
2397 uint64_t NumMembers) const override {
2398 // FIXME: Assumes vectorcall is in use.
2399 return isX86VectorCallAggregateSmallEnough(NumMembers);
2400 }
2401
2402 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
2403 bool asReturnValue) const override {
2404 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2405 }
2406
2407 bool isSwiftErrorInRegister() const override {
2408 return true;
2409 }
2410
2411private:
2412 ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
2413 bool IsVectorCall, bool IsRegCall) const;
2414 ABIArgInfo reclassifyHvaArgForVectorCall(QualType Ty, unsigned &FreeSSERegs,
2415 const ABIArgInfo &current) const;
2416
2417 X86AVXABILevel AVXLevel;
2418
2419 bool IsMingw64;
2420};
2421
2422class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2423public:
2424 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2425 : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {}
2426
2427 const X86_64ABIInfo &getABIInfo() const {
2428 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
2429 }
2430
2431 /// Disable tail call on x86-64. The epilogue code before the tail jump blocks
2432 /// autoreleaseRV/retainRV and autoreleaseRV/unsafeClaimRV optimizations.
2433 bool markARCOptimizedReturnCallsAsNoTail() const override { return true; }
2434
2435 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2436 return 7;
2437 }
2438
2439 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2440 llvm::Value *Address) const override {
2441 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2442
2443 // 0-15 are the 16 integer registers.
2444 // 16 is %rip.
2445 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2446 return false;
2447 }
2448
2449 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
2450 StringRef Constraint,
2451 llvm::Type* Ty) const override {
2452 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2453 }
2454
2455 bool isNoProtoCallVariadic(const CallArgList &args,
2456 const FunctionNoProtoType *fnType) const override {
2457    // The default CC on x86-64 sets %al to the number of SSE
2458 // registers used, and GCC sets this when calling an unprototyped
2459 // function, so we override the default behavior. However, don't do
2460 // that when AVX types are involved: the ABI explicitly states it is
2461 // undefined, and it doesn't work in practice because of how the ABI
2462 // defines varargs anyway.
2463 if (fnType->getCallConv() == CC_C) {
2464 bool HasAVXType = false;
2465 for (CallArgList::const_iterator
2466 it = args.begin(), ie = args.end(); it != ie; ++it) {
2467 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2468 HasAVXType = true;
2469 break;
2470 }
2471 }
2472
2473 if (!HasAVXType)
2474 return true;
2475 }
2476
2477 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
2478 }
2479
2480 llvm::Constant *
2481 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
2482 unsigned Sig = (0xeb << 0) | // jmp rel8
2483 (0x06 << 8) | // .+0x08
2484 ('v' << 16) |
2485 ('2' << 24);
2486 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
2487 }
2488
2489 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2490 CodeGen::CodeGenModule &CGM) const override {
2491 if (GV->isDeclaration())
2492 return;
2493 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2494 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2495 llvm::Function *Fn = cast<llvm::Function>(GV);
2496 Fn->addFnAttr("stackrealign");
2497 }
2498
2499 addX86InterruptAttrs(FD, GV, CGM);
2500 }
2501 }
2502
2503 void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
2504 const FunctionDecl *Caller,
2505 const FunctionDecl *Callee,
2506 const CallArgList &Args) const override;
2507};
2508
2509static void initFeatureMaps(const ASTContext &Ctx,
2510 llvm::StringMap<bool> &CallerMap,
2511 const FunctionDecl *Caller,
2512 llvm::StringMap<bool> &CalleeMap,
2513 const FunctionDecl *Callee) {
2514 if (CalleeMap.empty() && CallerMap.empty()) {
2515 // The caller is potentially nullptr in the case where the call isn't in a
2516    // function. In this case, getFunctionFeatureMap ensures we just get
2517    // the TU-level setting (since it cannot be modified by 'target').
2518 Ctx.getFunctionFeatureMap(CallerMap, Caller);
2519 Ctx.getFunctionFeatureMap(CalleeMap, Callee);
2520 }
2521}
2522
2523static bool checkAVXParamFeature(DiagnosticsEngine &Diag,
2524 SourceLocation CallLoc,
2525 const llvm::StringMap<bool> &CallerMap,
2526 const llvm::StringMap<bool> &CalleeMap,
2527 QualType Ty, StringRef Feature,
2528 bool IsArgument) {
2529 bool CallerHasFeat = CallerMap.lookup(Feature);
2530 bool CalleeHasFeat = CalleeMap.lookup(Feature);
2531 if (!CallerHasFeat && !CalleeHasFeat)
2532 return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
2533 << IsArgument << Ty << Feature;
2534
2535 // Mixing calling conventions here is very clearly an error.
2536 if (!CallerHasFeat || !CalleeHasFeat)
2537 return Diag.Report(CallLoc, diag::err_avx_calling_convention)
2538 << IsArgument << Ty << Feature;
2539
2540 // Else, both caller and callee have the required feature, so there is no need
2541 // to diagnose.
2542 return false;
2543}
2544
2545static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx,
2546 SourceLocation CallLoc,
2547 const llvm::StringMap<bool> &CallerMap,
2548 const llvm::StringMap<bool> &CalleeMap, QualType Ty,
2549 bool IsArgument) {
2550 uint64_t Size = Ctx.getTypeSize(Ty);
2551 if (Size > 256)
2552 return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
2553 "avx512f", IsArgument);
2554
2555 if (Size > 128)
2556 return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx",
2557 IsArgument);
2558
2559 return false;
2560}
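Concretely (a hedged reading of the two helpers above, with a hypothetical callee):

  // __m256 sum(__m256 a, __m256 b);
  // A 256-bit vector argument or return value triggers the "avx" check and a
  // 512-bit one triggers "avx512f"; if neither caller nor callee has the
  // feature, a warning is emitted, and if exactly one side has it, an error.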
2561
2562void X86_64TargetCodeGenInfo::checkFunctionCallABI(
2563 CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
2564 const FunctionDecl *Callee, const CallArgList &Args) const {
2565 llvm::StringMap<bool> CallerMap;
2566 llvm::StringMap<bool> CalleeMap;
2567 unsigned ArgIndex = 0;
2568
2569  // We need to loop through the actual call arguments rather than the
2570  // function's parameters, in case this is variadic.
2571 for (const CallArg &Arg : Args) {
2572    // The "avx" feature changes how vectors >128 bits in size are passed. "avx512f"
2573    // additionally changes how vectors >256 bits in size are passed. Like GCC, we
2574 // warn when a function is called with an argument where this will change.
2575 // Unlike GCC, we also error when it is an obvious ABI mismatch, that is,
2576 // the caller and callee features are mismatched.
2577 // Unfortunately, we cannot do this diagnostic in SEMA, since the callee can
2578 // change its ABI with attribute-target after this call.
2579 if (Arg.getType()->isVectorType() &&
2580 CGM.getContext().getTypeSize(Arg.getType()) > 128) {
2581 initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
2582 QualType Ty = Arg.getType();
2583 // The CallArg seems to have desugared the type already, so for clearer
2584 // diagnostics, replace it with the type in the FunctionDecl if possible.
2585 if (ArgIndex < Callee->getNumParams())
2586 Ty = Callee->getParamDecl(ArgIndex)->getType();
2587
2588 if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
2589 CalleeMap, Ty, /*IsArgument*/ true))
2590 return;
2591 }
2592 ++ArgIndex;
2593 }
2594
2595 // Check return always, as we don't have a good way of knowing in codegen
2596 // whether this value is used, tail-called, etc.
2597 if (Callee->getReturnType()->isVectorType() &&
2598 CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) {
2599 initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
2600 checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
2601 CalleeMap, Callee->getReturnType(),
2602 /*IsArgument*/ false);
2603 }
2604}
2605
2606static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2607 // If the argument does not end in .lib, automatically add the suffix.
2608 // If the argument contains a space, enclose it in quotes.
2609 // This matches the behavior of MSVC.
2610 bool Quote = (Lib.find(' ') != StringRef::npos);
2611 std::string ArgStr = Quote ? "\"" : "";
2612 ArgStr += Lib;
2613 if (!Lib.endswith_insensitive(".lib") && !Lib.endswith_insensitive(".a"))
2614 ArgStr += ".lib";
2615 ArgStr += Quote ? "\"" : "";
2616 return ArgStr;
2617}
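A few hedged input/output pairs for the helper above:

  qualifyWindowsLibrary("kernel32")  // "kernel32.lib" (suffix appended)
  qualifyWindowsLibrary("my lib")    // "\"my lib.lib\"" (contains a space, so quoted)
  qualifyWindowsLibrary("libfoo.a")  // "libfoo.a" (accepted suffix, unchanged)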
2618
2619class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
2620public:
2621 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2622 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
2623 unsigned NumRegisterParameters)
2624 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2625 Win32StructABI, NumRegisterParameters, false) {}
2626
2627 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2628 CodeGen::CodeGenModule &CGM) const override;
2629
2630 void getDependentLibraryOption(llvm::StringRef Lib,
2631 llvm::SmallString<24> &Opt) const override {
2632 Opt = "/DEFAULTLIB:";
2633 Opt += qualifyWindowsLibrary(Lib);
2634 }
2635
2636 void getDetectMismatchOption(llvm::StringRef Name,
2637 llvm::StringRef Value,
2638 llvm::SmallString<32> &Opt) const override {
2639 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2640 }
2641};
2642
2643static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2644 CodeGen::CodeGenModule &CGM) {
2645 if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
2646
2647 if (CGM.getCodeGenOpts().StackProbeSize != 4096)
2648 Fn->addFnAttr("stack-probe-size",
2649 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
2650 if (CGM.getCodeGenOpts().NoStackArgProbe)
2651 Fn->addFnAttr("no-stack-arg-probe");
2652 }
2653}
2654
2655void WinX86_32TargetCodeGenInfo::setTargetAttributes(
2656 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2657 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2658 if (GV->isDeclaration())
2659 return;
2660 addStackProbeTargetAttributes(D, GV, CGM);
2661}
2662
2663class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2664public:
2665 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2666 X86AVXABILevel AVXLevel)
2667 : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {}
2668
2669 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2670 CodeGen::CodeGenModule &CGM) const override;
2671
2672 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2673 return 7;
2674 }
2675
2676 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2677 llvm::Value *Address) const override {
2678 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2679
2680 // 0-15 are the 16 integer registers.
2681 // 16 is %rip.
2682 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2683 return false;
2684 }
2685
2686 void getDependentLibraryOption(llvm::StringRef Lib,
2687 llvm::SmallString<24> &Opt) const override {
2688 Opt = "/DEFAULTLIB:";
2689 Opt += qualifyWindowsLibrary(Lib);
2690 }
2691
2692 void getDetectMismatchOption(llvm::StringRef Name,
2693 llvm::StringRef Value,
2694 llvm::SmallString<32> &Opt) const override {
2695 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2696 }
2697};
2698
2699void WinX86_64TargetCodeGenInfo::setTargetAttributes(
2700 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2701 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2702 if (GV->isDeclaration())
2703 return;
2704 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2705 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2706 llvm::Function *Fn = cast<llvm::Function>(GV);
2707 Fn->addFnAttr("stackrealign");
2708 }
2709
2710 addX86InterruptAttrs(FD, GV, CGM);
2711 }
2712
2713 addStackProbeTargetAttributes(D, GV, CGM);
2714}
2715}
2716
2717void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
2718 Class &Hi) const {
2719 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
2720 //
2721 // (a) If one of the classes is Memory, the whole argument is passed in
2722 // memory.
2723 //
2724 // (b) If X87UP is not preceded by X87, the whole argument is passed in
2725 // memory.
2726 //
2727 // (c) If the size of the aggregate exceeds two eightbytes and the first
2728 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
2729 // argument is passed in memory. NOTE: This is necessary to keep the
2730 // ABI working for processors that don't support the __m256 type.
2731 //
2732 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
2733 //
2734 // Some of these are enforced by the merging logic. Others can arise
2735 // only with unions; for example:
2736 // union { _Complex double; unsigned; }
2737 //
2738 // Note that clauses (b) and (c) were added in 0.98.
2739 //
2740 if (Hi == Memory)
2741 Lo = Memory;
2742 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2743 Lo = Memory;
2744 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2745 Lo = Memory;
2746 if (Hi == SSEUp && Lo != SSE)
2747 Hi = SSE;
2748}
2749
2750X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2751 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
2752 // classified recursively so that always two fields are
2753 // considered. The resulting class is calculated according to
2754 // the classes of the fields in the eightbyte:
2755 //
2756 // (a) If both classes are equal, this is the resulting class.
2757 //
2758 // (b) If one of the classes is NO_CLASS, the resulting class is
2759 // the other class.
2760 //
2761 // (c) If one of the classes is MEMORY, the result is the MEMORY
2762 // class.
2763 //
2764 // (d) If one of the classes is INTEGER, the result is the
2765 // INTEGER.
2766 //
2767 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
2768 // MEMORY is used as class.
2769 //
2770 // (f) Otherwise class SSE is used.
2771
2772 // Accum should never be memory (we should have returned) or
2773 // ComplexX87 (because this cannot be passed in a structure).
2774  assert((Accum != Memory && Accum != ComplexX87) &&
2775         "Invalid accumulated classification during merge.");
2776 if (Accum == Field || Field == NoClass)
2777 return Accum;
2778 if (Field == Memory)
2779 return Memory;
2780 if (Accum == NoClass)
2781 return Field;
2782 if (Accum == Integer || Field == Integer)
2783 return Integer;
2784 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2785 Accum == X87 || Accum == X87Up)
2786 return Memory;
2787 return SSE;
2788}
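A worked example of the merge rules, using a hypothetical union:

  union U { double d; unsigned u; };
  // Both fields share the first eightbyte: 'd' classifies as SSE and 'u' as
  // INTEGER, so rule (d) makes the merged class INTEGER and U is passed in a
  // general-purpose register.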
2789
2790void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
2791 Class &Lo, Class &Hi, bool isNamedArg) const {
2792 // FIXME: This code can be simplified by introducing a simple value class for
2793 // Class pairs with appropriate constructor methods for the various
2794 // situations.
2795
2796 // FIXME: Some of the split computations are wrong; unaligned vectors
2797 // shouldn't be passed in registers for example, so there is no chance they
2798 // can straddle an eightbyte. Verify & simplify.
2799
2800 Lo = Hi = NoClass;
2801
2802 Class &Current = OffsetBase < 64 ? Lo : Hi;
2803 Current = Memory;
2804
2805 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
2806 BuiltinType::Kind k = BT->getKind();
2807
2808 if (k == BuiltinType::Void) {
2809 Current = NoClass;
2810 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2811 Lo = Integer;
2812 Hi = Integer;
2813 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2814 Current = Integer;
2815 } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2816 Current = SSE;
2817 } else if (k == BuiltinType::LongDouble) {
2818 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2819 if (LDF == &llvm::APFloat::IEEEquad()) {
2820 Lo = SSE;
2821 Hi = SSEUp;
2822 } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
2823 Lo = X87;
2824 Hi = X87Up;
2825 } else if (LDF == &llvm::APFloat::IEEEdouble()) {
2826 Current = SSE;
2827 } else
2828      llvm_unreachable("unexpected long double representation!");
2829 }
2830 // FIXME: _Decimal32 and _Decimal64 are SSE.
2831 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
2832 return;
2833 }
2834
2835 if (const EnumType *ET = Ty->getAs<EnumType>()) {
2836 // Classify the underlying integer type.
2837 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2838 return;
2839 }
2840
2841 if (Ty->hasPointerRepresentation()) {
2842 Current = Integer;
2843 return;
2844 }
2845
2846 if (Ty->isMemberPointerType()) {
2847 if (Ty->isMemberFunctionPointerType()) {
2848 if (Has64BitPointers) {
2849 // If Has64BitPointers, this is an {i64, i64}, so classify both
2850 // Lo and Hi now.
2851 Lo = Hi = Integer;
2852 } else {
2853 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
2854 // straddles an eightbyte boundary, Hi should be classified as well.
2855 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2856 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2857 if (EB_FuncPtr != EB_ThisAdj) {
2858 Lo = Hi = Integer;
2859 } else {
2860 Current = Integer;
2861 }
2862 }
2863 } else {
2864 Current = Integer;
2865 }
2866 return;
2867 }
2868
2869 if (const VectorType *VT = Ty->getAs<VectorType>()) {
2870 uint64_t Size = getContext().getTypeSize(VT);
2871 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2872 // gcc passes the following as integer:
2873 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
2874 // 2 bytes - <2 x char>, <1 x short>
2875 // 1 byte - <1 x char>
2876 Current = Integer;
2877
2878 // If this type crosses an eightbyte boundary, it should be
2879 // split.
2880 uint64_t EB_Lo = (OffsetBase) / 64;
2881 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2882 if (EB_Lo != EB_Hi)
2883 Hi = Lo;
2884 } else if (Size == 64) {
2885 QualType ElementType = VT->getElementType();
2886
2887 // gcc passes <1 x double> in memory. :(
2888 if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
2889 return;
2890
2891 // gcc passes <1 x long long> as SSE but clang used to unconditionally
2892 // pass them as integer. For platforms where clang is the de facto
2893 // platform compiler, we must continue to use integer.
2894 if (!classifyIntegerMMXAsSSE() &&
2895 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
2896 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2897 ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
2898 ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
2899 Current = Integer;
2900 else
2901 Current = SSE;
2902
2903 // If this type crosses an eightbyte boundary, it should be
2904 // split.
2905 if (OffsetBase && OffsetBase != 64)
2906 Hi = Lo;
2907 } else if (Size == 128 ||
2908 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2909 QualType ElementType = VT->getElementType();
2910
2911      // gcc passes 256- and 512-bit <X x __int128> vectors in memory. :(
2912 if (passInt128VectorsInMem() && Size != 128 &&
2913 (ElementType->isSpecificBuiltinType(BuiltinType::Int128) ||
2914 ElementType->isSpecificBuiltinType(BuiltinType::UInt128)))
2915 return;
2916
2917      // Arguments of 256 bits are split into four eightbyte chunks. The
2918 // least significant one belongs to class SSE and all the others to class
2919 // SSEUP. The original Lo and Hi design considers that types can't be
2920 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
2921      // This design isn't correct for 256 bits, but since there are no cases
2922 // where the upper parts would need to be inspected, avoid adding
2923 // complexity and just consider Hi to match the 64-256 part.
2924 //
2925 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
2926 // registers if they are "named", i.e. not part of the "..." of a
2927 // variadic function.
2928 //
2929 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
2930 // split into eight eightbyte chunks, one SSE and seven SSEUP.
2931 Lo = SSE;
2932 Hi = SSEUp;
2933 }
2934 return;
2935 }
2936
2937 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
2938 QualType ET = getContext().getCanonicalType(CT->getElementType());
2939
2940 uint64_t Size = getContext().getTypeSize(Ty);
2941 if (ET->isIntegralOrEnumerationType()) {
2942 if (Size <= 64)
2943 Current = Integer;
2944 else if (Size <= 128)
2945 Lo = Hi = Integer;
2946 } else if (ET == getContext().FloatTy) {
2947 Current = SSE;
2948 } else if (ET == getContext().DoubleTy) {
2949 Lo = Hi = SSE;
2950 } else if (ET == getContext().LongDoubleTy) {
2951 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2952 if (LDF == &llvm::APFloat::IEEEquad())
2953 Current = Memory;
2954 else if (LDF == &llvm::APFloat::x87DoubleExtended())
2955 Current = ComplexX87;
2956 else if (LDF == &llvm::APFloat::IEEEdouble())
2957 Lo = Hi = SSE;
2958 else
2959        llvm_unreachable("unexpected long double representation!");
2960 }
2961
2962 // If this complex type crosses an eightbyte boundary then it
2963 // should be split.
2964 uint64_t EB_Real = (OffsetBase) / 64;
2965 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
2966 if (Hi == NoClass && EB_Real != EB_Imag)
2967 Hi = Lo;
2968
2969 return;
2970 }
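  // Illustrative examples (informal sketch):
  //   _Complex float                    -> Current = SSE (one eightbyte)
  //   _Complex double                   -> Lo = Hi = SSE (two eightbytes)
  //   _Complex long double (80-bit x87) -> Current = ComplexX87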
2971
2972 if (const auto *EITy = Ty->getAs<ExtIntType>()) {
2973 if (EITy->getNumBits() <= 64)
2974 Current = Integer;
2975 else if (EITy->getNumBits() <= 128)
2976 Lo = Hi = Integer;
2977 // Larger values need to get passed in memory.
2978 return;
2979 }
2980
2981 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
2982 // Arrays are treated like structures.
2983
2984 uint64_t Size = getContext().getTypeSize(Ty);
2985
2986 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
2987 // than eight eightbytes, ..., it has class MEMORY.
2988 if (Size > 512)
2989 return;
2990
2991 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
2992 // fields, it has class MEMORY.
2993 //
2994 // Only need to check alignment of array base.
2995 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
2996 return;
2997
2998 // Otherwise implement simplified merge. We could be smarter about
2999 // this, but it isn't worth it and would be harder to verify.
3000 Current = NoClass;
3001 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
3002 uint64_t ArraySize = AT->getSize().getZExtValue();
3003
3004 // The only case a 256-bit wide vector could be used is when the array
3005 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
3006 // to work for sizes wider than 128, check early and fall back to memory.
3007 //
3008 if (Size > 128 &&
3009 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
3010 return;
3011
3012 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
3013 Class FieldLo, FieldHi;
3014 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
3015 Lo = merge(Lo, FieldLo);
3016 Hi = merge(Hi, FieldHi);
3017 if (Lo == Memory || Hi == Memory)
3018 break;
3019 }
3020
3021 postMerge(Size, Lo, Hi);
3022 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
3023 return;
3024 }
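  // Illustrative examples (informal sketch):
  //   int[4]    (16 bytes) -> Lo = Hi = Integer
  //   double[2] (16 bytes) -> Lo = Hi = SSE
  //   float[2]  (8 bytes)  -> Lo = SSE, Hi = NoClass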
3025
3026 if (const RecordType *RT = Ty->getAs<RecordType>()) {
3027 uint64_t Size = getContext().getTypeSize(Ty);
3028
3029 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
3030 // than eight eightbytes, ..., it has class MEMORY.
3031 if (Size > 512)
3032 return;
3033
3034 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
3035 // copy constructor or a non-trivial destructor, it is passed by invisible
3036 // reference.
3037 if (getRecordArgABI(RT, getCXXABI()))
3038 return;
3039
3040 const RecordDecl *RD = RT->getDecl();
3041
3042 // Assume variable sized types are passed in memory.
3043 if (RD->hasFlexibleArrayMember())
3044 return;
3045
3046 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
3047
3048 // Reset Lo class, this will be recomputed.
3049 Current = NoClass;
3050
3051 // If this is a C++ record, classify the bases first.
3052 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3053 for (const auto &I : CXXRD->bases()) {
3054 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3055 "Unexpected base class!");
3056 const auto *Base =
3057 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
3058
3059 // Classify this field.
3060 //
3061 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
3062 // single eightbyte, each is classified separately. Each eightbyte gets
3063 // initialized to class NO_CLASS.
3064 Class FieldLo, FieldHi;
3065 uint64_t Offset =
3066 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
3067 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
3068 Lo = merge(Lo, FieldLo);
3069 Hi = merge(Hi, FieldHi);
3070 if (Lo == Memory || Hi == Memory) {
3071 postMerge(Size, Lo, Hi);
3072 return;
3073 }
3074 }
3075 }
3076
3077 // Classify the fields one at a time, merging the results.
3078 unsigned idx = 0;
3079 bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
3080 LangOptions::ClangABI::Ver11 ||
3081 getContext().getTargetInfo().getTriple().isPS4();
3082 bool IsUnion = RT->isUnionType() && !UseClang11Compat;
3083
3084 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3085 i != e; ++i, ++idx) {
3086 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
3087 bool BitField = i->isBitField();
3088
3089 // Ignore padding bit-fields.
3090 if (BitField && i->isUnnamedBitfield())
3091 continue;
3092
3093 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
3094 // eight eightbytes, or it contains unaligned fields, it has class MEMORY.
3095 //
3096 // The only case a 256-bit or a 512-bit wide vector could be used is when
3097 // the struct contains a single 256-bit or 512-bit element. Check early
3098 // and fall back to memory.
3099 //
3100 // FIXME: Extend the Lo and Hi logic properly to work for sizes wider
3101 // than 128.
3102 if (Size > 128 &&
3103 ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
3104 Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
3105 Lo = Memory;
3106 postMerge(Size, Lo, Hi);
3107 return;
3108 }
3109 // Note, skip this test for bit-fields, see below.
3110 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
3111 Lo = Memory;
3112 postMerge(Size, Lo, Hi);
3113 return;
3114 }
3115
3116 // Classify this field.
3117 //
3118 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
3119 // exceeds a single eightbyte, each is classified
3120 // separately. Each eightbyte gets initialized to class
3121 // NO_CLASS.
3122 Class FieldLo, FieldHi;
3123
3124 // Bit-fields require special handling, they do not force the
3125 // structure to be passed in memory even if unaligned, and
3126 // therefore they can straddle an eightbyte.
3127 if (BitField) {
3128 assert(!i->isUnnamedBitfield());
3129 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
3130 uint64_t Size = i->getBitWidthValue(getContext());
3131
3132 uint64_t EB_Lo = Offset / 64;
3133 uint64_t EB_Hi = (Offset + Size - 1) / 64;
3134
3135 if (EB_Lo) {
3136 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
3137 FieldLo = NoClass;
3138 FieldHi = Integer;
3139 } else {
3140 FieldLo = Integer;
3141 FieldHi = EB_Hi ? Integer : NoClass;
3142 }
3143 } else
3144 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
3145 Lo = merge(Lo, FieldLo);
3146 Hi = merge(Hi, FieldHi);
3147 if (Lo == Memory || Hi == Memory)
3148 break;
3149 }
3150
3151 postMerge(Size, Lo, Hi);
3152 }
3153}
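// Illustrative record classifications (informal sketch of the merge above):
//   struct { int a; int b; }    -> Lo = Integer, Hi = NoClass
//   struct { double d; int i; } -> Lo = SSE,     Hi = Integer
//   struct { long double ld; }  -> Lo = X87,     Hi = X87Up
//   a type with a non-trivial copy constructor or destructor is passed by
//   invisible reference instead (see getRecordArgABI above).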
3154
3155ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
3156 // If this is a scalar LLVM value then assume LLVM will pass it in the right
3157 // place naturally.
3158 if (!isAggregateTypeForABI(Ty)) {
3159 // Treat an enum type as its underlying type.
3160 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3161 Ty = EnumTy->getDecl()->getIntegerType();
3162
3163 if (Ty->isExtIntType())
3164 return getNaturalAlignIndirect(Ty);
3165
3166 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
3167 : ABIArgInfo::getDirect());
3168 }
3169
3170 return getNaturalAlignIndirect(Ty);
3171}
3172
3173bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
3174 if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
3175 uint64_t Size = getContext().getTypeSize(VecTy);
3176 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
3177 if (Size <= 64 || Size > LargestVector)
3178 return true;
3179 QualType EltTy = VecTy->getElementType();
3180 if (passInt128VectorsInMem() &&
3181 (EltTy->isSpecificBuiltinType(BuiltinType::Int128) ||
3182 EltTy->isSpecificBuiltinType(BuiltinType::UInt128)))
3183 return true;
3184 }
3185
3186 return false;
3187}
3188
3189ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
3190 unsigned freeIntRegs) const {
3191 // If this is a scalar LLVM value then assume LLVM will pass it in the right
3192 // place naturally.
3193 //
3194 // This assumption is optimistic, as there could be free registers available
3195 // when we need to pass this argument in memory, and LLVM could try to pass
3196 // the argument in the free register. This does not seem to happen currently,
3197 // but this code would be much safer if we could mark the argument with
3198 // 'onstack'. See PR12193.
3199 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
3200 !Ty->isExtIntType()) {
3201 // Treat an enum type as its underlying type.
3202 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3203 Ty = EnumTy->getDecl()->getIntegerType();
3204
3205 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
3206 : ABIArgInfo::getDirect());
3207 }
3208
3209 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
3210 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
3211
3212 // Compute the byval alignment. We specify the alignment of the byval in all
3213 // cases so that the mid-level optimizer knows the alignment of the byval.
3214 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
3215
3216 // Attempt to avoid passing indirect results using byval when possible. This
3217 // is important for good codegen.
3218 //
3219 // We do this by coercing the value into a scalar type which the backend can
3220 // handle naturally (i.e., without using byval).
3221 //
3222 // For simplicity, we currently only do this when we have exhausted all of the
3223 // free integer registers. Doing this when there are free integer registers
3224 // would require more care, as we would have to ensure that the coerced value
3225 // did not claim the unused register. That would require either reordering the
3226 // arguments to the function (so that any subsequent inreg values came first),
3227 // or only doing this optimization when there were no following arguments that
3228 // might be inreg.
3229 //
3230 // We currently expect it to be rare (particularly in well written code) for
3231 // arguments to be passed on the stack when there are still free integer
3232 // registers available (this would typically imply large structs being passed
3233 // by value), so this seems like a fair tradeoff for now.
3234 //
3235 // We can revisit this if the backend grows support for 'onstack' parameter
3236 // attributes. See PR12193.
3237 if (freeIntRegs == 0) {
3238 uint64_t Size = getContext().getTypeSize(Ty);
3239
3240 // If this type fits in an eightbyte, coerce it into the matching integral
3241 // type, which will end up on the stack (with alignment 8).
3242 if (Align == 8 && Size <= 64)
3243 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
3244 Size));
3245 }
3246
3247 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
3248}
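// Informal sketch of the coercion above: once the integer registers are
// exhausted, a struct that fits in a single eightbyte, e.g.
//   struct S { int a, b; };
// is coerced to i64 and lands on the stack directly, while larger or
// over-aligned types are passed indirectly with an explicit byval alignment.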
3249
3250/// The ABI specifies that a value should be passed in a full vector XMM/YMM
3251/// register. Pick an LLVM IR type that will be passed as a vector register.
3252llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
3253 // Wrapper structs/arrays that only contain vectors are passed just like
3254 // vectors; strip them off if present.
3255 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
3256 Ty = QualType(InnerTy, 0);
3257
3258 llvm::Type *IRType = CGT.ConvertType(Ty);
3259 if (isa<llvm::VectorType>(IRType)) {
3260 // Don't pass vXi128 vectors in their native type, the backend can't
3261 // legalize them.
3262 if (passInt128VectorsInMem() &&
3263 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
3264 // Use a vXi64 vector.
3265 uint64_t Size = getContext().getTypeSize(Ty);
3266 return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
3267 Size / 64);
3268 }
3269
3270 return IRType;
3271 }
3272
3273 if (IRType->getTypeID() == llvm::Type::FP128TyID)
3274 return IRType;
3275
3276 // We couldn't find the preferred IR vector type for 'Ty'.
3277 uint64_t Size = getContext().getTypeSize(Ty);
3278 assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
3279
3280
3281 // Return an LLVM IR vector type based on the size of 'Ty'.
3282 return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
3283 Size / 64);
3284}
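// Informal sketch: a wrapper such as struct { __m128 v; } is stripped to the
// underlying <4 x float>, and a 128-bit vector of __int128 is rewritten to
// <2 x i64> when passInt128VectorsInMem() is in effect, since the backend
// cannot legalize vXi128.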
3285
3286/// BitsContainNoUserData - Return true if the specified [start,end) bit range
3287/// is known to either be off the end of the specified type or being in
3288/// alignment padding. The user type specified is known to be at most 128 bits
3289/// in size, and have passed through X86_64ABIInfo::classify with a successful
3290/// classification that put one of the two halves in the INTEGER class.
3291///
3292/// It is conservatively correct to return false.
3293static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
3294 unsigned EndBit, ASTContext &Context) {
3295 // If the bytes being queried are off the end of the type, there is no user
3296 // data hiding here. This handles analysis of builtins, vectors and other
3297 // types that don't contain interesting padding.
3298 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
3299 if (TySize <= StartBit)
3300 return true;
3301
3302 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
3303 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
3304 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
3305
3306 // Check each element to see if the element overlaps with the queried range.
3307 for (unsigned i = 0; i != NumElts; ++i) {
3308 // If the element is after the span we care about, then we're done.
3309 unsigned EltOffset = i*EltSize;
3310 if (EltOffset >= EndBit) break;
3311
3312 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
3313 if (!BitsContainNoUserData(AT->getElementType(), EltStart,
3314 EndBit-EltOffset, Context))
3315 return false;
3316 }
3317 // If it overlaps no elements, then it is safe to process as padding.
3318 return true;
3319 }
3320
3321 if (const RecordType *RT = Ty->getAs<RecordType>()) {
3322 const RecordDecl *RD = RT->getDecl();
3323 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
3324
3325 // If this is a C++ record, check the bases first.
3326 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3327 for (const auto &I : CXXRD->bases()) {
3328 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3329 "Unexpected base class!");
3330 const auto *Base =
3331 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
3332
3333 // If the base is after the span we care about, ignore it.
3334 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
3335 if (BaseOffset >= EndBit) continue;
3336
3337 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
3338 if (!BitsContainNoUserData(I.getType(), BaseStart,
3339 EndBit-BaseOffset, Context))
3340 return false;
3341 }
3342 }
3343
3344 // Verify that no field has data that overlaps the region of interest. Yes
3345 // this could be sped up a lot by being smarter about queried fields,
3346 // however we're only looking at structs up to 16 bytes, so we don't care
3347 // much.
3348 unsigned idx = 0;
3349 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3350 i != e; ++i, ++idx) {
3351 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
3352
3353 // If we found a field after the region we care about, then we're done.
3354 if (FieldOffset >= EndBit) break;
3355
3356 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
3357 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
3358 Context))
3359 return false;
3360 }
3361
3362 // If nothing in this record overlapped the area of interest, then we're
3363 // clean.
3364 return true;
3365 }
3366
3367 return false;
3368}
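// Worked example (informal): for struct { double d; int i; } the object is 16
// bytes but user data ends at bit 96, so querying bits [96, 128) sees only
// tail padding and returns true, while querying bits [64, 128) overlaps the
// int and returns false.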
3369
3370/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
3371/// float member at the specified offset. For example, {int,{float}} has a
3372/// float at offset 4. It is conservatively correct for this routine to return
3373/// false.
3374static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
3375 const llvm::DataLayout &TD) {
3376 // Base case if we find a float.
3377 if (IROffset == 0 && IRType->isFloatTy())
3378 return true;
3379
3380 // If this is a struct, recurse into the field at the specified offset.
3381 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3382 const llvm::StructLayout *SL = TD.getStructLayout(STy);
3383 unsigned Elt = SL->getElementContainingOffset(IROffset);
3384 IROffset -= SL->getElementOffset(Elt);
3385 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
3386 }
3387
3388 // If this is an array, recurse into the field at the specified offset.
3389 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3390 llvm::Type *EltTy = ATy->getElementType();
3391 unsigned EltSize = TD.getTypeAllocSize(EltTy);
3392 IROffset -= IROffset/EltSize*EltSize;
3393 return ContainsFloatAtOffset(EltTy, IROffset, TD);
3394 }
3395
3396 return false;
3397}
3398
3399
3400/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
3401/// low 8 bytes of an XMM register, corresponding to the SSE class.
3402llvm::Type *X86_64ABIInfo::
3403GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3404 QualType SourceTy, unsigned SourceOffset) const {
3405 // The only three choices we have are double, <2 x float>, or float. We
3406 // pass as float if the last 4 bytes are just padding. This happens for
3407 // structs that contain 3 floats.
3408 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
3409 SourceOffset*8+64, getContext()))
3410 return llvm::Type::getFloatTy(getVMContext());
3411
3412 // We want to pass as <2 x float> if the LLVM IR type contains a float at
3413 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
3414 // case.
3415 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
3416 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
3417 return llvm::FixedVectorType::get(llvm::Type::getFloatTy(getVMContext()),
3418 2);
3419
3420 return llvm::Type::getDoubleTy(getVMContext());
3421}
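// Worked example (informal): struct { float x, y, z; } occupies 12 bytes. The
// low eightbyte has floats at offsets 0 and 4, so it is passed as <2 x float>;
// the high eightbyte only carries data in bits [64, 96), so it is passed as a
// single float, giving {<2 x float>, float} across two XMM registers.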
3422
3423
3424/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
3425/// an 8-byte GPR. This means that we either have a scalar or we are talking
3426/// about the high or low part of an up-to-16-byte struct. This routine picks
3427/// the best LLVM IR type to represent this, which may be i64 or may be anything
3428/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
3429/// etc).
3430///
3431/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
3432/// the source type. IROffset is an offset in bytes into the LLVM IR type that
3433/// the 8-byte value references. PrefType may be null.
3434///
3435/// SourceTy is the source-level type for the entire argument. SourceOffset is
3436/// an offset into this that we're processing (which is always either 0 or 8).
3437///
3438llvm::Type *X86_64ABIInfo::
3439GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3440 QualType SourceTy, unsigned SourceOffset) const {
3441 // If we're dealing with an un-offset LLVM IR type, then it means that we're
3442 // returning an 8-byte unit starting with it. See if we can safely use it.
3443 if (IROffset == 0) {
3444 // Pointers and int64's always fill the 8-byte unit.
3445 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
3446 IRType->isIntegerTy(64))
3447 return IRType;
3448
3449 // If we have a 1/2/4-byte integer, we can use it only if the rest of the
3450 // goodness in the source type is just tail padding. This is allowed to
3451 // kick in for struct {double,int} on the int, but not on
3452 // struct{double,int,int} because we wouldn't return the second int. We
3453 // have to do this analysis on the source type because we can't depend on
3454 // unions being lowered a specific way etc.
3455 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
3456 IRType->isIntegerTy(32) ||
3457 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
3458 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
3459 cast<llvm::IntegerType>(IRType)->getBitWidth();
3460
3461 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
3462 SourceOffset*8+64, getContext()))
3463 return IRType;
3464 }
3465 }
3466
3467 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3468 // If this is a struct, recurse into the field at the specified offset.
3469 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
3470 if (IROffset < SL->getSizeInBytes()) {
3471 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
3472 IROffset -= SL->getElementOffset(FieldIdx);
3473
3474 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
3475 SourceTy, SourceOffset);
3476 }
3477 }
3478
3479 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3480 llvm::Type *EltTy = ATy->getElementType();
3481 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
3482 unsigned EltOffset = IROffset/EltSize*EltSize;
3483 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
3484 SourceOffset);
3485 }
3486
3487 // Okay, we don't have any better idea of what to pass, so we pass this in an
3488 // integer type that is no larger than the remaining bytes of the struct.
3489 unsigned TySizeInBytes =
3490 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
3491
3492 assert(TySizeInBytes != SourceOffset && "Empty field?");
3493
3494 // It is always safe to classify this as an integer type up to i64 that
3495 // isn't larger than the structure.
3496 return llvm::IntegerType::get(getVMContext(),
3497 std::min(TySizeInBytes-SourceOffset, 8U)*8);
3498}
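// Worked examples (informal):
//   struct { char c; short s; } -> no single scalar covers the live bits, so
//                                  the eightbyte is passed as i32 (4 bytes).
//   struct { double d; int i; } -> the high eightbyte is the int plus tail
//                                  padding, so i32 is used for it, not i64.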
3499
3500
3501/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
3502/// be used as elements of a two register pair to pass or return, return a
3503/// first class aggregate to represent them. For example, if the low part of
3504/// a by-value argument should be passed as i32* and the high part as float,
3505/// return {i32*, float}.
3506static llvm::Type *
3507GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
3508 const llvm::DataLayout &TD) {
3509 // In order to correctly satisfy the ABI, we need the high part to start
3510 // at offset 8. If the high and low parts we inferred are both 4-byte types
3511 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
3512 // the second element at offset 8. Check for this:
3513 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
3514 unsigned HiAlign = TD.getABITypeAlignment(Hi);
3515 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
3516 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
3517
3518 // To handle this, we have to increase the size of the low part so that the
3519 // second element will start at an 8 byte offset. We can't increase the size
3520 // of the second element because it might make us access off the end of the
3521 // struct.
3522 if (HiStart != 8) {
3523 // There are usually two sorts of types the ABI generation code can produce
3524 // for the low part of a pair that aren't 8 bytes in size: float or
3525 // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
3526 // NaCl).
3527 // Promote these to a larger type.
3528 if (Lo->isFloatTy())
3529 Lo = llvm::Type::getDoubleTy(Lo->getContext());
3530 else {
3531 assert((Lo->isIntegerTy() || Lo->isPointerTy())
3532 && "Invalid/unknown lo type");
3533 Lo = llvm::Type::getInt64Ty(Lo->getContext());
3534 }
3535 }
3536
3537 llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
3538
3539 // Verify that the second element is at an 8-byte offset.
3540 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3541 "Invalid x86-64 argument pair!");
3542 return Result;
3543}
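// Worked example (informal, hypothetical type): for
//   struct S { float a; float b __attribute__((aligned(8))); };
// both eightbytes lower to float, so HiStart is 4; the low part is promoted
// to double and the resulting pair is {double, float}, which places the
// second element at offset 8 as required.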
3544
3545ABIArgInfo X86_64ABIInfo::
3546classifyReturnType(QualType RetTy) const {
3547 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
3548 // classification algorithm.
3549 X86_64ABIInfo::Class Lo, Hi;
3550 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
3551
3552 // Check some invariants.
3553 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3554 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3555
3556 llvm::Type *ResType = nullptr;
3557 switch (Lo) {
3558 case NoClass:
3559 if (Hi == NoClass)
3560 return ABIArgInfo::getIgnore();
3561 // If the low part is just padding, it takes no register, leave ResType
3562 // null.
3563 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3564 "Unknown missing lo part");
3565 break;
3566
3567 case SSEUp:
3568 case X87Up:
3569 llvm_unreachable("Invalid classification for lo word.");
3570
3571 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
3572 // hidden argument.
3573 case Memory:
3574 return getIndirectReturnResult(RetTy);
3575
3576 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
3577 // available register of the sequence %rax, %rdx is used.
3578 case Integer:
3579 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3580
3581 // If we have a sign or zero extended integer, make sure to return Extend
3582 // so that the parameter gets the right LLVM IR attributes.
3583 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3584 // Treat an enum type as its underlying type.
3585 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3586 RetTy = EnumTy->getDecl()->getIntegerType();
3587
3588 if (RetTy->isIntegralOrEnumerationType() &&
3589 isPromotableIntegerTypeForABI(RetTy))
3590 return ABIArgInfo::getExtend(RetTy);
3591 }
3592 break;
3593
3594 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
3595 // available SSE register of the sequence %xmm0, %xmm1 is used.
3596 case SSE:
3597 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3598 break;
3599
3600 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
3601 // returned on the X87 stack in %st0 as 80-bit x87 number.
3602 case X87:
3603 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
3604 break;
3605
3606 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
3607 // part of the value is returned in %st0 and the imaginary part in
3608 // %st1.
3609 case ComplexX87:
3610 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
3611 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
3612 llvm::Type::getX86_FP80Ty(getVMContext()));
3613 break;
3614 }
3615
3616 llvm::Type *HighPart = nullptr;
3617 switch (Hi) {
3618 // Memory was handled previously and X87 should
3619 // never occur as a hi class.
3620 case Memory:
3621 case X87:
3622 llvm_unreachable("Invalid classification for hi word.");
3623
3624 case ComplexX87: // Previously handled.
3625 case NoClass:
3626 break;
3627
3628 case Integer:
3629 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3630 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3631 return ABIArgInfo::getDirect(HighPart, 8);
3632 break;
3633 case SSE:
3634 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3635 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3636 return ABIArgInfo::getDirect(HighPart, 8);
3637 break;
3638
3639 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
3640 // is passed in the next available eightbyte chunk of the last used
3641 // vector register.
3642 //
3643 // SSEUP should always be preceded by SSE, just widen.
3644 case SSEUp:
3645 assert(Lo == SSE && "Unexpected SSEUp classification.");
3646 ResType = GetByteVectorType(RetTy);
3647 break;
3648
3649 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
3650 // returned together with the previous X87 value in %st0.
3651 case X87Up:
3652 // If X87Up is preceded by X87, we don't need to do
3653 // anything. However, in some cases with unions it may not be
3654 // preceded by X87. In such situations we follow gcc and pass the
3655 // extra bits in an SSE reg.
3656 if (Lo != X87) {
3657 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3658 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3659 return ABIArgInfo::getDirect(HighPart, 8);
3660 }
3661 break;
3662 }
3663
3664 // If a high part was specified, merge it together with the low part. It is
3665 // known to pass in the high eightbyte of the result. We do this by forming a
3666 // first class struct aggregate with the high and low part: {low, high}
3667 if (HighPart)
3668 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3669
3670 return ABIArgInfo::getDirect(ResType);
3671}
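// Illustrative return values (informal sketch):
//   long double                  -> X87/X87Up, returned in %st0 as x86_fp80
//   struct { double d; int i; }  -> {double, i32}, returned in %xmm0 and %eax
//   __m256 (with AVX)            -> SSE/SSEUp, returned as one <8 x float>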
3672
3673ABIArgInfo X86_64ABIInfo::classifyArgumentType(
3674 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
3675 bool isNamedArg)
3676 const
3677{
3678 Ty = useFirstFieldIfTransparentUnion(Ty);
3679
3680 X86_64ABIInfo::Class Lo, Hi;
3681 classify(Ty, 0, Lo, Hi, isNamedArg);
3682
3683 // Check some invariants.
3684 // FIXME: Enforce these by construction.
3685 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3686 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3687
3688 neededInt = 0;
3689 neededSSE = 0;
3690 llvm::Type *ResType = nullptr;
3691 switch (Lo) {
3692 case NoClass:
3693 if (Hi == NoClass)
3694 return ABIArgInfo::getIgnore();
3695 // If the low part is just padding, it takes no register, leave ResType
3696 // null.
3697 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3698 "Unknown missing lo part");
3699 break;
3700
3701 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
3702 // on the stack.
3703 case Memory:
3704
3705 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
3706 // COMPLEX_X87, it is passed in memory.
3707 case X87:
3708 case ComplexX87:
3709 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
3710 ++neededInt;
3711 return getIndirectResult(Ty, freeIntRegs);
3712
3713 case SSEUp:
3714 case X87Up:
3715 llvm_unreachable("Invalid classification for lo word.");
3716
3717 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
3718 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
3719 // and %r9 is used.
3720 case Integer:
3721 ++neededInt;
3722
3723 // Pick an 8-byte type based on the preferred type.
3724 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
3725
3726 // If we have a sign or zero extended integer, make sure to return Extend
3727 // so that the parameter gets the right LLVM IR attributes.
3728 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3729 // Treat an enum type as its underlying type.
3730 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3731 Ty = EnumTy->getDecl()->getIntegerType();
3732
3733 if (Ty->isIntegralOrEnumerationType() &&
3734 isPromotableIntegerTypeForABI(Ty))
3735 return ABIArgInfo::getExtend(Ty);
3736 }
3737
3738 break;
3739
3740 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
3741 // available SSE register is used, the registers are taken in the
3742 // order from %xmm0 to %xmm7.
3743 case SSE: {
3744 llvm::Type *IRType = CGT.ConvertType(Ty);
3745 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3746 ++neededSSE;
3747 break;
3748 }
3749 }
3750
3751 llvm::Type *HighPart = nullptr;
3752 switch (Hi) {
3753 // Memory was handled previously, ComplexX87 and X87 should
3754 // never occur as hi classes, and X87Up must be preceded by X87,
3755 // which is passed in memory.
3756 case Memory:
3757 case X87:
3758 case ComplexX87:
3759 llvm_unreachable("Invalid classification for hi word.");
3760
3761 case NoClass: break;
3762
3763 case Integer:
3764 ++neededInt;
3765 // Pick an 8-byte type based on the preferred type.
3766 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3767
3768 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3769 return ABIArgInfo::getDirect(HighPart, 8);
3770 break;
3771
3772 // X87Up generally doesn't occur here (long double is passed in
3773 // memory), except in situations involving unions.
3774 case X87Up:
3775 case SSE:
3776 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3777
3778 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3779 return ABIArgInfo::getDirect(HighPart, 8);
3780
3781 ++neededSSE;
3782 break;
3783
3784 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
3785 // eightbyte is passed in the upper half of the last used SSE
3786 // register. This only happens when 128-bit vectors are passed.
3787 case SSEUp:
3788 assert(Lo == SSE && "Unexpected SSEUp classification");
3789 ResType = GetByteVectorType(Ty);
3790 break;
3791 }
3792
3793 // If a high part was specified, merge it together with the low part. It is
3794 // known to pass in the high eightbyte of the result. We do this by forming a
3795 // first class struct aggregate with the high and low part: {low, high}
3796 if (HighPart)
3797 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3798
3799 return ABIArgInfo::getDirect(ResType);
3800}
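// Illustrative arguments (informal sketch):
//   int                          -> neededInt = 1, next free GPR
//   struct { int a; double b; }  -> neededInt = 1, neededSSE = 1, passed as
//                                   {i32, double} split across a GPR and an XMM
//   long double                  -> X87/X87Up, passed in memory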
3801
3802ABIArgInfo
3803X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
3804 unsigned &NeededSSE) const {
3805 auto RT = Ty->getAs<RecordType>();
3806 assert(RT && "classifyRegCallStructType only valid with struct types");
3807
3808 if (RT->getDecl()->hasFlexibleArrayMember())
3809 return getIndirectReturnResult(Ty);
3810
3811 // Sum up bases
3812 if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
3813 if (CXXRD->isDynamicClass()) {
3814 NeededInt = NeededSSE = 0;
3815 return getIndirectReturnResult(Ty);
3816 }
3817
3818 for (const auto &I : CXXRD->bases())
3819 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
3820 .isIndirect()) {
3821 NeededInt = NeededSSE = 0;
3822 return getIndirectReturnResult(Ty);
3823 }
3824 }
3825
3826 // Sum up members
3827 for (const auto *FD : RT->getDecl()->fields()) {
3828 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
3829 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
3830 .isIndirect()) {
3831 NeededInt = NeededSSE = 0;
3832 return getIndirectReturnResult(Ty);
3833 }
3834 } else {
3835 unsigned LocalNeededInt, LocalNeededSSE;
3836 if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt,
3837 LocalNeededSSE, true)
3838 .isIndirect()) {
3839 NeededInt = NeededSSE = 0;
3840 return getIndirectReturnResult(Ty);
3841 }
3842 NeededInt += LocalNeededInt;
3843 NeededSSE += LocalNeededSSE;
3844 }
3845 }
3846
3847 return ABIArgInfo::getDirect();
3848}
3849
3850ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty,
3851 unsigned &NeededInt,
3852 unsigned &NeededSSE) const {
3853
3854 NeededInt = 0;
3855 NeededSSE = 0;
3856
3857 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
3858}
3859
3860void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3861
3862 const unsigned CallingConv = FI.getCallingConvention();
3863 // It is possible to force Win64 calling convention on any x86_64 target by
3864 // using __attribute__((ms_abi)). In such a case, to emit Win64-compatible
3865 // code correctly, delegate this call to WinX86_64ABIInfo::computeInfo.
3866 if (CallingConv == llvm::CallingConv::Win64) {
3867 WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
3868 Win64ABIInfo.computeInfo(FI);
3869 return;
3870 }
3871
3872 bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;
3873
3874 // Keep track of the number of assigned registers.
3875 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
3876 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
3877 unsigned NeededInt, NeededSSE;
3878
3879 if (!::classifyReturnType(getCXXABI(), FI, *this)) {
3880 if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
3881 !FI.getReturnType()->getTypePtr()->isUnionType()) {
3882 FI.getReturnInfo() =
3883 classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE);
3884 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3885 FreeIntRegs -= NeededInt;
3886 FreeSSERegs -= NeededSSE;
3887 } else {
3888 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
3889 }
3890 } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() &&
3891 getContext().getCanonicalType(FI.getReturnType()
3892 ->getAs<ComplexType>()
3893 ->getElementType()) ==
3894 getContext().LongDoubleTy)
3895 // A complex long double is returned in memory when the regcall
3896 // calling convention is used.
3897 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
3898 else
3899 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3900 }
3901
3902 // If the return value is indirect, then the hidden argument is consuming one
3903 // integer register.
3904 if (FI.getReturnInfo().isIndirect())
3905 --FreeIntRegs;
3906
3907 // The chain argument effectively gives us another free register.
3908 if (FI.isChainCall())
3909 ++FreeIntRegs;
3910
3911 unsigned NumRequiredArgs = FI.getNumRequiredArgs();
3912 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
3913 // get assigned (in left-to-right order) for passing as follows...
3914 unsigned ArgNo = 0;
3915 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3916 it != ie; ++it, ++ArgNo) {
3917 bool IsNamedArg = ArgNo < NumRequiredArgs;
3918
3919 if (IsRegCall && it->type->isStructureOrClassType())
3920 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
3921 else
3922 it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
3923 NeededSSE, IsNamedArg);
3924
3925 // AMD64-ABI 3.2.3p3: If there are no registers available for any
3926 // eightbyte of an argument, the whole argument is passed on the
3927 // stack. If registers have already been assigned for some
3928 // eightbytes of such an argument, the assignments get reverted.
3929 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3930 FreeIntRegs -= NeededInt;
3931 FreeSSERegs -= NeededSSE;
3932 } else {
3933 it->info = getIndirectResult(it->type, FreeIntRegs);
3934 }
3935 }
3936}
3937
3938static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
3939 Address VAListAddr, QualType Ty) {
3940 Address overflow_arg_area_p =
3941 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
3942 llvm::Value *overflow_arg_area =
3943 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
3944
3945 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
3946 // byte boundary if alignment needed by type exceeds 8 byte boundary.
3947 // It isn't stated explicitly in the standard, but in practice we use
3948 // alignment greater than 16 where necessary.
3949 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
3950 if (Align > CharUnits::fromQuantity(8)) {
3951 overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
3952 Align);
3953 }
3954
3955 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
3956 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3957 llvm::Value *Res =
3958 CGF.Builder.CreateBitCast(overflow_arg_area,
3959 llvm::PointerType::getUnqual(LTy));
3960
3961 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
3962 // l->overflow_arg_area + sizeof(type).
3963 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
3964 // an 8 byte boundary.
3965
3966 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
3967 llvm::Value *Offset =
3968 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
3969 overflow_arg_area = CGF.Builder.CreateGEP(CGF.Int8Ty, overflow_arg_area,
3970 Offset, "overflow_arg_area.next");
3971 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
3972
3973 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
3974 return Address(Res, Align);
3975}
3976
3977Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3978 QualType Ty) const {
3979 // Assume that va_list type is correct; should be pointer to LLVM type:
3980 // struct {
3981 // i32 gp_offset;
3982 // i32 fp_offset;
3983 // i8* overflow_arg_area;
3984 // i8* reg_save_area;
3985 // };
3986 unsigned neededInt, neededSSE;
3987
3988 Ty = getContext().getCanonicalType(Ty);
3989 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
3990 /*isNamedArg*/false);
3991
3992 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
3993 // in the registers. If not go to step 7.
3994 if (!neededInt && !neededSSE)
3995 return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3996
3997 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
3998 // general purpose registers needed to pass type and num_fp to hold
3999 // the number of floating point registers needed.
4000
4001 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
4002 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
4003 // l->fp_offset > 304 - num_fp * 16 go to step 7.
4004 //
4005 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
4006 // register save space.
4007
4008 llvm::Value *InRegs = nullptr;
4009 Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
4010 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
4011 if (neededInt) {
4012 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
4013 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
4014 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
4015 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
4016 }
4017
4018 if (neededSSE) {
4019 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
4020 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
4021 llvm::Value *FitsInFP =
4022 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
4023 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
4024 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
4025 }
4026
4027 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
4028 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
4029 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
4030 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
4031
4032 // Emit code to load the value if it was passed in registers.
4033
4034 CGF.EmitBlock(InRegBlock);
4035
4036 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
4037 // an offset of l->gp_offset and/or l->fp_offset. This may require
4038 // copying to a temporary location in case the parameter is passed
4039 // in different register classes or requires an alignment greater
4040 // than 8 for general purpose registers and 16 for XMM registers.
4041 //
4042 // FIXME: This really results in shameful code when we end up needing to
4043 // collect arguments from different places; often what should result in a
4044 // simple assembling of a structure from scattered addresses has many more
4045 // loads than necessary. Can we clean this up?
4046 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
4047 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
4048 CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");
4049
4050 Address RegAddr = Address::invalid();
4051 if (neededInt && neededSSE) {
4052 // FIXME: Cleanup.
4053 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
4054 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
4055 Address Tmp = CGF.CreateMemTemp(Ty);
4056 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
4057 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
4058 llvm::Type *TyLo = ST->getElementType(0);
4059 llvm::Type *TyHi = ST->getElementType(1);
4060 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
4061 "Unexpected ABI info for mixed regs");
4062 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
4063 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
4064 llvm::Value *GPAddr =
4065 CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset);
4066 llvm::Value *FPAddr =
4067 CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset);
4068 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
4069 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
4070
4071 // Copy the first element.
4072 // FIXME: Our choice of alignment here and below is probably pessimistic.
4073 llvm::Value *V = CGF.Builder.CreateAlignedLoad(
4074 TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
4075 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo)));
4076 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
4077
4078 // Copy the second element.
4079 V = CGF.Builder.CreateAlignedLoad(
4080 TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
4081 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi)));
4082 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
4083
4084 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
4085 } else if (neededInt) {
4086 RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset),
4087 CharUnits::fromQuantity(8));
4088 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
4089
4090 // Copy to a temporary if necessary to ensure the appropriate alignment.
4091 auto TInfo = getContext().getTypeInfoInChars(Ty);
4092 uint64_t TySize = TInfo.Width.getQuantity();
4093 CharUnits TyAlign = TInfo.Align;
4094
4095 // Copy into a temporary if the type is more aligned than the
4096 // register save area.
4097 if (TyAlign.getQuantity() > 8) {
4098 Address Tmp = CGF.CreateMemTemp(Ty);
4099 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
4100 RegAddr = Tmp;
4101 }
4102
4103 } else if (neededSSE == 1) {
4104 RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset),
4105 CharUnits::fromQuantity(16));
4106 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
4107 } else {
4108 assert(neededSSE == 2 && "Invalid number of needed registers!");
4109 // SSE registers are spaced 16 bytes apart in the register save
4110 // area, we need to collect the two eightbytes together.
4111 // The ABI isn't explicit about this, but it seems reasonable
4112 // to assume that the slots are 16-byte aligned, since the stack is
4113 // naturally 16-byte aligned and the prologue is expected to store
4114 // all the SSE registers to the RSA.
4115 Address RegAddrLo = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea,
4116 fp_offset),
4117 CharUnits::fromQuantity(16));
4118 Address RegAddrHi =
4119 CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
4120 CharUnits::fromQuantity(16));
4121 llvm::Type *ST = AI.canHaveCoerceToType()
4122 ? AI.getCoerceToType()
4123 : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
4124 llvm::Value *V;
4125 Address Tmp = CGF.CreateMemTemp(Ty);
4126 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
4127 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
4128 RegAddrLo, ST->getStructElementType(0)));
4129 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
4130 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
4131 RegAddrHi, ST->getStructElementType(1)));
4132 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
4133
4134 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
4135 }
4136
4137 // AMD64-ABI 3.5.7p5: Step 5. Set:
4138 // l->gp_offset = l->gp_offset + num_gp * 8
4139 // l->fp_offset = l->fp_offset + num_fp * 16.
4140 if (neededInt) {
4141 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
4142 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
4143 gp_offset_p);
4144 }
4145 if (neededSSE) {
4146 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
4147 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
4148 fp_offset_p);
4149 }
4150 CGF.EmitBranch(ContBlock);
4151
4152 // Emit code to load the value if it was passed in memory.
4153
4154 CGF.EmitBlock(InMemBlock);
4155 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
4156
4157 // Return the appropriate result.
4158
4159 CGF.EmitBlock(ContBlock);
4160 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
4161 "vaarg.addr");
4162 return ResAddr;
4163}
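// Usage sketch (informal): for va_arg(ap, struct { int i; double d; }) the
// classification needs one GPR and one SSE register, so the emitted IR tests
// gp_offset <= 40 and fp_offset <= 160, assembles the two eightbytes from
// reg_save_area into a temporary on the in-register path, and otherwise takes
// the in-memory path via EmitX86_64VAArgFromMemory above.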
4164
4165Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
4166 QualType Ty) const {
4167 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4168 // not 1, 2, 4, or 8 bytes, must be passed by reference."
4169 uint64_t Width = getContext().getTypeSize(Ty);
4170 bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
4171
4172 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
4173 CGF.getContext().getTypeInfoInChars(Ty),
4174 CharUnits::fromQuantity(8),
4175 /*allowHigherAlign*/ false);
4176}
4177
4178ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
4179 QualType Ty, unsigned &FreeSSERegs, const ABIArgInfo &current) const {
4180 const Type *Base = nullptr;
4181 uint64_t NumElts = 0;
4182
4183 if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
4184 isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
4185 FreeSSERegs -= NumElts;
4186 return getDirectX86Hva();
4187 }
4188 return current;
4189}
4190
4191ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
4192 bool IsReturnType, bool IsVectorCall,
4193 bool IsRegCall) const {
4194
4195 if (Ty->isVoidType())
4196 return ABIArgInfo::getIgnore();
4197
4198 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4199 Ty = EnumTy->getDecl()->getIntegerType();
4200
4201 TypeInfo Info = getContext().getTypeInfo(Ty);
4202 uint64_t Width = Info.Width;
4203 CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
4204
4205 const RecordType *RT = Ty->getAs<RecordType>();
4206 if (RT) {
4207 if (!IsReturnType) {
4208 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
4209 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
4210 }
4211
4212 if (RT->getDecl()->hasFlexibleArrayMember())
4213 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4214
4215 }
4216
4217 const Type *Base = nullptr;
4218 uint64_t NumElts = 0;
4219 // vectorcall adds the concept of a homogeneous vector aggregate, similar to
4220 // other targets.
4221 if ((IsVectorCall || IsRegCall) &&
4222 isHomogeneousAggregate(Ty, Base, NumElts)) {
4223 if (IsRegCall) {
4224 if (FreeSSERegs >= NumElts) {
4225 FreeSSERegs -= NumElts;
4226 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
4227 return ABIArgInfo::getDirect();
4228 return ABIArgInfo::getExpand();
4229 }
4230 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4231 } else if (IsVectorCall) {
4232 if (FreeSSERegs >= NumElts &&
4233 (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
4234 FreeSSERegs -= NumElts;
4235 return ABIArgInfo::getDirect();
4236 } else if (IsReturnType) {
4237 return ABIArgInfo::getExpand();
4238 } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
4239 // HVAs are delayed and reclassified in the 2nd step.
4240 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4241 }
4242 }
4243 }
4244
4245 if (Ty->isMemberPointerType()) {
4246 // If the member pointer is represented by an LLVM int or ptr, pass it
4247 // directly.
4248 llvm::Type *LLTy = CGT.ConvertType(Ty);
4249 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
4250 return ABIArgInfo::getDirect();
4251 }
4252
4253 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
4254 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4255 // not 1, 2, 4, or 8 bytes, must be passed by reference."
4256 if (Width > 64 || !llvm::isPowerOf2_64(Width))
4257 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4258
4259 // Otherwise, coerce it to a small integer.
4260 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
4261 }
4262
4263 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4264 switch (BT->getKind()) {
4265 case BuiltinType::Bool:
4266 // Bool is always extended per the ABI; other builtin types are not
4267 // extended.
4268 return ABIArgInfo::getExtend(Ty);
4269
4270 case BuiltinType::LongDouble:
4271 // Mingw64 GCC uses the old 80 bit extended precision floating point
4272 // unit. It passes them indirectly through memory.
4273 if (IsMingw64) {
4274 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
4275 if (LDF == &llvm::APFloat::x87DoubleExtended())
4276 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4277 }
4278 break;
4279
4280 case BuiltinType::Int128:
4281 case BuiltinType::UInt128:
4282 // If it's a parameter type, the normal ABI rule is that arguments larger
4283 // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
4284 // even though it isn't particularly efficient.
4285 if (!IsReturnType)
4286 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4287
4288 // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
4289 // Clang matches them for compatibility.
4290 return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
4291 llvm::Type::getInt64Ty(getVMContext()), 2));
4292
4293 default:
4294 break;
4295 }
4296 }
4297
4298 if (Ty->isExtIntType()) {
4299 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4300 // not 1, 2, 4, or 8 bytes, must be passed by reference."
4301 // However, non-power-of-two _ExtInts will be passed as 1, 2, 4, or 8 bytes
4302 // anyway as long as they fit in one of those sizes, so we don't have to
4303 // check for a power of two.
4304 if (Width <= 64)
4305 return ABIArgInfo::getDirect();
4306 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4307 }
4308
4309 return ABIArgInfo::getDirect();
4310}
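// Illustrative Win64 classifications (informal sketch):
//   struct { char c[3]; } (3 bytes, not a power of two) -> passed by reference
//   struct { int a, b; }  (8 bytes)                     -> coerced to i64
//   __int128 parameter -> indirect; __int128 return -> coerced to <2 x i64>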
4311
4312void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
4313 const unsigned CC = FI.getCallingConvention();
4314 bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
4315 bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
4316
4317 // If __attribute__((sysv_abi)) is in use, use the SysV argument
4318 // classification rules.
4319 if (CC == llvm::CallingConv::X86_64_SysV) {
4320 X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
4321 SysVABIInfo.computeInfo(FI);
4322 return;
4323 }
4324
4325 unsigned FreeSSERegs = 0;
4326 if (IsVectorCall) {
4327 // We can use up to 4 SSE return registers with vectorcall.
4328 FreeSSERegs = 4;
4329 } else if (IsRegCall) {
4330 // RegCall gives us 16 SSE registers.
4331 FreeSSERegs = 16;
4332 }
4333
4334 if (!getCXXABI().classifyReturnType(FI))
4335 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
4336 IsVectorCall, IsRegCall);
4337
4338 if (IsVectorCall) {
4339 // We can use up to 6 SSE register parameters with vectorcall.
4340 FreeSSERegs = 6;
4341 } else if (IsRegCall) {
4342 // RegCall gives us 16 SSE registers, we can reuse the return registers.
4343 FreeSSERegs = 16;
4344 }
4345
4346 unsigned ArgNum = 0;
4347 unsigned ZeroSSERegs = 0;
4348 for (auto &I : FI.arguments()) {
4349 // Vectorcall in x64 only permits the first 6 arguments to be passed in
4350 // XMM/YMM registers. After the sixth argument, pretend no vector
4351 // registers are left.
4352 unsigned *MaybeFreeSSERegs =
4353 (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
4354 I.info =
4355 classify(I.type, *MaybeFreeSSERegs, false, IsVectorCall, IsRegCall);
4356 ++ArgNum;
4357 }
4358
4359 if (IsVectorCall) {
4360 // For vectorcall, assign aggregate HVAs to any free vector registers in a
4361 // second pass.
4362 for (auto &I : FI.arguments())
4363 I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info);
4364 }
4365}
4366
4367Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4368 QualType Ty) const {
4369 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4370 // not 1, 2, 4, or 8 bytes, must be passed by reference."
4371 uint64_t Width = getContext().getTypeSize(Ty);
4372 bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
4373
4374 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
4375 CGF.getContext().getTypeInfoInChars(Ty),
4376 CharUnits::fromQuantity(8),
4377 /*allowHigherAlign*/ false);
4378}
4379
4380static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4381 llvm::Value *Address, bool Is64Bit,
4382 bool IsAIX) {
4383 // This is calculated from the LLVM and GCC tables and verified
4384 // against gcc output. AFAIK all PPC ABIs use the same encoding.
4385
4386 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4387
4388 llvm::IntegerType *i8 = CGF.Int8Ty;
4389 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4390 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4391 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4392
4393 // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers
4394 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31);
4395
4396 // 32-63: fp0-31, the 8-byte floating-point registers
4397 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4398
4399 // 64-67 are various 4-byte or 8-byte special-purpose registers:
4400 // 64: mq
4401 // 65: lr
4402 // 66: ctr
4403 // 67: ap
4404 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67);
4405
4406 // 68-76 are various 4-byte special-purpose registers:
4407 // 68-75 cr0-7
4408 // 76: xer
4409 AssignToArrayRange(Builder, Address, Four8, 68, 76);
4410
4411 // 77-108: v0-31, the 16-byte vector registers
4412 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
4413
4414 // 109: vrsave
4415 // 110: vscr
4416 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110);
4417
4418 // AIX does not utilize the rest of the registers.
4419 if (IsAIX)
4420 return false;
4421
4422 // 111: spe_acc
4423 // 112: spefscr
4424 // 113: sfp
4425 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113);
4426
4427 if (!Is64Bit)
4428 return false;
4429
4430  // TODO: Need to verify whether these registers are used on 64-bit AIX with
4431  // Power8 or later CPUs.
4432 // 64-bit only registers:
4433 // 114: tfhar
4434 // 115: tfiar
4435 // 116: texasr
4436 AssignToArrayRange(Builder, Address, Eight8, 114, 116);
4437
4438 return false;
4439}
4440
4441// AIX
4442namespace {
4443/// AIXABIInfo - The AIX XCOFF ABI information.
4444class AIXABIInfo : public ABIInfo {
4445 const bool Is64Bit;
4446 const unsigned PtrByteSize;
4447 CharUnits getParamTypeAlignment(QualType Ty) const;
4448
4449public:
4450 AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
4451 : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}
4452
4453 bool isPromotableTypeForABI(QualType Ty) const;
4454
4455 ABIArgInfo classifyReturnType(QualType RetTy) const;
4456 ABIArgInfo classifyArgumentType(QualType Ty) const;
4457
4458 void computeInfo(CGFunctionInfo &FI) const override {
4459 if (!getCXXABI().classifyReturnType(FI))
4460 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4461
4462 for (auto &I : FI.arguments())
4463 I.info = classifyArgumentType(I.type);
4464 }
4465
4466 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4467 QualType Ty) const override;
4468};
4469
4470class AIXTargetCodeGenInfo : public TargetCodeGenInfo {
4471 const bool Is64Bit;
4472
4473public:
4474 AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
4475 : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)),
4476 Is64Bit(Is64Bit) {}
4477 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4478 return 1; // r1 is the dedicated stack pointer
4479 }
4480
4481 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4482 llvm::Value *Address) const override;
4483};
4484} // namespace
4485
4486// Return true if the ABI requires Ty to be passed sign- or zero-
4487// extended to 32/64 bits.
4488bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
4489 // Treat an enum type as its underlying type.
4490 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4491 Ty = EnumTy->getDecl()->getIntegerType();
4492
4493 // Promotable integer types are required to be promoted by the ABI.
4494 if (Ty->isPromotableIntegerType())
4495 return true;
4496
4497 if (!Is64Bit)
4498 return false;
4499
4500  // In 64-bit mode, in addition to the usual promotable integer types, we
4501  // also need to extend all 32-bit types, since the ABI requires promotion
4502  // to 64 bits.
4503 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4504 switch (BT->getKind()) {
4505 case BuiltinType::Int:
4506 case BuiltinType::UInt:
4507 return true;
4508 default:
4509 break;
4510 }
4511
4512 return false;
4513}
4514
4515ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const {
4516 if (RetTy->isAnyComplexType())
4517 return ABIArgInfo::getDirect();
4518
4519 if (RetTy->isVectorType())
4520 return ABIArgInfo::getDirect();
4521
4522 if (RetTy->isVoidType())
4523 return ABIArgInfo::getIgnore();
4524
4525 if (isAggregateTypeForABI(RetTy))
4526 return getNaturalAlignIndirect(RetTy);
4527
4528 return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
4529 : ABIArgInfo::getDirect());
4530}
4531
4532ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const {
4533 Ty = useFirstFieldIfTransparentUnion(Ty);
4534
4535 if (Ty->isAnyComplexType())
4536 return ABIArgInfo::getDirect();
4537
4538 if (Ty->isVectorType())
4539 return ABIArgInfo::getDirect();
4540
4541 if (isAggregateTypeForABI(Ty)) {
4542 // Records with non-trivial destructors/copy-constructors should not be
4543 // passed by value.
4544 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
4545 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
4546
4547 CharUnits CCAlign = getParamTypeAlignment(Ty);
4548 CharUnits TyAlign = getContext().getTypeAlignInChars(Ty);
4549
4550 return ABIArgInfo::getIndirect(CCAlign, /*ByVal*/ true,
4551 /*Realign*/ TyAlign > CCAlign);
4552 }
4553
4554 return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
4555 : ABIArgInfo::getDirect());
4556}
4557
4558CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const {
4559 // Complex types are passed just like their elements.
4560 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4561 Ty = CTy->getElementType();
4562
4563 if (Ty->isVectorType())
4564 return CharUnits::fromQuantity(16);
4565
4566 // If the structure contains a vector type, the alignment is 16.
4567 if (isRecordWithSIMDVectorType(getContext(), Ty))
4568 return CharUnits::fromQuantity(16);
4569
4570 return CharUnits::fromQuantity(PtrByteSize);
4571}
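
// Illustrative sketch (not part of the annotated TargetInfo.cpp source): the two
// alignment buckets used by getParamTypeAlignment above. Names are made up.
typedef int v4si __attribute__((vector_size(16)));
struct WithVector { v4si v; };           // record containing a vector -> 16-byte slot
struct Plain      { int a; double d; };  // anything else -> PtrByteSize (4 or 8 bytes)
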
4572
4573Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4574 QualType Ty) const {
4575 if (Ty->isAnyComplexType())
4576 llvm::report_fatal_error("complex type is not supported on AIX yet");
4577
4578 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
4579 TypeInfo.Align = getParamTypeAlignment(Ty);
4580
4581 CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize);
4582
4583 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
4584 SlotSize, /*AllowHigher*/ true);
4585}
4586
4587bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
4588 CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
4589 return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true);
4590}
4591
4592// PowerPC-32
4593namespace {
4594/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
4595class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
4596 bool IsSoftFloatABI;
4597 bool IsRetSmallStructInRegABI;
4598
4599 CharUnits getParamTypeAlignment(QualType Ty) const;
4600
4601public:
4602 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI,
4603 bool RetSmallStructInRegABI)
4604 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI),
4605 IsRetSmallStructInRegABI(RetSmallStructInRegABI) {}
4606
4607 ABIArgInfo classifyReturnType(QualType RetTy) const;
4608
4609 void computeInfo(CGFunctionInfo &FI) const override {
4610 if (!getCXXABI().classifyReturnType(FI))
4611 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4612 for (auto &I : FI.arguments())
4613 I.info = classifyArgumentType(I.type);
4614 }
4615
4616 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4617 QualType Ty) const override;
4618};
4619
4620class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
4621public:
4622 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI,
4623 bool RetSmallStructInRegABI)
4624 : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>(
4625 CGT, SoftFloatABI, RetSmallStructInRegABI)) {}
4626
4627 static bool isStructReturnInRegABI(const llvm::Triple &Triple,
4628 const CodeGenOptions &Opts);
4629
4630 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4631 // This is recovered from gcc output.
4632 return 1; // r1 is the dedicated stack pointer
4633 }
4634
4635 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4636 llvm::Value *Address) const override;
4637};
4638}
4639
4640CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
4641 // Complex types are passed just like their elements.
4642 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4643 Ty = CTy->getElementType();
4644
4645 if (Ty->isVectorType())
4646 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
4647 : 4);
4648
4649 // For single-element float/vector structs, we consider the whole type
4650 // to have the same alignment requirements as its single element.
4651 const Type *AlignTy = nullptr;
4652 if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
4653 const BuiltinType *BT = EltType->getAs<BuiltinType>();
4654 if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
4655 (BT && BT->isFloatingPoint()))
4656 AlignTy = EltType;
4657 }
4658
4659 if (AlignTy)
4660 return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4);
4661 return CharUnits::fromQuantity(4);
4662}
4663
4664ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
4665 uint64_t Size;
4666
4667 // -msvr4-struct-return puts small aggregates in GPR3 and GPR4.
4668 if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI &&
4669 (Size = getContext().getTypeSize(RetTy)) <= 64) {
4670 // System V ABI (1995), page 3-22, specified:
4671 // > A structure or union whose size is less than or equal to 8 bytes
4672 // > shall be returned in r3 and r4, as if it were first stored in the
4673 // > 8-byte aligned memory area and then the low addressed word were
4674 // > loaded into r3 and the high-addressed word into r4. Bits beyond
4675 // > the last member of the structure or union are not defined.
4676 //
4677 // GCC for big-endian PPC32 inserts the pad before the first member,
4678 // not "beyond the last member" of the struct. To stay compatible
4679 // with GCC, we coerce the struct to an integer of the same size.
4680 // LLVM will extend it and return i32 in r3, or i64 in r3:r4.
4681 if (Size == 0)
4682 return ABIArgInfo::getIgnore();
4683 else {
4684 llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size);
4685 return ABIArgInfo::getDirect(CoerceTy);
4686 }
4687 }
4688
4689 return DefaultABIInfo::classifyReturnType(RetTy);
4690}
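
// Illustrative sketch (not part of the annotated TargetInfo.cpp source): the
// -msvr4-struct-return case handled above, with made-up type names.
struct Pair { short lo; short hi; };     // 4 bytes <= 8
Pair makePair() { return {1, 2}; }       // coerced to an i32 and returned in r3
struct Wide { int w[4]; };               // 16 bytes > 8
Wide makeWide();                         // falls through to the default (indirect) path
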
4691
4692// TODO: this implementation is now likely redundant with
4693// DefaultABIInfo::EmitVAArg.
4694Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
4695 QualType Ty) const {
4696 if (getTarget().getTriple().isOSDarwin()) {
4697 auto TI = getContext().getTypeInfoInChars(Ty);
4698 TI.Align = getParamTypeAlignment(Ty);
4699
4700 CharUnits SlotSize = CharUnits::fromQuantity(4);
4701 return emitVoidPtrVAArg(CGF, VAList, Ty,
4702 classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
4703 /*AllowHigherAlign=*/true);
4704 }
4705
4706 const unsigned OverflowLimit = 8;
4707 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4708 // TODO: Implement this. For now ignore.
4709 (void)CTy;
4710 return Address::invalid(); // FIXME?
4711 }
4712
4713 // struct __va_list_tag {
4714 // unsigned char gpr;
4715 // unsigned char fpr;
4716 // unsigned short reserved;
4717 // void *overflow_arg_area;
4718 // void *reg_save_area;
4719 // };
4720
4721 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
4722 bool isInt = !Ty->isFloatingType();
4723 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
4724
4725 // All aggregates are passed indirectly? That doesn't seem consistent
4726 // with the argument-lowering code.
4727 bool isIndirect = isAggregateTypeForABI(Ty);
4728
4729 CGBuilderTy &Builder = CGF.Builder;
4730
4731 // The calling convention either uses 1-2 GPRs or 1 FPR.
4732 Address NumRegsAddr = Address::invalid();
4733 if (isInt || IsSoftFloatABI) {
4734 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
4735 } else {
4736 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
4737 }
4738
4739 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
4740
4741 // "Align" the register count when TY is i64.
4742 if (isI64 || (isF64 && IsSoftFloatABI)) {
4743 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
4744 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
4745 }
4746
4747 llvm::Value *CC =
4748 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
4749
4750 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
4751 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
4752 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4753
4754 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4755
4756 llvm::Type *DirectTy = CGF.ConvertType(Ty);
4757 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4758
4759 // Case 1: consume registers.
4760 Address RegAddr = Address::invalid();
4761 {
4762 CGF.EmitBlock(UsingRegs);
4763
4764 Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
4765 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
4766 CharUnits::fromQuantity(8));
4767    assert(RegAddr.getElementType() == CGF.Int8Ty);
4768
4769 // Floating-point registers start after the general-purpose registers.
4770 if (!(isInt || IsSoftFloatABI)) {
4771 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
4772 CharUnits::fromQuantity(32));
4773 }
4774
4775    // Get the address of the saved value by scaling the number of
4776    // registers we've used by the size of each register.
4777 CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
4778 llvm::Value *RegOffset =
4779 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
4780 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
4781 RegAddr.getPointer(), RegOffset),
4782 RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
4783 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
4784
4785 // Increase the used-register count.
4786 NumRegs =
4787 Builder.CreateAdd(NumRegs,
4788 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4789 Builder.CreateStore(NumRegs, NumRegsAddr);
4790
4791 CGF.EmitBranch(Cont);
4792 }
4793
4794 // Case 2: consume space in the overflow area.
4795 Address MemAddr = Address::invalid();
4796 {
4797 CGF.EmitBlock(UsingOverflow);
4798
4799 Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4800
4801 // Everything in the overflow area is rounded up to a size of at least 4.
4802 CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
4803
4804 CharUnits Size;
4805 if (!isIndirect) {
4806 auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
4807 Size = TypeInfo.Width.alignTo(OverflowAreaAlign);
4808 } else {
4809 Size = CGF.getPointerSize();
4810 }
4811
4812 Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
4813 Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
4814 OverflowAreaAlign);
4815 // Round up address of argument to alignment
4816 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
4817 if (Align > OverflowAreaAlign) {
4818 llvm::Value *Ptr = OverflowArea.getPointer();
4819 OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
4820 Align);
4821 }
4822
4823 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
4824
4825 // Increase the overflow area.
4826 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
4827 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
4828 CGF.EmitBranch(Cont);
4829 }
4830
4831 CGF.EmitBlock(Cont);
4832
4833 // Merge the cases with a phi.
4834 Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
4835 "vaarg.addr");
4836
4837 // Load the pointer if the argument was passed indirectly.
4838 if (isIndirect) {
4839 Result = Address(Builder.CreateLoad(Result, "aggr"),
4840 getContext().getTypeAlignInChars(Ty));
4841 }
4842
4843 return Result;
4844}
4845
4846bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
4847 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
4848  assert(Triple.isPPC32());
4849
4850 switch (Opts.getStructReturnConvention()) {
4851 case CodeGenOptions::SRCK_Default:
4852 break;
4853 case CodeGenOptions::SRCK_OnStack: // -maix-struct-return
4854 return false;
4855 case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return
4856 return true;
4857 }
4858
4859 if (Triple.isOSBinFormatELF() && !Triple.isOSLinux())
4860 return true;
4861
4862 return false;
4863}
4864
4865bool
4866PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4867 llvm::Value *Address) const {
4868 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false,
4869 /*IsAIX*/ false);
4870}
4871
4872// PowerPC-64
4873
4874namespace {
4875/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
4876class PPC64_SVR4_ABIInfo : public SwiftABIInfo {
4877public:
4878 enum ABIKind {
4879 ELFv1 = 0,
4880 ELFv2
4881 };
4882
4883private:
4884 static const unsigned GPRBits = 64;
4885 ABIKind Kind;
4886 bool IsSoftFloatABI;
4887
4888public:
4889 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind,
4890 bool SoftFloatABI)
4891 : SwiftABIInfo(CGT), Kind(Kind), IsSoftFloatABI(SoftFloatABI) {}
4892
4893 bool isPromotableTypeForABI(QualType Ty) const;
4894 CharUnits getParamTypeAlignment(QualType Ty) const;
4895
4896 ABIArgInfo classifyReturnType(QualType RetTy) const;
4897 ABIArgInfo classifyArgumentType(QualType Ty) const;
4898
4899 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4900 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4901 uint64_t Members) const override;
4902
4903 // TODO: We can add more logic to computeInfo to improve performance.
4904 // Example: For aggregate arguments that fit in a register, we could
4905 // use getDirectInReg (as is done below for structs containing a single
4906 // floating-point value) to avoid pushing them to memory on function
4907 // entry. This would require changing the logic in PPCISelLowering
4908 // when lowering the parameters in the caller and args in the callee.
4909 void computeInfo(CGFunctionInfo &FI) const override {
4910 if (!getCXXABI().classifyReturnType(FI))
4911 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4912 for (auto &I : FI.arguments()) {
4913 // We rely on the default argument classification for the most part.
4914 // One exception: An aggregate containing a single floating-point
4915 // or vector item must be passed in a register if one is available.
4916 const Type *T = isSingleElementStruct(I.type, getContext());
4917 if (T) {
4918 const BuiltinType *BT = T->getAs<BuiltinType>();
4919 if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
4920 (BT && BT->isFloatingPoint())) {
4921 QualType QT(T, 0);
4922 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
4923 continue;
4924 }
4925 }
4926 I.info = classifyArgumentType(I.type);
4927 }
4928 }
4929
4930 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4931 QualType Ty) const override;
4932
4933 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
4934 bool asReturnValue) const override {
4935 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
4936 }
4937
4938 bool isSwiftErrorInRegister() const override {
4939 return false;
4940 }
4941};
4942
4943class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
4944
4945public:
4946 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
4947 PPC64_SVR4_ABIInfo::ABIKind Kind,
4948 bool SoftFloatABI)
4949 : TargetCodeGenInfo(
4950 std::make_unique<PPC64_SVR4_ABIInfo>(CGT, Kind, SoftFloatABI)) {}
4951
4952 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4953 // This is recovered from gcc output.
4954 return 1; // r1 is the dedicated stack pointer
4955 }
4956
4957 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4958 llvm::Value *Address) const override;
4959};
4960
4961class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
4962public:
4963 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
4964
4965 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4966 // This is recovered from gcc output.
4967 return 1; // r1 is the dedicated stack pointer
4968 }
4969
4970 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4971 llvm::Value *Address) const override;
4972};
4973
4974}
4975
4976// Return true if the ABI requires Ty to be passed sign- or zero-
4977// extended to 64 bits.
4978bool
4979PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
4980 // Treat an enum type as its underlying type.
4981 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4982 Ty = EnumTy->getDecl()->getIntegerType();
4983
4984 // Promotable integer types are required to be promoted by the ABI.
4985 if (isPromotableIntegerTypeForABI(Ty))
4986 return true;
4987
4988 // In addition to the usual promotable integer types, we also need to
4989 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
4990 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4991 switch (BT->getKind()) {
4992 case BuiltinType::Int:
4993 case BuiltinType::UInt:
4994 return true;
4995 default:
4996 break;
4997 }
4998
4999 if (const auto *EIT = Ty->getAs<ExtIntType>())
5000 if (EIT->getNumBits() < 64)
5001 return true;
5002
5003 return false;
5004}
5005
5006/// isAlignedParamType - Determine whether a type requires 16-byte or
5007/// higher alignment in the parameter area. Always returns at least 8.
5008CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
5009 // Complex types are passed just like their elements.
5010 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
5011 Ty = CTy->getElementType();
5012
5013 // Only vector types of size 16 bytes need alignment (larger types are
5014 // passed via reference, smaller types are not aligned).
5015 if (Ty->isVectorType()) {
5016 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
5017 } else if (Ty->isRealFloatingType() &&
5018 &getContext().getFloatTypeSemantics(Ty) ==
5019 &llvm::APFloat::IEEEquad()) {
5020 // According to ABI document section 'Optional Save Areas': If extended
5021 // precision floating-point values in IEEE BINARY 128 QUADRUPLE PRECISION
5022 // format are supported, map them to a single quadword, quadword aligned.
5023 return CharUnits::fromQuantity(16);
5024 }
5025
5026 // For single-element float/vector structs, we consider the whole type
5027 // to have the same alignment requirements as its single element.
5028 const Type *AlignAsType = nullptr;
5029 const Type *EltType = isSingleElementStruct(Ty, getContext());
5030 if (EltType) {
5031 const BuiltinType *BT = EltType->getAs<BuiltinType>();
5032 if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
5033 (BT && BT->isFloatingPoint()))
5034 AlignAsType = EltType;
5035 }
5036
5037 // Likewise for ELFv2 homogeneous aggregates.
5038 const Type *Base = nullptr;
5039 uint64_t Members = 0;
5040 if (!AlignAsType && Kind == ELFv2 &&
5041 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
5042 AlignAsType = Base;
5043
5044 // With special case aggregates, only vector base types need alignment.
5045 if (AlignAsType) {
5046 return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
5047 }
5048
5049 // Otherwise, we only need alignment for any aggregate type that
5050 // has an alignment requirement of >= 16 bytes.
5051 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
5052 return CharUnits::fromQuantity(16);
5053 }
5054
5055 return CharUnits::fromQuantity(8);
5056}
5057
5058/// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
5059/// aggregate. Base is set to the base element type, and Members is set
5060/// to the number of base elements.
5061bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
5062 uint64_t &Members) const {
5063 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
5064 uint64_t NElements = AT->getSize().getZExtValue();
5065 if (NElements == 0)
5066 return false;
5067 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
5068 return false;
5069 Members *= NElements;
5070 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
5071 const RecordDecl *RD = RT->getDecl();
5072 if (RD->hasFlexibleArrayMember())
5073 return false;
5074
5075 Members = 0;
5076
5077    // If this is a C++ record, check the properties of the record, such as
5078    // its bases, and any ABI-specific restrictions.
5079 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
5080 if (!getCXXABI().isPermittedToBeHomogeneousAggregate(CXXRD))
5081 return false;
5082
5083 for (const auto &I : CXXRD->bases()) {
5084 // Ignore empty records.
5085 if (isEmptyRecord(getContext(), I.getType(), true))
5086 continue;
5087
5088 uint64_t FldMembers;
5089 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
5090 return false;
5091
5092 Members += FldMembers;
5093 }
5094 }
5095
5096 for (const auto *FD : RD->fields()) {
5097 // Ignore (non-zero arrays of) empty records.
5098 QualType FT = FD->getType();
5099 while (const ConstantArrayType *AT =
5100 getContext().getAsConstantArrayType(FT)) {
5101 if (AT->getSize().getZExtValue() == 0)
5102 return false;
5103 FT = AT->getElementType();
5104 }
5105 if (isEmptyRecord(getContext(), FT, true))
5106 continue;
5107
5108 // For compatibility with GCC, ignore empty bitfields in C++ mode.
5109 if (getContext().getLangOpts().CPlusPlus &&
5110 FD->isZeroLengthBitField(getContext()))
5111 continue;
5112
5113 uint64_t FldMembers;
5114 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
5115 return false;
5116
5117 Members = (RD->isUnion() ?
5118 std::max(Members, FldMembers) : Members + FldMembers);
5119 }
5120
5121 if (!Base)
5122 return false;
5123
5124 // Ensure there is no padding.
5125 if (getContext().getTypeSize(Base) * Members !=
5126 getContext().getTypeSize(Ty))
5127 return false;
5128 } else {
5129 Members = 1;
5130 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
5131 Members = 2;
5132 Ty = CT->getElementType();
5133 }
5134
5135 // Most ABIs only support float, double, and some vector type widths.
5136 if (!isHomogeneousAggregateBaseType(Ty))
5137 return false;
5138
5139 // The base type must be the same for all members. Types that
5140 // agree in both total size and mode (float vs. vector) are
5141 // treated as being equivalent here.
5142 const Type *TyPtr = Ty.getTypePtr();
5143 if (!Base) {
5144 Base = TyPtr;
5145      // If it's a vector with a non-power-of-2 element count, its ABI size is
5146      // already rounded up to a power of 2, so widen the base type to match.
5147 if (const VectorType *VT = Base->getAs<VectorType>()) {
5148 QualType EltTy = VT->getElementType();
5149 unsigned NumElements =
5150 getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
5151 Base = getContext()
5152 .getVectorType(EltTy, NumElements, VT->getVectorKind())
5153 .getTypePtr();
5154 }
5155 }
5156
5157 if (Base->isVectorType() != TyPtr->isVectorType() ||
5158 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
5159 return false;
5160 }
5161 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
5162}
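
// Illustrative sketch (not part of the annotated TargetInfo.cpp source): shapes
// that the walk above accepts or rejects as homogeneous aggregates. Names are
// made up for illustration.
struct FourDoubles { double d[4]; };     // constant array: Base = double, Members = 4
struct Mixed       { double d; int i; }; // differing base types -> rejected
union  OneFloat    { float a; float b; };// union: Members = max(1, 1) = 1
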
5163
5164bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
5165 // Homogeneous aggregates for ELFv2 must have base types of float,
5166 // double, long double, or 128-bit vectors.
5167 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
5168 if (BT->getKind() == BuiltinType::Float ||
5169 BT->getKind() == BuiltinType::Double ||
5170 BT->getKind() == BuiltinType::LongDouble ||
5171 (getContext().getTargetInfo().hasFloat128Type() &&
5172 (BT->getKind() == BuiltinType::Float128))) {
5173 if (IsSoftFloatABI)
5174 return false;
5175 return true;
5176 }
5177 }
5178 if (const VectorType *VT = Ty->getAs<VectorType>()) {
5179 if (getContext().getTypeSize(VT) == 128)
5180 return true;
5181 }
5182 return false;
5183}
5184
5185bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
5186 const Type *Base, uint64_t Members) const {
5187 // Vector and fp128 types require one register, other floating point types
5188 // require one or two registers depending on their size.
5189 uint32_t NumRegs =
5190 ((getContext().getTargetInfo().hasFloat128Type() &&
5191 Base->isFloat128Type()) ||
5192 Base->isVectorType()) ? 1
5193 : (getContext().getTypeSize(Base) + 63) / 64;
5194
5195 // Homogeneous Aggregates may occupy at most 8 registers.
5196 return Members * NumRegs <= 8;
5197}
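
// Illustrative sketch (not part of the annotated TargetInfo.cpp source): the
// eight-register cap applied above for ELFv2 homogeneous aggregates.
struct EightDoubles { double d[8]; };    // 8 members x 1 register = 8 -> still fits
struct NineDoubles  { double d[9]; };    // needs 9 registers -> not treated as an HA
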
5198
5199ABIArgInfo
5200PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
5201 Ty = useFirstFieldIfTransparentUnion(Ty);
5202
5203 if (Ty->isAnyComplexType())
5204 return ABIArgInfo::getDirect();
5205
5206 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
5207 // or via reference (larger than 16 bytes).
5208 if (Ty->isVectorType()) {
5209 uint64_t Size = getContext().getTypeSize(Ty);
5210 if (Size > 128)
5211 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5212 else if (Size < 128) {
5213 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
5214 return ABIArgInfo::getDirect(CoerceTy);
5215 }
5216 }
5217
5218 if (const auto *EIT = Ty->getAs<ExtIntType>())
5219 if (EIT->getNumBits() > 128)
5220 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
5221
5222 if (isAggregateTypeForABI(Ty)) {
5223 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
5224 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
5225
5226 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
5227 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
5228
5229 // ELFv2 homogeneous aggregates are passed as array types.
5230 const Type *Base = nullptr;
5231 uint64_t Members = 0;
5232 if (Kind == ELFv2 &&
5233 isHomogeneousAggregate(Ty, Base, Members)) {
5234 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
5235 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
5236 return ABIArgInfo::getDirect(CoerceTy);
5237 }
5238
5239 // If an aggregate may end up fully in registers, we do not
5240 // use the ByVal method, but pass the aggregate as array.
5241 // This is usually beneficial since we avoid forcing the
5242 // back-end to store the argument to memory.
5243 uint64_t Bits = getContext().getTypeSize(Ty);
5244 if (Bits > 0 && Bits <= 8 * GPRBits) {
5245 llvm::Type *CoerceTy;
5246
5247 // Types up to 8 bytes are passed as integer type (which will be
5248 // properly aligned in the argument save area doubleword).
5249 if (Bits <= GPRBits)
5250 CoerceTy =
5251 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
5252 // Larger types are passed as arrays, with the base type selected
5253 // according to the required alignment in the save area.
5254 else {
5255 uint64_t RegBits = ABIAlign * 8;
5256 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
5257 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
5258 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
5259 }
5260
5261 return ABIArgInfo::getDirect(CoerceTy);
5262 }
5263
5264 // All other aggregates are passed ByVal.
5265 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
5266 /*ByVal=*/true,
5267 /*Realign=*/TyAlign > ABIAlign);
5268 }
5269
5270 return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
5271 : ABIArgInfo::getDirect());
5272}
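
// Illustrative sketch (not part of the annotated TargetInfo.cpp source): the
// coercions chosen above for a few aggregate parameters, assuming ELFv2.
struct Homogeneous { double d[3]; };     // ELFv2 homogeneous aggregate -> [3 x double]
struct TwoInts     { int a, b; };        // 8 bytes                     -> a single i64
struct Bytes24     { char b[24]; };      // 24 bytes, 8-byte ABI align  -> [3 x i64]
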
5273
5274ABIArgInfo
5275PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
5276 if (RetTy->isVoidType())
5277 return ABIArgInfo::getIgnore();
5278
5279 if (RetTy->isAnyComplexType())
5280 return ABIArgInfo::getDirect();
5281
5282 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
5283 // or via reference (larger than 16 bytes).
5284 if (RetTy->isVectorType()) {
5285 uint64_t Size = getContext().getTypeSize(RetTy);
5286 if (Size > 128)
5287 return getNaturalAlignIndirect(RetTy);
5288 else if (Size < 128) {
5289 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
5290 return ABIArgInfo::getDirect(CoerceTy);
5291 }
5292 }
5293
5294 if (const auto *EIT = RetTy->getAs<ExtIntType>())
5295 if (EIT->getNumBits() > 128)
5296 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
5297
5298 if (isAggregateTypeForABI(RetTy)) {
5299 // ELFv2 homogeneous aggregates are returned as array types.
5300 const Type *Base = nullptr;
5301 uint64_t Members = 0;
5302 if (Kind == ELFv2 &&
5303 isHomogeneousAggregate(RetTy, Base, Members)) {
5304 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
5305 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
5306 return ABIArgInfo::getDirect(CoerceTy);
5307 }
5308
5309 // ELFv2 small aggregates are returned in up to two registers.
5310 uint64_t Bits = getContext().getTypeSize(RetTy);
5311 if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
5312 if (Bits == 0)
5313 return ABIArgInfo::getIgnore();
5314
5315 llvm::Type *CoerceTy;
5316 if (Bits > GPRBits) {
5317 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
5318 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
5319 } else
5320 CoerceTy =
5321 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
5322 return ABIArgInfo::getDirect(CoerceTy);
5323 }
5324
5325 // All other aggregates are returned indirectly.
5326 return getNaturalAlignIndirect(RetTy);
5327 }
5328
5329 return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
5330 : ABIArgInfo::getDirect());
5331}
5332
5333// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
5334Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5335 QualType Ty) const {
5336 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
5337 TypeInfo.Align = getParamTypeAlignment(Ty);
5338
5339 CharUnits SlotSize = CharUnits::fromQuantity(8);
5340
5341 // If we have a complex type and the base type is smaller than 8 bytes,
5342 // the ABI calls for the real and imaginary parts to be right-adjusted
5343 // in separate doublewords. However, Clang expects us to produce a
5344 // pointer to a structure with the two parts packed tightly. So generate
5345 // loads of the real and imaginary parts relative to the va_list pointer,
5346 // and store them to a temporary structure.
5347 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
5348 CharUnits EltSize = TypeInfo.Width / 2;
5349 if (EltSize < SlotSize) {
5350 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
5351 SlotSize * 2, SlotSize,
5352 SlotSize, /*AllowHigher*/ true);
5353
5354 Address RealAddr = Addr;
5355 Address ImagAddr = RealAddr;
5356 if (CGF.CGM.getDataLayout().isBigEndian()) {
5357 RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
5358 SlotSize - EltSize);
5359 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
5360 2 * SlotSize - EltSize);
5361 } else {
5362 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
5363 }
5364
5365 llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
5366 RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
5367 ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
5368 llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
5369 llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
5370
5371 Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
5372 CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
5373 /*init*/ true);
5374 return Temp;
5375 }
5376 }
5377
5378 // Otherwise, just use the general rule.
5379 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
5380 TypeInfo, SlotSize, /*AllowHigher*/ true);
5381}
5382
5383bool
5384PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
5385 CodeGen::CodeGenFunction &CGF,
5386 llvm::Value *Address) const {
5387 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
5388 /*IsAIX*/ false);
5389}
5390
5391bool
5392PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5393 llvm::Value *Address) const {
5394 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
5395 /*IsAIX*/ false);
5396}
5397
5398//===----------------------------------------------------------------------===//
5399// AArch64 ABI Implementation
5400//===----------------------------------------------------------------------===//
5401
5402namespace {
5403
5404class AArch64ABIInfo : public SwiftABIInfo {
5405public:
5406 enum ABIKind {
5407 AAPCS = 0,
5408 DarwinPCS,
5409 Win64
5410 };
5411
5412private:
5413 ABIKind Kind;
5414
5415public:
5416 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind)
5417 : SwiftABIInfo(CGT), Kind(Kind) {}
5418
5419private:
5420 ABIKind getABIKind() const { return Kind; }
5421 bool isDarwinPCS() const { return Kind == DarwinPCS; }
5422
5423 ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
5424 ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic,
5425 unsigned CallingConvention) const;
5426 ABIArgInfo coerceIllegalVector(QualType Ty) const;
5427 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
5428 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
5429 uint64_t Members) const override;
5430
5431 bool isIllegalVectorType(QualType Ty) const;
5432
5433 void computeInfo(CGFunctionInfo &FI) const override {
5434 if (!::classifyReturnType(getCXXABI(), FI, *this))
5435 FI.getReturnInfo() =
5436 classifyReturnType(FI.getReturnType(), FI.isVariadic());
5437
5438 for (auto &it : FI.arguments())
5439 it.info = classifyArgumentType(it.type, FI.isVariadic(),
5440 FI.getCallingConvention());
5441 }
5442
5443 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
5444 CodeGenFunction &CGF) const;
5445
5446 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
5447 CodeGenFunction &CGF) const;
5448
5449 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5450 QualType Ty) const override {
5451 llvm::Type *BaseTy = CGF.ConvertType(Ty);
5452 if (isa<llvm::ScalableVectorType>(BaseTy))
5453 llvm::report_fatal_error("Passing SVE types to variadic functions is "
5454 "currently not supported");
5455
5456 return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
5457 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
5458 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
5459 }
5460
5461 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
5462 QualType Ty) const override;
5463
5464 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
5465 bool asReturnValue) const override {
5466 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
5467 }
5468 bool isSwiftErrorInRegister() const override {
5469 return true;
5470 }
5471
5472 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
5473 unsigned elts) const override;
5474
5475 bool allowBFloatArgsAndRet() const override {
5476 return getTarget().hasBFloat16Type();
5477 }
5478};
5479
5480class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
5481public:
5482 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
5483 : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {}
5484
5485 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
5486 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
5487 }
5488
5489 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5490 return 31;
5491 }
5492
5493 bool doesReturnSlotInterfereWithArgs() const override { return false; }
5494
5495 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5496 CodeGen::CodeGenModule &CGM) const override {
5497 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5498 if (!FD)
5499 return;
5500
5501 const auto *TA = FD->getAttr<TargetAttr>();
5502 if (TA == nullptr)
5503 return;
5504
5505 ParsedTargetAttr Attr = TA->parse();
5506 if (Attr.BranchProtection.empty())
5507 return;
5508
5509 TargetInfo::BranchProtectionInfo BPI;
5510 StringRef Error;
5511 (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
5512 BPI, Error);
5513    assert(Error.empty());
5514
5515 auto *Fn = cast<llvm::Function>(GV);
5516 static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
5517 Fn->addFnAttr("sign-return-address", SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
5518
5519 if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
5520 Fn->addFnAttr("sign-return-address-key",
5521 BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey
5522 ? "a_key"
5523 : "b_key");
5524 }
5525
5526 Fn->addFnAttr("branch-target-enforcement",
5527 BPI.BranchTargetEnforcement ? "true" : "false");
5528 }
5529
5530 bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
5531 llvm::Type *Ty) const override {
5532 if (CGF.getTarget().hasFeature("ls64")) {
5533 auto *ST = dyn_cast<llvm::StructType>(Ty);
5534 if (ST && ST->getNumElements() == 1) {
5535 auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
5536 if (AT && AT->getNumElements() == 8 &&
5537 AT->getElementType()->isIntegerTy(64))
5538 return true;
5539 }
5540 }
5541 return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty);
5542 }
5543};
5544
5545class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
5546public:
5547 WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K)
5548 : AArch64TargetCodeGenInfo(CGT, K) {}
5549
5550 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5551 CodeGen::CodeGenModule &CGM) const override;
5552
5553 void getDependentLibraryOption(llvm::StringRef Lib,
5554 llvm::SmallString<24> &Opt) const override {
5555 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5556 }
5557
5558 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
5559 llvm::SmallString<32> &Opt) const override {
5560 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
5561 }
5562};
5563
5564void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
5565 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
5566 AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5567 if (GV->isDeclaration())
5568 return;
5569 addStackProbeTargetAttributes(D, GV, CGM);
5570}
5571}
5572
5573ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const {
5574  assert(Ty->isVectorType() && "expected vector type!");
5575
5576 const auto *VT = Ty->castAs<VectorType>();
5577 if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) {
5578    assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
5579    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
5580               BuiltinType::UChar &&
5581           "unexpected builtin type for SVE predicate!");
5582 return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
5583 llvm::Type::getInt1Ty(getVMContext()), 16));
5584 }
5585
5586 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) {
5587    assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
5588
5589 const auto *BT = VT->getElementType()->castAs<BuiltinType>();
5590 llvm::ScalableVectorType *ResType = nullptr;
5591 switch (BT->getKind()) {
5592 default:
5593      llvm_unreachable("unexpected builtin type for SVE vector!");
5594 case BuiltinType::SChar:
5595 case BuiltinType::UChar:
5596 ResType = llvm::ScalableVectorType::get(
5597 llvm::Type::getInt8Ty(getVMContext()), 16);
5598 break;
5599 case BuiltinType::Short:
5600 case BuiltinType::UShort:
5601 ResType = llvm::ScalableVectorType::get(
5602 llvm::Type::getInt16Ty(getVMContext()), 8);
5603 break;
5604 case BuiltinType::Int:
5605 case BuiltinType::UInt:
5606 ResType = llvm::ScalableVectorType::get(
5607 llvm::Type::getInt32Ty(getVMContext()), 4);
5608 break;
5609 case BuiltinType::Long:
5610 case BuiltinType::ULong:
5611 ResType = llvm::ScalableVectorType::get(
5612 llvm::Type::getInt64Ty(getVMContext()), 2);
5613 break;
5614 case BuiltinType::Half:
5615 ResType = llvm::ScalableVectorType::get(
5616 llvm::Type::getHalfTy(getVMContext()), 8);
5617 break;
5618 case BuiltinType::Float:
5619 ResType = llvm::ScalableVectorType::get(
5620 llvm::Type::getFloatTy(getVMContext()), 4);
5621 break;
5622 case BuiltinType::Double:
5623 ResType = llvm::ScalableVectorType::get(
5624 llvm::Type::getDoubleTy(getVMContext()), 2);
5625 break;
5626 case BuiltinType::BFloat16:
5627 ResType = llvm::ScalableVectorType::get(
5628 llvm::Type::getBFloatTy(getVMContext()), 8);
5629 break;
5630 }
5631 return ABIArgInfo::getDirect(ResType);
5632 }
5633
5634 uint64_t Size = getContext().getTypeSize(Ty);
5635 // Android promotes <2 x i8> to i16, not i32
5636 if (isAndroid() && (Size <= 16)) {
5637 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
5638 return ABIArgInfo::getDirect(ResType);
5639 }
5640 if (Size <= 32) {
5641 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
5642 return ABIArgInfo::getDirect(ResType);
5643 }
5644 if (Size == 64) {
5645 auto *ResType =
5646 llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
5647 return ABIArgInfo::getDirect(ResType);
5648 }
5649 if (Size == 128) {
5650 auto *ResType =
5651 llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
5652 return ABIArgInfo::getDirect(ResType);
5653 }
5654 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5655}
5656
5657ABIArgInfo
5658AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadic,
5659 unsigned CallingConvention) const {
5660 Ty = useFirstFieldIfTransparentUnion(Ty);
5661
5662 // Handle illegal vector types here.
5663 if (isIllegalVectorType(Ty))
5664 return coerceIllegalVector(Ty);
5665
5666 if (!isAggregateTypeForABI(Ty)) {
5667 // Treat an enum type as its underlying type.
5668 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5669 Ty = EnumTy->getDecl()->getIntegerType();
5670
5671 if (const auto *EIT = Ty->getAs<ExtIntType>())
5672 if (EIT->getNumBits() > 128)
5673 return getNaturalAlignIndirect(Ty);
5674
5675 return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
5676 ? ABIArgInfo::getExtend(Ty)
5677 : ABIArgInfo::getDirect());
5678 }
5679
5680 // Structures with either a non-trivial destructor or a non-trivial
5681 // copy constructor are always indirect.
5682 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5683 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
5684 CGCXXABI::RAA_DirectInMemory);
5685 }
5686
5687 // Empty records are always ignored on Darwin, but actually passed in C++ mode
5688 // elsewhere for GNU compatibility.
5689 uint64_t Size = getContext().getTypeSize(Ty);
5690 bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
5691 if (IsEmpty || Size == 0) {
5692 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
5693 return ABIArgInfo::getIgnore();
5694
5695 // GNU C mode. The only argument that gets ignored is an empty one with size
5696 // 0.
5697 if (IsEmpty && Size == 0)
5698 return ABIArgInfo::getIgnore();
5699 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5700 }
5701
5702 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
5703 const Type *Base = nullptr;
5704 uint64_t Members = 0;
5705 bool IsWin64 = Kind == Win64 || CallingConvention == llvm::CallingConv::Win64;
5706 bool IsWinVariadic = IsWin64 && IsVariadic;
5707 // In variadic functions on Windows, all composite types are treated alike,
5708 // no special handling of HFAs/HVAs.
5709 if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
5710 if (Kind != AArch64ABIInfo::AAPCS)
5711 return ABIArgInfo::getDirect(
5712 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
5713
5714 // For alignment adjusted HFAs, cap the argument alignment to 16, leave it
5715 // default otherwise.
5716 unsigned Align =
5717 getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
5718 unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
5719 Align = (Align > BaseAlign && Align >= 16) ? 16 : 0;
5720 return ABIArgInfo::getDirect(
5721 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
5722 nullptr, true, Align);
5723 }
5724
5725 // Aggregates <= 16 bytes are passed directly in registers or on the stack.
5726 if (Size <= 128) {
5727    // On RenderScript, coerce aggregates <= 16 bytes to an integer array of
5728    // the same size and alignment.
5729 if (getTarget().isRenderScriptTarget()) {
5730 return coerceToIntArray(Ty, getContext(), getVMContext());
5731 }
5732 unsigned Alignment;
5733 if (Kind == AArch64ABIInfo::AAPCS) {
5734 Alignment = getContext().getTypeUnadjustedAlign(Ty);
5735 Alignment = Alignment < 128 ? 64 : 128;
5736 } else {
5737 Alignment = std::max(getContext().getTypeAlign(Ty),
5738 (unsigned)getTarget().getPointerWidth(0));
5739 }
5740 Size = llvm::alignTo(Size, Alignment);
5741
5742 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
5743 // For aggregates with 16-byte alignment, we use i128.
5744 llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
5745 return ABIArgInfo::getDirect(
5746 Size == Alignment ? BaseTy
5747 : llvm::ArrayType::get(BaseTy, Size / Alignment));
5748 }
5749
5750 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5751}
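
// Illustrative sketch (not part of the annotated TargetInfo.cpp source): how the
// AAPCS64 branches above handle two non-HFA aggregates. Names are made up.
struct Bytes12 { char b[12]; };          // 12 bytes, align < 16 -> rounded up, [2 x i64]
struct Bytes32 { char b[32]; };          // > 16 bytes           -> passed indirectly
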
5752
5753ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
5754 bool IsVariadic) const {
5755 if (RetTy->isVoidType())
5756 return ABIArgInfo::getIgnore();
5757
5758 if (const auto *VT = RetTy->getAs<VectorType>()) {
5759 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
5760 VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
5761 return coerceIllegalVector(RetTy);
5762 }
5763
5764 // Large vector types should be returned via memory.
5765 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
5766 return getNaturalAlignIndirect(RetTy);
5767
5768 if (!isAggregateTypeForABI(RetTy)) {
5769 // Treat an enum type as its underlying type.
5770 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5771 RetTy = EnumTy->getDecl()->getIntegerType();
5772
5773 if (const auto *EIT = RetTy->getAs<ExtIntType>())
5774 if (EIT->getNumBits() > 128)
5775 return getNaturalAlignIndirect(RetTy);
5776
5777 return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
5778 ? ABIArgInfo::getExtend(RetTy)
5779 : ABIArgInfo::getDirect());
5780 }
5781
5782 uint64_t Size = getContext().getTypeSize(RetTy);
5783 if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
5784 return ABIArgInfo::getIgnore();
5785
5786 const Type *Base = nullptr;
5787 uint64_t Members = 0;
5788 if (isHomogeneousAggregate(RetTy, Base, Members) &&
5789 !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
5790 IsVariadic))
5791 // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
5792 return ABIArgInfo::getDirect();
5793
5794 // Aggregates <= 16 bytes are returned directly in registers or on the stack.
5795 if (Size <= 128) {
5796    // On RenderScript, coerce aggregates <= 16 bytes to an integer array of
5797    // the same size and alignment.
5798 if (getTarget().isRenderScriptTarget()) {
5799 return coerceToIntArray(RetTy, getContext(), getVMContext());
5800 }
5801
5802 if (Size <= 64 && getDataLayout().isLittleEndian()) {
5803 // Composite types are returned in lower bits of a 64-bit register for LE,
5804 // and in higher bits for BE. However, integer types are always returned
5805 // in lower bits for both LE and BE, and they are not rounded up to
5806 // 64-bits. We can skip rounding up of composite types for LE, but not for
5807 // BE, otherwise composite types will be indistinguishable from integer
5808 // types.
5809 return ABIArgInfo::getDirect(
5810 llvm::IntegerType::get(getVMContext(), Size));
5811 }
5812
5813 unsigned Alignment = getContext().getTypeAlign(RetTy);
5814 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
5815
5816 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
5817 // For aggregates with 16-byte alignment, we use i128.
5818 if (Alignment < 128 && Size == 128) {
5819 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
5820 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
5821 }
5822 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
5823 }
5824
5825 return getNaturalAlignIndirect(RetTy);
5826}
5827
5828/// isIllegalVectorType - check whether the vector type is legal for AArch64.
5829bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
5830 if (const VectorType *VT = Ty->getAs<VectorType>()) {
5831 // Check whether VT is a fixed-length SVE vector. These types are
5832 // represented as scalable vectors in function args/return and must be
5833 // coerced from fixed vectors.
5834 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
5835 VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
5836 return true;
5837
5838 // Check whether VT is legal.
5839 unsigned NumElements = VT->getNumElements();
5840 uint64_t Size = getContext().getTypeSize(VT);
5841    // NumElements should be a power of 2.
5842 if (!llvm::isPowerOf2_32(NumElements))
5843 return true;
5844
5845 // arm64_32 has to be compatible with the ARM logic here, which allows huge
5846 // vectors for some reason.
5847 llvm::Triple Triple = getTarget().getTriple();
5848 if (Triple.getArch() == llvm::Triple::aarch64_32 &&
5849 Triple.isOSBinFormatMachO())
5850 return Size <= 32;
5851
5852 return Size != 64 && (Size != 128 || NumElements == 1);
5853 }
5854 return false;
5855}
5856
5857bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize,
5858 llvm::Type *eltTy,
5859 unsigned elts) const {
5860 if (!llvm::isPowerOf2_32(elts))
5861 return false;
5862 if (totalSize.getQuantity() != 8 &&
5863 (totalSize.getQuantity() != 16 || elts == 1))
5864 return false;
5865 return true;
5866}
5867
5868bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
5869 // Homogeneous aggregates for AAPCS64 must have base types of a floating
5870 // point type or a short-vector type. This is the same as the 32-bit ABI,
5871 // but with the difference that any floating-point type is allowed,
5872 // including __fp16.
5873 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
5874 if (BT->isFloatingPoint())
5875 return true;
5876 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
5877 unsigned VecSize = getContext().getTypeSize(VT);
5878 if (VecSize == 64 || VecSize == 128)
5879 return true;
5880 }
5881 return false;
5882}
5883
5884bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
5885 uint64_t Members) const {
5886 return Members <= 4;
5887}
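
// Illustrative sketch (not part of the annotated TargetInfo.cpp source): AAPCS64
// caps homogeneous aggregates at four members, as checked above.
struct Vec4 { float x, y, z, w; };       // 4 members -> HFA, expanded into V registers
struct Vec5 { float a, b, c, d, e; };    // 5 members -> not an HFA; falls back to the
                                         // generic small-aggregate handling
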
5888
5889Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
5890 CodeGenFunction &CGF) const {
5891 ABIArgInfo AI = classifyArgumentType(Ty, /*IsVariadic=*/true,
5892 CGF.CurFnInfo->getCallingConvention());
5893 bool IsIndirect = AI.isIndirect();
5894
5895 llvm::Type *BaseTy = CGF.ConvertType(Ty);
5896 if (IsIndirect)
5897 BaseTy = llvm::PointerType::getUnqual(BaseTy);
5898 else if (AI.getCoerceToType())
5899 BaseTy = AI.getCoerceToType();
5900
5901 unsigned NumRegs = 1;
5902 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
5903 BaseTy = ArrTy->getElementType();
5904 NumRegs = ArrTy->getNumElements();
5905 }
5906 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
5907
5908 // The AArch64 va_list type and handling is specified in the Procedure Call
5909 // Standard, section B.4:
5910 //
5911 // struct {
5912 // void *__stack;
5913 // void *__gr_top;
5914 // void *__vr_top;
5915 // int __gr_offs;
5916 // int __vr_offs;
5917 // };
5918
5919 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
5920 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
5921 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
5922 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
5923
5924 CharUnits TySize = getContext().getTypeSizeInChars(Ty);
5925 CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);
5926
5927 Address reg_offs_p = Address::invalid();
5928 llvm::Value *reg_offs = nullptr;
5929 int reg_top_index;
5930 int RegSize = IsIndirect ? 8 : TySize.getQuantity();
5931 if (!IsFPR) {
5932 // 3 is the field number of __gr_offs
5933 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
5934 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
5935 reg_top_index = 1; // field number for __gr_top
5936 RegSize = llvm::alignTo(RegSize, 8);
5937 } else {
5938 // 4 is the field number of __vr_offs.
5939 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
5940 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
5941 reg_top_index = 2; // field number for __vr_top
5942 RegSize = 16 * NumRegs;
5943 }
5944
5945 //=======================================
5946 // Find out where argument was passed
5947 //=======================================
5948
5949 // If reg_offs >= 0 we're already using the stack for this type of
5950 // argument. We don't want to keep updating reg_offs (in case it overflows,
5951 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
5952 // whatever they get).
5953 llvm::Value *UsingStack = nullptr;
5954 UsingStack = CGF.Builder.CreateICmpSGE(
5955 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
5956
5957 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
5958
5959 // Otherwise, at least some kind of argument could go in these registers; the
5960 // question is whether this particular type is too big.
5961 CGF.EmitBlock(MaybeRegBlock);
5962
5963 // Integer arguments may need their register alignment corrected (for example a
5964 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
5965 // align __gr_offs to calculate the potential address.
5966 if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
5967 int Align = TyAlign.getQuantity();
5968
5969 reg_offs = CGF.Builder.CreateAdd(
5970 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
5971 "align_regoffs");
5972 reg_offs = CGF.Builder.CreateAnd(
5973 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
5974 "aligned_regoffs");
5975 }
5976
5977 // Update the gr_offs/vr_offs pointer for the next call to va_arg on this
5978 // va_list. This is done unconditionally because allocating an argument to
5979 // the stack also uses up all the remaining registers of the appropriate
5980 // kind.
5981 llvm::Value *NewOffset = nullptr;
5982 NewOffset = CGF.Builder.CreateAdd(
5983 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
5984 CGF.Builder.CreateStore(NewOffset, reg_offs_p);
5985
5986 // Now we're in a position to decide whether this argument really was in
5987 // registers or not.
5988 llvm::Value *InRegs = nullptr;
5989 InRegs = CGF.Builder.CreateICmpSLE(
5990 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
5991
5992 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
5993
5994 //=======================================
5995 // Argument was in registers
5996 //=======================================
5997
5998 // Now we emit the code for if the argument was originally passed in
5999 // registers. First start the appropriate block:
6000 CGF.EmitBlock(InRegBlock);
6001
6002 llvm::Value *reg_top = nullptr;
6003 Address reg_top_p =
6004 CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
6005 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
6006 Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs),
6007 CharUnits::fromQuantity(IsFPR ? 16 : 8));
6008 Address RegAddr = Address::invalid();
6009 llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
6010
6011 if (IsIndirect) {
6012 // If it's been passed indirectly (actually a struct), whatever we find from
6013 // stored registers or on the stack will actually be a struct **.
6014 MemTy = llvm::PointerType::getUnqual(MemTy);
6015 }
6016
6017 const Type *Base = nullptr;
6018 uint64_t NumMembers = 0;
6019 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
6020 if (IsHFA && NumMembers > 1) {
6021 // Homogeneous aggregates passed in registers will have their elements split
6022 // and stored 16 bytes apart regardless of size (they're notionally in qN,
6023 // qN+1, ...). We reload and store into a temporary local variable
6024 // contiguously.
6025 assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
6026 auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
6027 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
6028 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
6029 Address Tmp = CGF.CreateTempAlloca(HFATy,
6030 std::max(TyAlign, BaseTyInfo.Align));
6031
6032 // On big-endian platforms, the value will be right-aligned in its slot.
6033 int Offset = 0;
6034 if (CGF.CGM.getDataLayout().isBigEndian() &&
6035 BaseTyInfo.Width.getQuantity() < 16)
6036 Offset = 16 - BaseTyInfo.Width.getQuantity();
6037
6038 for (unsigned i = 0; i < NumMembers; ++i) {
6039 CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
6040 Address LoadAddr =
6041 CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
6042 LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);
6043
6044 Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);
6045
6046 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
6047 CGF.Builder.CreateStore(Elem, StoreAddr);
6048 }
6049
6050 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
6051 } else {
6052 // Otherwise the object is contiguous in memory.
6053
6054 // It might be right-aligned in its slot.
6055 CharUnits SlotSize = BaseAddr.getAlignment();
6056 if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
6057 (IsHFA || !isAggregateTypeForABI(Ty)) &&
6058 TySize < SlotSize) {
6059 CharUnits Offset = SlotSize - TySize;
6060 BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
6061 }
6062
6063 RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
6064 }
6065
6066 CGF.EmitBranch(ContBlock);
6067
6068 //=======================================
6069 // Argument was on the stack
6070 //=======================================
6071 CGF.EmitBlock(OnStackBlock);
6072
6073 Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
6074 llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
6075
6076 // Again, stack arguments may need realignment. In this case both integer and
6077 // floating-point ones might be affected.
6078 if (!IsIndirect && TyAlign.getQuantity() > 8) {
6079 int Align = TyAlign.getQuantity();
6080
6081 OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
6082
6083 OnStackPtr = CGF.Builder.CreateAdd(
6084 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
6085 "align_stack");
6086 OnStackPtr = CGF.Builder.CreateAnd(
6087 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
6088 "align_stack");
6089
6090 OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
6091 }
6092 Address OnStackAddr(OnStackPtr,
6093 std::max(CharUnits::fromQuantity(8), TyAlign));
6094
6095 // All stack slots are multiples of 8 bytes.
6096 CharUnits StackSlotSize = CharUnits::fromQuantity(8);
6097 CharUnits StackSize;
6098 if (IsIndirect)
6099 StackSize = StackSlotSize;
6100 else
6101 StackSize = TySize.alignTo(StackSlotSize);
6102
6103 llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
6104 llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
6105 CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");
6106
6107 // Write the new value of __stack for the next call to va_arg
6108 CGF.Builder.CreateStore(NewStack, stack_p);
6109
6110 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
6111 TySize < StackSlotSize) {
6112 CharUnits Offset = StackSlotSize - TySize;
6113 OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
6114 }
6115
6116 OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);
6117
6118 CGF.EmitBranch(ContBlock);
6119
6120 //=======================================
6121 // Tidy up
6122 //=======================================
6123 CGF.EmitBlock(ContBlock);
6124
6125 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
6126 OnStackAddr, OnStackBlock, "vaargs.addr");
6127
6128 if (IsIndirect)
6129 return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
6130 TyAlign);
6131
6132 return ResAddr;
6133}
6134
6135Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
6136 CodeGenFunction &CGF) const {
6137 // The backend's lowering doesn't support va_arg for aggregates or
6138 // illegal vector types. Lower VAArg here for these cases and use
6139 // the LLVM va_arg instruction for everything else.
6140 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
6141 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
6142
6143 uint64_t PointerSize = getTarget().getPointerWidth(0) / 8;
6144 CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
6145
6146 // Empty records are ignored for parameter passing purposes.
6147 if (isEmptyRecord(getContext(), Ty, true)) {
6148 Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
6149 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
6150 return Addr;
6151 }
6152
6153 // The size of the actual thing passed, which might end up just
6154 // being a pointer for indirect types.
6155 auto TyInfo = getContext().getTypeInfoInChars(Ty);
6156
6157 // Arguments bigger than 16 bytes which aren't homogeneous
6158 // aggregates should be passed indirectly.
6159 bool IsIndirect = false;
6160 if (TyInfo.Width.getQuantity() > 16) {
6161 const Type *Base = nullptr;
6162 uint64_t Members = 0;
6163 IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
6164 }
6165
6166 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
6167 TyInfo, SlotSize, /*AllowHigherAlign*/ true);
6168}
6169
6170Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
6171 QualType Ty) const {
6172 bool IsIndirect = false;
6173
6174 // Composites larger than 16 bytes are passed by reference.
6175 if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) > 128)
6176 IsIndirect = true;
6177
6178 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
6179 CGF.getContext().getTypeInfoInChars(Ty),
6180 CharUnits::fromQuantity(8),
6181 /*allowHigherAlign*/ false);
6182}
6183
6184//===----------------------------------------------------------------------===//
6185// ARM ABI Implementation
6186//===----------------------------------------------------------------------===//
6187
6188namespace {
6189
6190class ARMABIInfo : public SwiftABIInfo {
6191public:
6192 enum ABIKind {
6193 APCS = 0,
6194 AAPCS = 1,
6195 AAPCS_VFP = 2,
6196 AAPCS16_VFP = 3,
6197 };
6198
6199private:
6200 ABIKind Kind;
6201 bool IsFloatABISoftFP;
6202
6203public:
6204 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
6205 : SwiftABIInfo(CGT), Kind(_Kind) {
6206 setCCs();
6207 IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" ||
6208 CGT.getCodeGenOpts().FloatABI == ""; // default
6209 }
6210
6211 bool isEABI() const {
6212 switch (getTarget().getTriple().getEnvironment()) {
6213 case llvm::Triple::Android:
6214 case llvm::Triple::EABI:
6215 case llvm::Triple::EABIHF:
6216 case llvm::Triple::GNUEABI:
6217 case llvm::Triple::GNUEABIHF:
6218 case llvm::Triple::MuslEABI:
6219 case llvm::Triple::MuslEABIHF:
6220 return true;
6221 default:
6222 return false;
6223 }
6224 }
6225
6226 bool isEABIHF() const {
6227 switch (getTarget().getTriple().getEnvironment()) {
6228 case llvm::Triple::EABIHF:
6229 case llvm::Triple::GNUEABIHF:
6230 case llvm::Triple::MuslEABIHF:
6231 return true;
6232 default:
6233 return false;
6234 }
6235 }
6236
6237 ABIKind getABIKind() const { return Kind; }
6238
6239 bool allowBFloatArgsAndRet() const override {
6240 return !IsFloatABISoftFP && getTarget().hasBFloat16Type();
6241 }
6242
6243private:
6244 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
6245 unsigned functionCallConv) const;
6246 ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
6247 unsigned functionCallConv) const;
6248 ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
6249 uint64_t Members) const;
6250 ABIArgInfo coerceIllegalVector(QualType Ty) const;
6251 bool isIllegalVectorType(QualType Ty) const;
6252 bool containsAnyFP16Vectors(QualType Ty) const;
6253
6254 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
6255 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
6256 uint64_t Members) const override;
6257
6258 bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;
6259
6260 void computeInfo(CGFunctionInfo &FI) const override;
6261
6262 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6263 QualType Ty) const override;
6264
6265 llvm::CallingConv::ID getLLVMDefaultCC() const;
6266 llvm::CallingConv::ID getABIDefaultCC() const;
6267 void setCCs();
6268
6269 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
6270 bool asReturnValue) const override {
6271 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
6272 }
6273 bool isSwiftErrorInRegister() const override {
6274 return true;
6275 }
6276 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
6277 unsigned elts) const override;
6278};
6279
6280class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
6281public:
6282 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
6283 : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {}
6284
6285 const ARMABIInfo &getABIInfo() const {
6286 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
6287 }
6288
6289 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
6290 return 13;
6291 }
6292
6293 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
6294 return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
6295 }
6296
6297 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6298 llvm::Value *Address) const override {
6299 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
6300
6301 // 0-15 are the 16 integer registers.
6302 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
6303 return false;
6304 }
6305
6306 unsigned getSizeOfUnwindException() const override {
6307 if (getABIInfo().isEABI()) return 88;
6308 return TargetCodeGenInfo::getSizeOfUnwindException();
6309 }
6310
6311 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6312 CodeGen::CodeGenModule &CGM) const override {
6313 if (GV->isDeclaration())
6314 return;
6315 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6316 if (!FD)
6317 return;
6318
6319 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
6320 if (!Attr)
6321 return;
6322
6323 const char *Kind;
6324 switch (Attr->getInterrupt()) {
6325 case ARMInterruptAttr::Generic: Kind = ""; break;
6326 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
6327 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
6328 case ARMInterruptAttr::SWI: Kind = "SWI"; break;
6329 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
6330 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
6331 }
6332
6333 llvm::Function *Fn = cast<llvm::Function>(GV);
6334
6335 Fn->addFnAttr("interrupt", Kind);
6336
6337 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
6338 if (ABI == ARMABIInfo::APCS)
6339 return;
6340
6341 // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
6342 // however this is not necessarily true on taking any interrupt. Instruct
6343 // the backend to perform a realignment as part of the function prologue.
6344 llvm::AttrBuilder B;
6345 B.addStackAlignmentAttr(8);
6346 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
6347 }
6348};
6349
6350class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
6351public:
6352 WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
6353 : ARMTargetCodeGenInfo(CGT, K) {}
6354
6355 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6356 CodeGen::CodeGenModule &CGM) const override;
6357
6358 void getDependentLibraryOption(llvm::StringRef Lib,
6359 llvm::SmallString<24> &Opt) const override {
6360 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
6361 }
6362
6363 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
6364 llvm::SmallString<32> &Opt) const override {
6365 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
6366 }
6367};
6368
6369void WindowsARMTargetCodeGenInfo::setTargetAttributes(
6370 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
6371 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
6372 if (GV->isDeclaration())
6373 return;
6374 addStackProbeTargetAttributes(D, GV, CGM);
6375}
6376}
6377
6378void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
6379 if (!::classifyReturnType(getCXXABI(), FI, *this))
6380 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
6381 FI.getCallingConvention());
6382
6383 for (auto &I : FI.arguments())
6384 I.info = classifyArgumentType(I.type, FI.isVariadic(),
6385 FI.getCallingConvention());
6386
6387
6388 // Always honor user-specified calling convention.
6389 if (FI.getCallingConvention() != llvm::CallingConv::C)
6390 return;
6391
6392 llvm::CallingConv::ID cc = getRuntimeCC();
6393 if (cc != llvm::CallingConv::C)
6394 FI.setEffectiveCallingConvention(cc);
6395}
6396
6397/// Return the default calling convention that LLVM will use.
6398llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
6399 // The default calling convention that LLVM will infer.
6400 if (isEABIHF() || getTarget().getTriple().isWatchABI())
6401 return llvm::CallingConv::ARM_AAPCS_VFP;
6402 else if (isEABI())
6403 return llvm::CallingConv::ARM_AAPCS;
6404 else
6405 return llvm::CallingConv::ARM_APCS;
6406}
6407
6408/// Return the calling convention that our ABI would like us to use
6409/// as the C calling convention.
6410llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
6411 switch (getABIKind()) {
6412 case APCS: return llvm::CallingConv::ARM_APCS;
6413 case AAPCS: return llvm::CallingConv::ARM_AAPCS;
6414 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
6415 case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
6416 }
6417 llvm_unreachable("bad ABI kind");
6418}
6419
6420void ARMABIInfo::setCCs() {
6421 assert(getRuntimeCC() == llvm::CallingConv::C);
6422
6423 // Don't muddy up the IR with a ton of explicit annotations if
6424 // they'd just match what LLVM will infer from the triple.
6425 llvm::CallingConv::ID abiCC = getABIDefaultCC();
6426 if (abiCC != getLLVMDefaultCC())
6427 RuntimeCC = abiCC;
6428}
6429
6430ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
6431 uint64_t Size = getContext().getTypeSize(Ty);
6432 if (Size <= 32) {
6433 llvm::Type *ResType =
6434 llvm::Type::getInt32Ty(getVMContext());
6435 return ABIArgInfo::getDirect(ResType);
6436 }
6437 if (Size == 64 || Size == 128) {
6438 auto *ResType = llvm::FixedVectorType::get(
6439 llvm::Type::getInt32Ty(getVMContext()), Size / 32);
6440 return ABIArgInfo::getDirect(ResType);
6441 }
6442 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6443}
6444
6445ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
6446 const Type *Base,
6447 uint64_t Members) const {
6448 assert(Base && "Base class should be set for homogeneous aggregate");
6449 // Base can be a floating-point or a vector.
6450 if (const VectorType *VT = Base->getAs<VectorType>()) {
6451 // FP16 vectors should be converted to integer vectors
6452 if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
6453 uint64_t Size = getContext().getTypeSize(VT);
6454 auto *NewVecTy = llvm::FixedVectorType::get(
6455 llvm::Type::getInt32Ty(getVMContext()), Size / 32);
6456 llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
6457 return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
6458 }
6459 }
6460 unsigned Align = 0;
6461 if (getABIKind() == ARMABIInfo::AAPCS ||
6462 getABIKind() == ARMABIInfo::AAPCS_VFP) {
6463 // For alignment adjusted HFAs, cap the argument alignment to 8, leave it
6464 // default otherwise.
6465 Align = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
6466 unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
6467 Align = (Align > BaseAlign && Align >= 8) ? 8 : 0;
6468 }
6469 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false, Align);
6470}
6471
6472ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
6473 unsigned functionCallConv) const {
6474 // 6.1.2.1 The following argument types are VFP CPRCs:
6475 // A single-precision floating-point type (including promoted
6476 // half-precision types); A double-precision floating-point type;
6477 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
6478 // with a Base Type of a single- or double-precision floating-point type,
6479 // 64-bit containerized vectors or 128-bit containerized vectors with one
6480 // to four Elements.
6481 // Variadic functions should always marshal to the base standard.
6482 bool IsAAPCS_VFP =
6483 !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);
6484
6485 Ty = useFirstFieldIfTransparentUnion(Ty);
6486
6487 // Handle illegal vector types here.
6488 if (isIllegalVectorType(Ty))
6489 return coerceIllegalVector(Ty);
6490
6491 if (!isAggregateTypeForABI(Ty)) {
6492 // Treat an enum type as its underlying type.
6493 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
6494 Ty = EnumTy->getDecl()->getIntegerType();
6495 }
6496
6497 if (const auto *EIT = Ty->getAs<ExtIntType>())
6498 if (EIT->getNumBits() > 64)
6499 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
6500
6501 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
6502 : ABIArgInfo::getDirect());
6503 }
6504
6505 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
6506 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6507 }
6508
6509 // Ignore empty records.
6510 if (isEmptyRecord(getContext(), Ty, true))
6511 return ABIArgInfo::getIgnore();
6512
6513 if (IsAAPCS_VFP) {
6514 // Homogeneous Aggregates need to be expanded when we can fit the aggregate
6515 // into VFP registers.
6516 const Type *Base = nullptr;
6517 uint64_t Members = 0;
6518 if (isHomogeneousAggregate(Ty, Base, Members))
6519 return classifyHomogeneousAggregate(Ty, Base, Members);
6520 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
6521 // WatchOS does have homogeneous aggregates. Note that we intentionally use
6522 // this convention even for a variadic function: the backend will use GPRs
6523 // if needed.
6524 const Type *Base = nullptr;
6525 uint64_t Members = 0;
6526 if (isHomogeneousAggregate(Ty, Base, Members)) {
6527 assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
6528 llvm::Type *Ty =
6529 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
6530 return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
6531 }
6532 }
6533
6534 if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
6535 getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
6536 // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
6537 // bigger than 128-bits, they get placed in space allocated by the caller,
6538 // and a pointer is passed.
6539 return ABIArgInfo::getIndirect(
6540 CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
6541 }
6542
6543 // Support byval for ARM.
6544 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
6545 // most 8-byte. We realign the indirect argument if type alignment is bigger
6546 // than ABI alignment.
6547 uint64_t ABIAlign = 4;
6548 uint64_t TyAlign;
6549 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
6550 getABIKind() == ARMABIInfo::AAPCS) {
6551 TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
6552 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
6553 } else {
6554 TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
6555 }
6556 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
6557 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
6558 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
6559 /*ByVal=*/true,
6560 /*Realign=*/TyAlign > ABIAlign);
6561 }
6562
6563 // On RenderScript, coerce aggregates <= 64 bytes to an integer array of the
6564 // same size and alignment.
6565 if (getTarget().isRenderScriptTarget()) {
6566 return coerceToIntArray(Ty, getContext(), getVMContext());
6567 }
6568
6569 // Otherwise, pass by coercing to a structure of the appropriate size.
6570 llvm::Type* ElemTy;
6571 unsigned SizeRegs;
6572 // FIXME: Try to match the types of the arguments more accurately where
6573 // we can.
6574 if (TyAlign <= 4) {
6575 ElemTy = llvm::Type::getInt32Ty(getVMContext());
6576 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
6577 } else {
6578 ElemTy = llvm::Type::getInt64Ty(getVMContext());
6579 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
6580 }
6581
6582 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
6583}
6584
6585static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
6586 llvm::LLVMContext &VMContext) {
6587 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
6588 // is called integer-like if its size is less than or equal to one word, and
6589 // the offset of each of its addressable sub-fields is zero.
6590
6591 uint64_t Size = Context.getTypeSize(Ty);
6592
6593 // Check that the type fits in a word.
6594 if (Size > 32)
6595 return false;
6596
6597 // FIXME: Handle vector types!
6598 if (Ty->isVectorType())
6599 return false;
6600
6601 // Float types are never treated as "integer like".
6602 if (Ty->isRealFloatingType())
6603 return false;
6604
6605 // If this is a builtin or pointer type then it is ok.
6606 if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
6607 return true;
6608
6609 // Small complex integer types are "integer like".
6610 if (const ComplexType *CT = Ty->getAs<ComplexType>())
6611 return isIntegerLikeType(CT->getElementType(), Context, VMContext);
6612
6613 // Single element and zero sized arrays should be allowed, by the definition
6614 // above, but they are not.
6615
6616 // Otherwise, it must be a record type.
6617 const RecordType *RT = Ty->getAs<RecordType>();
6618 if (!RT) return false;
6619
6620 // Ignore records with flexible arrays.
6621 const RecordDecl *RD = RT->getDecl();
6622 if (RD->hasFlexibleArrayMember())
6623 return false;
6624
6625 // Check that all sub-fields are at offset 0, and are themselves "integer
6626 // like".
6627 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
6628
6629 bool HadField = false;
6630 unsigned idx = 0;
6631 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
6632 i != e; ++i, ++idx) {
6633 const FieldDecl *FD = *i;
6634
6635 // Bit-fields are not addressable; we only need to verify they are "integer
6636 // like". We still have to disallow a subsequent non-bitfield, for example:
6637 // struct { int : 0; int x }
6638 // is non-integer like according to gcc.
6639 if (FD->isBitField()) {
6640 if (!RD->isUnion())
6641 HadField = true;
6642
6643 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
6644 return false;
6645
6646 continue;
6647 }
6648
6649 // Check if this field is at offset 0.
6650 if (Layout.getFieldOffset(idx) != 0)
6651 return false;
6652
6653 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
6654 return false;
6655
6656 // Only allow at most one field in a structure. This doesn't match the
6657 // wording above, but follows gcc in situations with a field following an
6658 // empty structure.
6659 if (!RD->isUnion()) {
6660 if (HadField)
6661 return false;
6662
6663 HadField = true;
6664 }
6665 }
6666
6667 return true;
6668}
6669
6670ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
6671 unsigned functionCallConv) const {
6672
6673 // Variadic functions should always marshal to the base standard.
6674 bool IsAAPCS_VFP =
6675 !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);
6676
6677 if (RetTy->isVoidType())
6678 return ABIArgInfo::getIgnore();
6679
6680 if (const VectorType *VT = RetTy->getAs<VectorType>()) {
6681 // Large vector types should be returned via memory.
6682 if (getContext().getTypeSize(RetTy) > 128)
6683 return getNaturalAlignIndirect(RetTy);
6684 // TODO: FP16/BF16 vectors should be converted to integer vectors
6685 // This check is similar to isIllegalVectorType - refactor?
6686 if ((!getTarget().hasLegalHalfType() &&
6687 (VT->getElementType()->isFloat16Type() ||
6688 VT->getElementType()->isHalfType())) ||
6689 (IsFloatABISoftFP &&
6690 VT->getElementType()->isBFloat16Type()))
6691 return coerceIllegalVector(RetTy);
6692 }
6693
6694 if (!isAggregateTypeForABI(RetTy)) {
6695 // Treat an enum type as its underlying type.
6696 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6697 RetTy = EnumTy->getDecl()->getIntegerType();
6698
6699 if (const auto *EIT = RetTy->getAs<ExtIntType>())
6700 if (EIT->getNumBits() > 64)
6701 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
6702
6703 return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
6704 : ABIArgInfo::getDirect();
6705 }
6706
6707 // Are we following APCS?
6708 if (getABIKind() == APCS) {
6709 if (isEmptyRecord(getContext(), RetTy, false))
6710 return ABIArgInfo::getIgnore();
6711
6712 // Complex types are all returned as packed integers.
6713 //
6714 // FIXME: Consider using 2 x vector types if the back end handles them
6715 // correctly.
6716 if (RetTy->isAnyComplexType())
6717 return ABIArgInfo::getDirect(llvm::IntegerType::get(
6718 getVMContext(), getContext().getTypeSize(RetTy)));
6719
6720 // Integer like structures are returned in r0.
6721 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
6722 // Return in the smallest viable integer type.
6723 uint64_t Size = getContext().getTypeSize(RetTy);
6724 if (Size <= 8)
6725 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6726 if (Size <= 16)
6727 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6728 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6729 }
6730
6731 // Otherwise return in memory.
6732 return getNaturalAlignIndirect(RetTy);
6733 }
6734
6735 // Otherwise this is an AAPCS variant.
6736
6737 if (isEmptyRecord(getContext(), RetTy, true))
6738 return ABIArgInfo::getIgnore();
6739
6740 // Check for homogeneous aggregates with AAPCS-VFP.
6741 if (IsAAPCS_VFP) {
6742 const Type *Base = nullptr;
6743 uint64_t Members = 0;
6744 if (isHomogeneousAggregate(RetTy, Base, Members))
6745 return classifyHomogeneousAggregate(RetTy, Base, Members);
6746 }
6747
6748 // Aggregates <= 4 bytes are returned in r0; other aggregates
6749 // are returned indirectly.
6750 uint64_t Size = getContext().getTypeSize(RetTy);
6751 if (Size <= 32) {
6752 // On RenderScript, coerce aggregates <= 4 bytes to an integer array of the
6753 // same size and alignment.
6754 if (getTarget().isRenderScriptTarget()) {
6755 return coerceToIntArray(RetTy, getContext(), getVMContext());
6756 }
6757 if (getDataLayout().isBigEndian())
6758 // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
6759 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6760
6761 // Return in the smallest viable integer type.
6762 if (Size <= 8)
6763 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6764 if (Size <= 16)
6765 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6766 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6767 } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
6768 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
6769 llvm::Type *CoerceTy =
6770 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
6771 return ABIArgInfo::getDirect(CoerceTy);
6772 }
6773
6774 return getNaturalAlignIndirect(RetTy);
6775}
6776
6777 /// isIllegalVectorType - check whether Ty is an illegal vector type.
6778bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
6779 if (const VectorType *VT = Ty->getAs<VectorType> ()) {
6780 // On targets that don't support half, fp16 or bfloat, they are expanded
6781 // into float, and we don't want the ABI to depend on whether or not they
6782 // are supported in hardware. Thus return false to coerce vectors of these
6783 // types into integer vectors.
6784 // We do not depend on hasLegalHalfType for bfloat as it is a
6785 // separate IR type.
6786 if ((!getTarget().hasLegalHalfType() &&
6787 (VT->getElementType()->isFloat16Type() ||
6788 VT->getElementType()->isHalfType())) ||
6789 (IsFloatABISoftFP &&
6790 VT->getElementType()->isBFloat16Type()))
6791 return true;
6792 if (isAndroid()) {
6793 // Android shipped using Clang 3.1, which supported a slightly different
6794 // vector ABI. The primary differences were that 3-element vector types
6795 // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
6796 // accepts that legacy behavior for Android only.
6797 // Check whether VT is legal.
6798 unsigned NumElements = VT->getNumElements();
6799 // NumElements should be power of 2 or equal to 3.
6800 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
6801 return true;
6802 } else {
6803 // Check whether VT is legal.
6804 unsigned NumElements = VT->getNumElements();
6805 uint64_t Size = getContext().getTypeSize(VT);
6806 // NumElements should be power of 2.
6807 if (!llvm::isPowerOf2_32(NumElements))
6808 return true;
6809 // Size should be greater than 32 bits.
6810 return Size <= 32;
6811 }
6812 }
6813 return false;
6814}
6815
6816/// Return true if a type contains any 16-bit floating point vectors
6817bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
6818 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
6819 uint64_t NElements = AT->getSize().getZExtValue();
6820 if (NElements == 0)
6821 return false;
6822 return containsAnyFP16Vectors(AT->getElementType());
6823 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
6824 const RecordDecl *RD = RT->getDecl();
6825
6826 // If this is a C++ record, check the bases first.
6827 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6828 if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
6829 return containsAnyFP16Vectors(B.getType());
6830 }))
6831 return true;
6832
6833 if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
6834 return FD && containsAnyFP16Vectors(FD->getType());
6835 }))
6836 return true;
6837
6838 return false;
6839 } else {
6840 if (const VectorType *VT = Ty->getAs<VectorType>())
6841 return (VT->getElementType()->isFloat16Type() ||
6842 VT->getElementType()->isBFloat16Type() ||
6843 VT->getElementType()->isHalfType());
6844 return false;
6845 }
6846}
6847
6848bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
6849 llvm::Type *eltTy,
6850 unsigned numElts) const {
6851 if (!llvm::isPowerOf2_32(numElts))
6852 return false;
6853 unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy);
6854 if (size > 64)
6855 return false;
6856 if (vectorSize.getQuantity() != 8 &&
6857 (vectorSize.getQuantity() != 16 || numElts == 1))
6858 return false;
6859 return true;
6860}
6861
6862bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
6863 // Homogeneous aggregates for AAPCS-VFP must have base types of float,
6864 // double, or 64-bit or 128-bit vectors.
6865 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
6866 if (BT->getKind() == BuiltinType::Float ||
6867 BT->getKind() == BuiltinType::Double ||
6868 BT->getKind() == BuiltinType::LongDouble)
6869 return true;
6870 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
6871 unsigned VecSize = getContext().getTypeSize(VT);
6872 if (VecSize == 64 || VecSize == 128)
6873 return true;
6874 }
6875 return false;
6876}
6877
6878bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
6879 uint64_t Members) const {
6880 return Members <= 4;
6881}
6882
6883bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
6884 bool acceptHalf) const {
6885 // Give precedence to user-specified calling conventions.
6886 if (callConvention != llvm::CallingConv::C)
6887 return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
6888 else
6889 return (getABIKind() == AAPCS_VFP) ||
6890 (acceptHalf && (getABIKind() == AAPCS16_VFP));
6891}
6892
6893Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6894 QualType Ty) const {
6895 CharUnits SlotSize = CharUnits::fromQuantity(4);
6896
6897 // Empty records are ignored for parameter passing purposes.
6898 if (isEmptyRecord(getContext(), Ty, true)) {
6899 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
6900 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
6901 return Addr;
6902 }
6903
6904 CharUnits TySize = getContext().getTypeSizeInChars(Ty);
6905 CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);
6906
6907 // Use indirect if size of the illegal vector is bigger than 16 bytes.
6908 bool IsIndirect = false;
6909 const Type *Base = nullptr;
6910 uint64_t Members = 0;
6911 if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
6912 IsIndirect = true;
6913
6914 // ARMv7k passes structs bigger than 16 bytes indirectly, in space
6915 // allocated by the caller.
6916 } else if (TySize > CharUnits::fromQuantity(16) &&
6917 getABIKind() == ARMABIInfo::AAPCS16_VFP &&
6918 !isHomogeneousAggregate(Ty, Base, Members)) {
6919 IsIndirect = true;
6920
6921 // Otherwise, bound the type's ABI alignment.
6922 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
6923 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
6924 // Our callers should be prepared to handle an under-aligned address.
6925 } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
6926 getABIKind() == ARMABIInfo::AAPCS) {
6927 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
6928 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
6929 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
6930 // ARMv7k allows type alignment up to 16 bytes.
6931 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
6932 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
6933 } else {
6934 TyAlignForABI = CharUnits::fromQuantity(4);
6935 }
6936
6937 TypeInfoChars TyInfo(TySize, TyAlignForABI, false);
6938 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
6939 SlotSize, /*AllowHigherAlign*/ true);
6940}
6941
6942//===----------------------------------------------------------------------===//
6943// NVPTX ABI Implementation
6944//===----------------------------------------------------------------------===//
6945
6946namespace {
6947
6948class NVPTXTargetCodeGenInfo;
6949
6950class NVPTXABIInfo : public ABIInfo {
6951 NVPTXTargetCodeGenInfo &CGInfo;
6952
6953public:
6954 NVPTXABIInfo(CodeGenTypes &CGT, NVPTXTargetCodeGenInfo &Info)
6955 : ABIInfo(CGT), CGInfo(Info) {}
6956
6957 ABIArgInfo classifyReturnType(QualType RetTy) const;
6958 ABIArgInfo classifyArgumentType(QualType Ty) const;
6959
6960 void computeInfo(CGFunctionInfo &FI) const override;
6961 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6962 QualType Ty) const override;
6963 bool isUnsupportedType(QualType T) const;
6964 ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, unsigned MaxSize) const;
6965};
6966
6967class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
6968public:
6969 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
6970 : TargetCodeGenInfo(std::make_unique<NVPTXABIInfo>(CGT, *this)) {}
6971
6972 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6973 CodeGen::CodeGenModule &M) const override;
6974 bool shouldEmitStaticExternCAliases() const override;
6975
6976 llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const override {
6977 // On the device side, surface reference is represented as an object handle
6978 // in 64-bit integer.
6979 return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
6980 }
6981
6982 llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const override {
6983 // On the device side, texture reference is represented as an object handle
6984 // in 64-bit integer.
6985 return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
6986 }
6987
6988 bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF, LValue Dst,
6989 LValue Src) const override {
6990 emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
6991 return true;
6992 }
6993
6994 bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF, LValue Dst,
6995 LValue Src) const override {
6996 emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
6997 return true;
6998 }
6999
7000private:
7001 // Adds a NamedMDNode with GV, Name, and Operand as operands, and adds the
7002 // resulting MDNode to the nvvm.annotations MDNode.
7003 static void addNVVMMetadata(llvm::GlobalValue *GV, StringRef Name,
7004 int Operand);
7005
7006 static void emitBuiltinSurfTexDeviceCopy(CodeGenFunction &CGF, LValue Dst,
7007 LValue Src) {
7008 llvm::Value *Handle = nullptr;
7009 llvm::Constant *C =
7010 llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).getPointer());
7011 // Lookup `addrspacecast` through the constant pointer if any.
7012 if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C))
7013 C = llvm::cast<llvm::Constant>(ASC->getPointerOperand());
7014 if (auto *GV = llvm::dyn_cast_or_null<llvm::GlobalVariable>(C)) {
7015 // Load the handle from the specific global variable using
7016 // `nvvm.texsurf.handle.internal` intrinsic.
7017 Handle = CGF.EmitRuntimeCall(
7018 CGF.CGM.getIntrinsic(llvm::Intrinsic::nvvm_texsurf_handle_internal,
7019 {GV->getType()}),
7020 {GV}, "texsurf_handle");
7021 } else
7022 Handle = CGF.EmitLoadOfScalar(Src, SourceLocation());
7023 CGF.EmitStoreOfScalar(Handle, Dst);
7024 }
7025};
7026
7027/// Checks if the type is unsupported directly by the current target.
7028bool NVPTXABIInfo::isUnsupportedType(QualType T) const {
7029 ASTContext &Context = getContext();
7030 if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type())
7031 return true;
7032 if (!Context.getTargetInfo().hasFloat128Type() &&
7033 (T->isFloat128Type() ||
7034 (T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
7035 return true;
7036 if (const auto *EIT = T->getAs<ExtIntType>())
7037 return EIT->getNumBits() >
7038 (Context.getTargetInfo().hasInt128Type() ? 128U : 64U);
7039 if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
7040 Context.getTypeSize(T) > 64U)
7041 return true;
7042 if (const auto *AT = T->getAsArrayTypeUnsafe())
7043 return isUnsupportedType(AT->getElementType());
7044 const auto *RT = T->getAs<RecordType>();
7045 if (!RT)
7046 return false;
7047 const RecordDecl *RD = RT->getDecl();
7048
7049 // If this is a C++ record, check the bases first.
7050 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
7051 for (const CXXBaseSpecifier &I : CXXRD->bases())
7052 if (isUnsupportedType(I.getType()))
7053 return true;
7054
7055 for (const FieldDecl *I : RD->fields())
7056 if (isUnsupportedType(I->getType()))
7057 return true;
7058 return false;
7059}
7060
7061 /// Coerce the given type into an array with the maximum allowed element size.
7062ABIArgInfo NVPTXABIInfo::coerceToIntArrayWithLimit(QualType Ty,
7063 unsigned MaxSize) const {
7064 // Alignment and Size are measured in bits.
7065 const uint64_t Size = getContext().getTypeSize(Ty);
7066 const uint64_t Alignment = getContext().getTypeAlign(Ty);
7067 const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
7068 llvm::Type *IntType = llvm::Type::getIntNTy(getVMContext(), Div);
7069 const uint64_t NumElements = (Size + Div - 1) / Div;
7070 return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
7071}
7072
7073ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
7074 if (RetTy->isVoidType())
7075 return ABIArgInfo::getIgnore();
7076
7077 if (getContext().getLangOpts().OpenMP &&
7078 getContext().getLangOpts().OpenMPIsDevice && isUnsupportedType(RetTy))
7079 return coerceToIntArrayWithLimit(RetTy, 64);
7080
7081 // Note: this is different from the default ABI.
7082 if (!RetTy->isScalarType())
7083 return ABIArgInfo::getDirect();
7084
7085 // Treat an enum type as its underlying type.
7086 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
7087 RetTy = EnumTy->getDecl()->getIntegerType();
7088
7089 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
7090 : ABIArgInfo::getDirect());
7091}
7092
7093ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
7094 // Treat an enum type as its underlying type.
7095 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
7096 Ty = EnumTy->getDecl()->getIntegerType();
7097
7098 // Return aggregate types indirectly, by value.
7099 if (isAggregateTypeForABI(Ty)) {
7100 // Under CUDA device compilation, tex/surf builtin types are replaced with
7101 // object types and passed directly.
7102 if (getContext().getLangOpts().CUDAIsDevice) {
7103 if (Ty->isCUDADeviceBuiltinSurfaceType())
7104 return ABIArgInfo::getDirect(
7105 CGInfo.getCUDADeviceBuiltinSurfaceDeviceType());
7106 if (Ty->isCUDADeviceBuiltinTextureType())
7107 return ABIArgInfo::getDirect(
7108 CGInfo.getCUDADeviceBuiltinTextureDeviceType());
7109 }
7110 return getNaturalAlignIndirect(Ty, /* byval */ true);
7111 }
7112
7113 if (const auto *EIT = Ty->getAs<ExtIntType>()) {
7114 if ((EIT->getNumBits() > 128) ||
7115 (!getContext().getTargetInfo().hasInt128Type() &&
7116 EIT->getNumBits() > 64))
7117 return getNaturalAlignIndirect(Ty, /* byval */ true);
7118 }
7119
7120 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
7121 : ABIArgInfo::getDirect());
7122}
7123
7124void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
7125 if (!getCXXABI().classifyReturnType(FI))
7126 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7127 for (auto &I : FI.arguments())
7128 I.info = classifyArgumentType(I.type);
7129
7130 // Always honor user-specified calling convention.
7131 if (FI.getCallingConvention() != llvm::CallingConv::C)
7132 return;
7133
7134 FI.setEffectiveCallingConvention(getRuntimeCC());
7135}
7136
7137Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7138 QualType Ty) const {
7139 llvm_unreachable("NVPTX does not support varargs");
7140}
7141
7142void NVPTXTargetCodeGenInfo::setTargetAttributes(
7143 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
7144 if (GV->isDeclaration())
7145 return;
7146 const VarDecl *VD = dyn_cast_or_null<VarDecl>(D);
7147 if (VD) {
7148 if (M.getLangOpts().CUDA) {
7149 if (VD->getType()->isCUDADeviceBuiltinSurfaceType())
7150 addNVVMMetadata(GV, "surface", 1);
7151 else if (VD->getType()->isCUDADeviceBuiltinTextureType())
7152 addNVVMMetadata(GV, "texture", 1);
7153 return;
7154 }
7155 }
7156
7157 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7158 if (!FD) return;
7159
7160 llvm::Function *F = cast<llvm::Function>(GV);
7161
7162 // Perform special handling in OpenCL mode
7163 if (M.getLangOpts().OpenCL) {
7164 // Use OpenCL function attributes to check for kernel functions
7165 // By default, all functions are device functions
7166 if (FD->hasAttr<OpenCLKernelAttr>()) {
7167 // OpenCL __kernel functions get kernel metadata
7168 // Create !{<func-ref>, metadata !"kernel", i32 1} node
7169 addNVVMMetadata(F, "kernel", 1);
7170 // And kernel functions are not subject to inlining
7171 F->addFnAttr(llvm::Attribute::NoInline);
7172 }
7173 }
7174
7175 // Perform special handling in CUDA mode.
7176 if (M.getLangOpts().CUDA) {
7177 // CUDA __global__ functions get a kernel metadata entry. Since
7178 // __global__ functions cannot be called from the device, we do not
7179 // need to set the noinline attribute.
7180 if (FD->hasAttr<CUDAGlobalAttr>()) {
7181 // Create !{<func-ref>, metadata !"kernel", i32 1} node
7182 addNVVMMetadata(F, "kernel", 1);
7183 }
7184 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
7185 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
7186 llvm::APSInt MaxThreads(32);
7187 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
7188 if (MaxThreads > 0)
7189 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
7190
7191 // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
7192 // not specified in __launch_bounds__ or if the user specified a 0 value,
7193 // we don't have to add a PTX directive.
7194 if (Attr->getMinBlocks()) {
7195 llvm::APSInt MinBlocks(32);
7196 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
7197 if (MinBlocks > 0)
7198 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
7199 addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
7200 }
7201 }
7202 }
7203}
7204
7205void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV,
7206 StringRef Name, int Operand) {
7207 llvm::Module *M = GV->getParent();
7208 llvm::LLVMContext &Ctx = M->getContext();
7209
7210 // Get "nvvm.annotations" metadata node
7211 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
7212
7213 llvm::Metadata *MDVals[] = {
7214 llvm::ConstantAsMetadata::get(GV), llvm::MDString::get(Ctx, Name),
7215 llvm::ConstantAsMetadata::get(
7216 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
7217 // Append metadata to nvvm.annotations
7218 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
7219}
7220
7221bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
7222 return false;
7223}
7224}
7225
7226//===----------------------------------------------------------------------===//
7227// SystemZ ABI Implementation
7228//===----------------------------------------------------------------------===//
7229
7230namespace {
7231
7232class SystemZABIInfo : public SwiftABIInfo {
7233 bool HasVector;
7234 bool IsSoftFloatABI;
7235
7236public:
7237 SystemZABIInfo(CodeGenTypes &CGT, bool HV, bool SF)
7238 : SwiftABIInfo(CGT), HasVector(HV), IsSoftFloatABI(SF) {}
7239
7240 bool isPromotableIntegerTypeForABI(QualType Ty) const;
7241 bool isCompoundType(QualType Ty) const;
7242 bool isVectorArgumentType(QualType Ty) const;
7243 bool isFPArgumentType(QualType Ty) const;
7244 QualType GetSingleElementType(QualType Ty) const;
7245
7246 ABIArgInfo classifyReturnType(QualType RetTy) const;
7247 ABIArgInfo classifyArgumentType(QualType ArgTy) const;
7248
7249 void computeInfo(CGFunctionInfo &FI) const override {
7250 if (!getCXXABI().classifyReturnType(FI))
7251 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7252 for (auto &I : FI.arguments())
7253 I.info = classifyArgumentType(I.type);
7254 }
7255
7256 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7257 QualType Ty) const override;
7258
7259 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
7260 bool asReturnValue) const override {
7261 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
7262 }
7263 bool isSwiftErrorInRegister() const override {
7264 return false;
7265 }
7266};
7267
7268class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
7269public:
7270 SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector, bool SoftFloatABI)
7271 : TargetCodeGenInfo(
7272 std::make_unique<SystemZABIInfo>(CGT, HasVector, SoftFloatABI)) {}
7273
7274 llvm::Value *testFPKind(llvm::Value *V, unsigned BuiltinID,
7275 CGBuilderTy &Builder,
7276 CodeGenModule &CGM) const override {
7277 assert(V->getType()->isFloatingPointTy() && "V should have an FP type.");
7278 // Only use TDC in constrained FP mode.
7279 if (!Builder.getIsFPConstrained())
7280 return nullptr;
7281
7282 llvm::Type *Ty = V->getType();
7283 if (Ty->isFloatTy() || Ty->isDoubleTy() || Ty->isFP128Ty()) {
7284 llvm::Module &M = CGM.getModule();
7285 auto &Ctx = M.getContext();
7286 llvm::Function *TDCFunc =
7287 llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::s390_tdc, Ty);
7288 unsigned TDCBits = 0;
7289 switch (BuiltinID) {
7290 case Builtin::BI__builtin_isnan:
7291 TDCBits = 0xf;
7292 break;
7293 case Builtin::BIfinite:
7294 case Builtin::BI__finite:
7295 case Builtin::BIfinitef:
7296 case Builtin::BI__finitef:
7297 case Builtin::BIfinitel:
7298 case Builtin::BI__finitel:
7299 case Builtin::BI__builtin_isfinite:
7300 TDCBits = 0xfc0;
7301 break;
7302 case Builtin::BI__builtin_isinf:
7303 TDCBits = 0x30;
7304 break;
7305 default:
7306 break;
7307 }
7308 if (TDCBits)
7309 return Builder.CreateCall(
7310 TDCFunc,
7311 {V, llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), TDCBits)});
7312 }
7313 return nullptr;
7314 }
7315};
7316}
7317
7318bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
7319 // Treat an enum type as its underlying type.
7320 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
7321 Ty = EnumTy->getDecl()->getIntegerType();
7322
7323 // Promotable integer types are required to be promoted by the ABI.
7324 if (ABIInfo::isPromotableIntegerTypeForABI(Ty))
7325 return true;
7326
7327 if (const auto *EIT = Ty->getAs<ExtIntType>())
7328 if (EIT->getNumBits() < 64)
7329 return true;
7330
7331 // 32-bit values must also be promoted.
7332 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
7333 switch (BT->getKind()) {
7334 case BuiltinType::Int:
7335 case BuiltinType::UInt:
7336 return true;
7337 default:
7338 return false;
7339 }
7340 return false;
7341}
7342
7343bool SystemZABIInfo::isCompoundType(QualType Ty) const {
7344 return (Ty->isAnyComplexType() ||
7345 Ty->isVectorType() ||
7346 isAggregateTypeForABI(Ty));
7347}
7348
7349bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
7350 return (HasVector &&
7351 Ty->isVectorType() &&
7352 getContext().getTypeSize(Ty) <= 128);
7353}
7354
7355bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
7356 if (IsSoftFloatABI)
7357 return false;
7358
7359 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
7360 switch (BT->getKind()) {
7361 case BuiltinType::Float:
7362 case BuiltinType::Double:
7363 return true;
7364 default:
7365 return false;
7366 }
7367
7368 return false;
7369}
7370
7371QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
7372 const RecordType *RT = Ty->getAs<RecordType>();
7373
7374 if (RT && RT->isStructureOrClassType()) {
7375 const RecordDecl *RD = RT->getDecl();
7376 QualType Found;
7377
7378 // If this is a C++ record, check the bases first.
7379 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
7380 for (const auto &I : CXXRD->bases()) {
7381 QualType Base = I.getType();
7382
7383 // Empty bases don't affect things either way.
7384 if (isEmptyRecord(getContext(), Base, true))
7385 continue;
7386
7387 if (!Found.isNull())
7388 return Ty;
7389 Found = GetSingleElementType(Base);
7390 }
7391
7392 // Check the fields.
7393 for (const auto *FD : RD->fields()) {
7394 // For compatibility with GCC, ignore empty bitfields in C++ mode.
7395 // Unlike isSingleElementStruct(), empty structure and array fields
7396 // do count. So do anonymous bitfields that aren't zero-sized.
7397 if (getContext().getLangOpts().CPlusPlus &&
7398 FD->isZeroLengthBitField(getContext()))
7399 continue;
7400 // Like isSingleElementStruct(), ignore C++20 empty data members.
7401 if (FD->hasAttr<NoUniqueAddressAttr>() &&
7402 isEmptyRecord(getContext(), FD->getType(), true))
7403 continue;
7404
7405 // Unlike isSingleElementStruct(), arrays do not count.
7406 // Nested structures still do though.
7407 if (!Found.isNull())
7408 return Ty;
7409 Found = GetSingleElementType(FD->getType());
7410 }
7411
7412 // Unlike isSingleElementStruct(), trailing padding is allowed.
7413 // An 8-byte aligned struct s { float f; } is passed as a double.
7414 if (!Found.isNull())
7415 return Found;
7416 }
7417
7418 return Ty;
7419}
7420
7421Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7422 QualType Ty) const {
7423 // Assume that va_list type is correct; should be pointer to LLVM type:
7424 // struct {
7425 // i64 __gpr;
7426 // i64 __fpr;
7427 // i8 *__overflow_arg_area;
7428 // i8 *__reg_save_area;
7429 // };
7430
7431 // Every non-vector argument occupies 8 bytes and is passed by preference
7432 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
7433 // always passed on the stack.
7434 Ty = getContext().getCanonicalType(Ty);
7435 auto TyInfo = getContext().getTypeInfoInChars(Ty);
7436 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
7437 llvm::Type *DirectTy = ArgTy;
7438 ABIArgInfo AI = classifyArgumentType(Ty);
7439 bool IsIndirect = AI.isIndirect();
7440 bool InFPRs = false;
7441 bool IsVector = false;
7442 CharUnits UnpaddedSize;
7443 CharUnits DirectAlign;
7444 if (IsIndirect) {
7445 DirectTy = llvm::PointerType::getUnqual(DirectTy);
7446 UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
7447 } else {
7448 if (AI.getCoerceToType())
7449 ArgTy = AI.getCoerceToType();
7450 InFPRs = (!IsSoftFloatABI && (ArgTy->isFloatTy() || ArgTy->isDoubleTy()));
7451 IsVector = ArgTy->isVectorTy();
7452 UnpaddedSize = TyInfo.Width;
7453 DirectAlign = TyInfo.Align;
7454 }
7455 CharUnits PaddedSize = CharUnits::fromQuantity(8);
7456 if (IsVector && UnpaddedSize > PaddedSize)
7457 PaddedSize = CharUnits::fromQuantity(16);
7458 assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.")((void)0);
7459
7460 CharUnits Padding = (PaddedSize - UnpaddedSize);
7461
7462 llvm::Type *IndexTy = CGF.Int64Ty;
7463 llvm::Value *PaddedSizeV =
7464 llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());
7465
7466 if (IsVector) {
7467 // Work out the address of a vector argument on the stack.
7468 // Vector arguments are always passed in the high bits of a
7469 // single (8 byte) or double (16 byte) stack slot.
7470 Address OverflowArgAreaPtr =
7471 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
7472 Address OverflowArgArea =
7473 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
7474 TyInfo.Align);
7475 Address MemAddr =
7476 CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
7477
7478 // Update overflow_arg_area_ptr pointer
7479 llvm::Value *NewOverflowArgArea =
7480 CGF.Builder.CreateGEP(OverflowArgArea.getElementType(),
7481 OverflowArgArea.getPointer(), PaddedSizeV,
7482 "overflow_arg_area");
7483 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
7484
7485 return MemAddr;
7486 }
7487
7488 assert(PaddedSize.getQuantity() == 8)((void)0);
7489
7490 unsigned MaxRegs, RegCountField, RegSaveIndex;
7491 CharUnits RegPadding;
7492 if (InFPRs) {
7493 MaxRegs = 4; // Maximum of 4 FPR arguments
7494 RegCountField = 1; // __fpr
7495 RegSaveIndex = 16; // save offset for f0
7496 RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
7497 } else {
7498 MaxRegs = 5; // Maximum of 5 GPR arguments
7499 RegCountField = 0; // __gpr
7500 RegSaveIndex = 2; // save offset for r2
7501 RegPadding = Padding; // values are passed in the low bits of a GPR
7502 }
7503
7504 Address RegCountPtr =
7505 CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
7506 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
7507 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
7508 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
7509 "fits_in_regs");
7510
7511 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
7512 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
7513 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
7514 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
7515
7516 // Emit code to load the value if it was passed in registers.
7517 CGF.EmitBlock(InRegBlock);
7518
7519 // Work out the address of an argument register.
7520 llvm::Value *ScaledRegCount =
7521 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
7522 llvm::Value *RegBase =
7523 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
7524 + RegPadding.getQuantity());
7525 llvm::Value *RegOffset =
7526 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
7527 Address RegSaveAreaPtr =
7528 CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
7529 llvm::Value *RegSaveArea =
7530 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
7531 Address RawRegAddr(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, RegOffset,
7532 "raw_reg_addr"),
7533 PaddedSize);
7534 Address RegAddr =
7535 CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");
7536
7537 // Update the register count
7538 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
7539 llvm::Value *NewRegCount =
7540 CGF.Builder.CreateAdd(RegCount, One, "reg_count");
7541 CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
7542 CGF.EmitBranch(ContBlock);
7543
7544 // Emit code to load the value if it was passed in memory.
7545 CGF.EmitBlock(InMemBlock);
7546
7547 // Work out the address of a stack argument.
7548 Address OverflowArgAreaPtr =
7549 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
7550 Address OverflowArgArea =
7551 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
7552 PaddedSize);
7553 Address RawMemAddr =
7554 CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
7555 Address MemAddr =
7556 CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");
7557
7558 // Update overflow_arg_area_ptr pointer
7559 llvm::Value *NewOverflowArgArea =
7560 CGF.Builder.CreateGEP(OverflowArgArea.getElementType(),
7561 OverflowArgArea.getPointer(), PaddedSizeV,
7562 "overflow_arg_area");
7563 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
7564 CGF.EmitBranch(ContBlock);
7565
7566 // Return the appropriate result.
7567 CGF.EmitBlock(ContBlock);
7568 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
7569 MemAddr, InMemBlock, "va_arg.addr");
7570
7571 if (IsIndirect)
7572 ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
7573 TyInfo.Align);
7574
7575 return ResAddr;
7576}
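// Note (illustrative, not part of the source): for a 'double' vararg the code
// above uses __fpr with MaxRegs = 4; while __fpr < 4 the value is loaded from
// __reg_save_area at byte offset 16*8 + 8*__fpr, otherwise it is read from the
// 8-byte slot at __overflow_arg_area. For an 'int' it uses __gpr with
// MaxRegs = 5 and a base offset of 2*8 plus 4 bytes of padding, since the
// value sits in the low (on big-endian, rightmost) half of the slot.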
7577
7578ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
7579 if (RetTy->isVoidType())
7580 return ABIArgInfo::getIgnore();
7581 if (isVectorArgumentType(RetTy))
7582 return ABIArgInfo::getDirect();
7583 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
7584 return getNaturalAlignIndirect(RetTy);
7585 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
7586 : ABIArgInfo::getDirect());
7587}
7588
7589ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
7590 // Handle the generic C++ ABI.
7591 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
7592 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
7593
7594 // Integers and enums are extended to full register width.
7595 if (isPromotableIntegerTypeForABI(Ty))
7596 return ABIArgInfo::getExtend(Ty);
7597
7598 // Handle vector types and vector-like structure types. Note that
7599 // as opposed to float-like structure types, we do not allow any
7600 // padding for vector-like structures, so verify the sizes match.
7601 uint64_t Size = getContext().getTypeSize(Ty);
7602 QualType SingleElementTy = GetSingleElementType(Ty);
7603 if (isVectorArgumentType(SingleElementTy) &&
7604 getContext().getTypeSize(SingleElementTy) == Size)
7605 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
7606
7607 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
7608 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
7609 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
7610
7611 // Handle small structures.
7612 if (const RecordType *RT = Ty->getAs<RecordType>()) {
7613 // Structures with flexible arrays have variable length, so they really
7614 // fail the size test above.
7615 const RecordDecl *RD = RT->getDecl();
7616 if (RD->hasFlexibleArrayMember())
7617 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
7618
7619 // The structure is passed as an unextended integer, a float, or a double.
7620 llvm::Type *PassTy;
7621 if (isFPArgumentType(SingleElementTy)) {
7622 assert(Size == 32 || Size == 64)((void)0);
7623 if (Size == 32)
7624 PassTy = llvm::Type::getFloatTy(getVMContext());
7625 else
7626 PassTy = llvm::Type::getDoubleTy(getVMContext());
7627 } else
7628 PassTy = llvm::IntegerType::get(getVMContext(), Size);
7629 return ABIArgInfo::getDirect(PassTy);
7630 }
7631
7632 // Non-structure compounds are passed indirectly.
7633 if (isCompoundType(Ty))
7634 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
7635
7636 return ABIArgInfo::getDirect(nullptr);
7637}
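// Note (illustrative, not part of the source): under the rules above,
//   int                       -> extended to a full 64-bit register
//   struct { float f; }       -> direct as 'float'  (single FP element, 32 bits)
//   struct { int a; int b; }  -> direct as 'i64'    (8-byte non-FP struct)
//   struct { char c[3]; }     -> indirect           (24 bits, not 8/16/32/64)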
7638
7639//===----------------------------------------------------------------------===//
7640// MSP430 ABI Implementation
7641//===----------------------------------------------------------------------===//
7642
7643namespace {
7644
7645class MSP430ABIInfo : public DefaultABIInfo {
7646 static ABIArgInfo complexArgInfo() {
7647 ABIArgInfo Info = ABIArgInfo::getDirect();
7648 Info.setCanBeFlattened(false);
7649 return Info;
7650 }
7651
7652public:
7653 MSP430ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
7654
7655 ABIArgInfo classifyReturnType(QualType RetTy) const {
7656 if (RetTy->isAnyComplexType())
7657 return complexArgInfo();
7658
7659 return DefaultABIInfo::classifyReturnType(RetTy);
7660 }
7661
7662 ABIArgInfo classifyArgumentType(QualType RetTy) const {
7663 if (RetTy->isAnyComplexType())
7664 return complexArgInfo();
7665
7666 return DefaultABIInfo::classifyArgumentType(RetTy);
7667 }
7668
7669 // Just copy the original implementations because
7670 // DefaultABIInfo::classify{Return,Argument}Type() are not virtual
7671 void computeInfo(CGFunctionInfo &FI) const override {
7672 if (!getCXXABI().classifyReturnType(FI))
7673 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7674 for (auto &I : FI.arguments())
7675 I.info = classifyArgumentType(I.type);
7676 }
7677
7678 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7679 QualType Ty) const override {
7680 return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
7681 }
7682};
7683
7684class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
7685public:
7686 MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
7687 : TargetCodeGenInfo(std::make_unique<MSP430ABIInfo>(CGT)) {}
7688 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
7689 CodeGen::CodeGenModule &M) const override;
7690};
7691
7692}
7693
7694void MSP430TargetCodeGenInfo::setTargetAttributes(
7695 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
7696 if (GV->isDeclaration())
7697 return;
7698 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
7699 const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>();
7700 if (!InterruptAttr)
7701 return;
7702
7703 // Handle 'interrupt' attribute:
7704 llvm::Function *F = cast<llvm::Function>(GV);
7705
7706 // Step 1: Set ISR calling convention.
7707 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
7708
7709 // Step 2: Add attributes goodness.
7710 F->addFnAttr(llvm::Attribute::NoInline);
7711 F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber()));
7712 }
7713}
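// Note (illustrative, assumed attribute spelling): a handler written roughly as
//   void __attribute__((interrupt(4))) isr(void) { ... }
// would take this path: the function gets the MSP430_INTR calling convention,
// the 'noinline' attribute, and the string attribute interrupt="4".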
7714
7715//===----------------------------------------------------------------------===//
7716// MIPS ABI Implementation. This works for both little-endian and
7717// big-endian variants.
7718//===----------------------------------------------------------------------===//
7719
7720namespace {
7721class MipsABIInfo : public ABIInfo {
7722 bool IsO32;
7723 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
7724 void CoerceToIntArgs(uint64_t TySize,
7725 SmallVectorImpl<llvm::Type *> &ArgList) const;
7726 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
7727 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
7728 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
7729public:
7730 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
7731 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
7732 StackAlignInBytes(IsO32 ? 8 : 16) {}
7733
7734 ABIArgInfo classifyReturnType(QualType RetTy) const;
7735 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
7736 void computeInfo(CGFunctionInfo &FI) const override;
7737 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7738 QualType Ty) const override;
7739 ABIArgInfo extendType(QualType Ty) const;
7740};
7741
7742class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
7743 unsigned SizeOfUnwindException;
7744public:
7745 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
7746 : TargetCodeGenInfo(std::make_unique<MipsABIInfo>(CGT, IsO32)),
7747 SizeOfUnwindException(IsO32 ? 24 : 32) {}
7748
7749 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
7750 return 29;
7751 }
7752
7753 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
7754 CodeGen::CodeGenModule &CGM) const override {
7755 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7756 if (!FD) return;
7757 llvm::Function *Fn = cast<llvm::Function>(GV);
7758
7759 if (FD->hasAttr<MipsLongCallAttr>())
7760 Fn->addFnAttr("long-call");
7761 else if (FD->hasAttr<MipsShortCallAttr>())
7762 Fn->addFnAttr("short-call");
7763
7764 // Other attributes do not have a meaning for declarations.
7765 if (GV->isDeclaration())
7766 return;
7767
7768 if (FD->hasAttr<Mips16Attr>()) {
7769 Fn->addFnAttr("mips16");
7770 }
7771 else if (FD->hasAttr<NoMips16Attr>()) {
7772 Fn->addFnAttr("nomips16");
7773 }
7774
7775 if (FD->hasAttr<MicroMipsAttr>())
7776 Fn->addFnAttr("micromips");
7777 else if (FD->hasAttr<NoMicroMipsAttr>())
7778 Fn->addFnAttr("nomicromips");
7779
7780 const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
7781 if (!Attr)
7782 return;
7783
7784 const char *Kind;
7785 switch (Attr->getInterrupt()) {
7786 case MipsInterruptAttr::eic: Kind = "eic"; break;
7787 case MipsInterruptAttr::sw0: Kind = "sw0"; break;
7788 case MipsInterruptAttr::sw1: Kind = "sw1"; break;
7789 case MipsInterruptAttr::hw0: Kind = "hw0"; break;
7790 case MipsInterruptAttr::hw1: Kind = "hw1"; break;
7791 case MipsInterruptAttr::hw2: Kind = "hw2"; break;
7792 case MipsInterruptAttr::hw3: Kind = "hw3"; break;
7793 case MipsInterruptAttr::hw4: Kind = "hw4"; break;
7794 case MipsInterruptAttr::hw5: Kind = "hw5"; break;
7795 }
7796
7797 Fn->addFnAttr("interrupt", Kind);
7798
7799 }
7800
7801 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
7802 llvm::Value *Address) const override;
7803
7804 unsigned getSizeOfUnwindException() const override {
7805 return SizeOfUnwindException;
7806 }
7807};
7808}
7809
7810void MipsABIInfo::CoerceToIntArgs(
7811 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
7812 llvm::IntegerType *IntTy =
7813 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
7814
7815 // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
7816 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
7817 ArgList.push_back(IntTy);
7818
7819 // If necessary, add one more integer type to ArgList.
7820 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
7821
7822 if (R)
7823 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
7824}
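// Note (illustrative, not part of the source): for a 72-bit type this yields
// [i32, i32, i8] on O32 (MinABIStackAlignInBytes == 4) and [i64, i8] on
// N32/N64 (MinABIStackAlignInBytes == 8).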
7825
7826// In N32/64, an aligned double precision floating point field is passed in
7827// a register.
7828llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
7829 SmallVector<llvm::Type*, 8> ArgList, IntArgList;
7830
7831 if (IsO32) {
7832 CoerceToIntArgs(TySize, ArgList);
7833 return llvm::StructType::get(getVMContext(), ArgList);
7834 }
7835
7836 if (Ty->isComplexType())
7837 return CGT.ConvertType(Ty);
7838
7839 const RecordType *RT = Ty->getAs<RecordType>();
7840
7841 // Unions/vectors are passed in integer registers.
7842 if (!RT || !RT->isStructureOrClassType()) {
7843 CoerceToIntArgs(TySize, ArgList);
7844 return llvm::StructType::get(getVMContext(), ArgList);
7845 }
7846
7847 const RecordDecl *RD = RT->getDecl();
7848 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
7849 assert(!(TySize % 8) && "Size of structure must be multiple of 8.")((void)0);
7850
7851 uint64_t LastOffset = 0;
7852 unsigned idx = 0;
7853 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
7854
7855 // Iterate over fields in the struct/class and check if there are any aligned
7856 // double fields.
7857 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
7858 i != e; ++i, ++idx) {
7859 const QualType Ty = i->getType();
7860 const BuiltinType *BT = Ty->getAs<BuiltinType>();
7861
7862 if (!BT || BT->getKind() != BuiltinType::Double)
7863 continue;
7864
7865 uint64_t Offset = Layout.getFieldOffset(idx);
7866 if (Offset % 64) // Ignore doubles that are not aligned.
7867 continue;
7868
7869 // Add ((Offset - LastOffset) / 64) args of type i64.
7870 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
7871 ArgList.push_back(I64);
7872
7873 // Add double type.
7874 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
7875 LastOffset = Offset + 64;
7876 }
7877
7878 CoerceToIntArgs(TySize - LastOffset, IntArgList);
7879 ArgList.append(IntArgList.begin(), IntArgList.end());
7880
7881 return llvm::StructType::get(getVMContext(), ArgList);
7882}
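// Note (illustrative, not part of the source): on N32/N64,
//   struct { double d; int i; }  -> { double, i64 }  (aligned double kept as FP)
//   struct { int i; double d; }  -> { i64, double }
// whereas on O32 both are simply coerced to a sequence of i32s.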
7883
7884llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
7885 uint64_t Offset) const {
7886 if (OrigOffset + MinABIStackAlignInBytes > Offset)
7887 return nullptr;
7888
7889 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
7890}
7891
7892ABIArgInfo
7893MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
7894 Ty = useFirstFieldIfTransparentUnion(Ty);
7895
7896 uint64_t OrigOffset = Offset;
7897 uint64_t TySize = getContext().getTypeSize(Ty);
7898 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
7899
7900 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
7901 (uint64_t)StackAlignInBytes);
7902 unsigned CurrOffset = llvm::alignTo(Offset, Align);
7903 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
7904
7905 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
7906 // Ignore empty aggregates.
7907 if (TySize == 0)
7908 return ABIArgInfo::getIgnore();
7909
7910 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
7911 Offset = OrigOffset + MinABIStackAlignInBytes;
7912 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
7913 }
7914
7915 // If we have reached here, aggregates are passed directly by coercing to
7916 // another structure type. Padding is inserted if the offset of the
7917 // aggregate is unaligned.
7918 ABIArgInfo ArgInfo =
7919 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
7920 getPaddingType(OrigOffset, CurrOffset));
7921 ArgInfo.setInReg(true);
7922 return ArgInfo;
7923 }
7924
7925 // Treat an enum type as its underlying type.
7926 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
7927 Ty = EnumTy->getDecl()->getIntegerType();
7928
7929 // Make sure we pass indirectly things that are too large.
7930 if (const auto *EIT = Ty->getAs<ExtIntType>())
7931 if (EIT->getNumBits() > 128 ||
7932 (EIT->getNumBits() > 64 &&
7933 !getContext().getTargetInfo().hasInt128Type()))
7934 return getNaturalAlignIndirect(Ty);
7935
7936 // All integral types are promoted to the GPR width.
7937 if (Ty->isIntegralOrEnumerationType())
7938 return extendType(Ty);
7939
7940 return ABIArgInfo::getDirect(
7941 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
7942}
7943
7944llvm::Type*
7945MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
7946 const RecordType *RT = RetTy->getAs<RecordType>();
7947 SmallVector<llvm::Type*, 8> RTList;
7948
7949 if (RT && RT->isStructureOrClassType()) {
7950 const RecordDecl *RD = RT->getDecl();
7951 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
7952 unsigned FieldCnt = Layout.getFieldCount();
7953
7954 // N32/64 returns struct/classes in floating point registers if the
7955 // following conditions are met:
7956 // 1. The size of the struct/class is no larger than 128-bit.
7957 // 2. The struct/class has one or two fields all of which are floating
7958 // point types.
7959 // 3. The offset of the first field is zero (this follows what gcc does).
7960 //
7961 // Any other composite results are returned in integer registers.
7962 //
7963 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
7964 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
7965 for (; b != e; ++b) {
7966 const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
7967
7968 if (!BT || !BT->isFloatingPoint())
7969 break;
7970
7971 RTList.push_back(CGT.ConvertType(b->getType()));
7972 }
7973
7974 if (b == e)
7975 return llvm::StructType::get(getVMContext(), RTList,
7976 RD->hasAttr<PackedAttr>());
7977
7978 RTList.clear();
7979 }
7980 }
7981
7982 CoerceToIntArgs(Size, RTList);
7983 return llvm::StructType::get(getVMContext(), RTList);
7984}
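// Note (illustrative, not part of the source): on N32/N64,
//   struct { float f; double d; }  -> { float, double }  (returned in FPRs)
//   struct { int i; float f; }     -> { i64 }            (integer registers)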
7985
7986ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
7987 uint64_t Size = getContext().getTypeSize(RetTy);
7988
7989 if (RetTy->isVoidType())
7990 return ABIArgInfo::getIgnore();
7991
7992 // O32 doesn't treat zero-sized structs differently from other structs.
7993 // However, N32/N64 ignores zero sized return values.
7994 if (!IsO32 && Size == 0)
7995 return ABIArgInfo::getIgnore();
7996
7997 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
7998 if (Size <= 128) {
7999 if (RetTy->isAnyComplexType())
8000 return ABIArgInfo::getDirect();
8001
8002 // O32 returns integer vectors in registers and N32/N64 returns all small
8003 // aggregates in registers.
8004 if (!IsO32 ||
8005 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
8006 ABIArgInfo ArgInfo =
8007 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
8008 ArgInfo.setInReg(true);
8009 return ArgInfo;
8010 }
8011 }
8012
8013 return getNaturalAlignIndirect(RetTy);
8014 }
8015
8016 // Treat an enum type as its underlying type.
8017 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
8018 RetTy = EnumTy->getDecl()->getIntegerType();
8019
8020 // Make sure we pass indirectly things that are too large.
8021 if (const auto *EIT = RetTy->getAs<ExtIntType>())
8022 if (EIT->getNumBits() > 128 ||
8023 (EIT->getNumBits() > 64 &&
8024 !getContext().getTargetInfo().hasInt128Type()))
8025 return getNaturalAlignIndirect(RetTy);
8026
8027 if (isPromotableIntegerTypeForABI(RetTy))
8028 return ABIArgInfo::getExtend(RetTy);
8029
8030 if ((RetTy->isUnsignedIntegerOrEnumerationType() ||
8031 RetTy->isSignedIntegerOrEnumerationType()) && Size == 32 && !IsO32)
8032 return ABIArgInfo::getSignExtend(RetTy);
8033
8034 return ABIArgInfo::getDirect();
8035}
8036
8037void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
8038 ABIArgInfo &RetInfo = FI.getReturnInfo();
8039 if (!getCXXABI().classifyReturnType(FI))
8040 RetInfo = classifyReturnType(FI.getReturnType());
8041
8042 // Check if a pointer to an aggregate is passed as a hidden argument.
8043 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
8044
8045 for (auto &I : FI.arguments())
8046 I.info = classifyArgumentType(I.type, Offset);
8047}
8048
8049Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8050 QualType OrigTy) const {
8051 QualType Ty = OrigTy;
8052
8053 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
8054 // Pointers are also promoted in the same way but this only matters for N32.
8055 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
8056 unsigned PtrWidth = getTarget().getPointerWidth(0);
8057 bool DidPromote = false;
8058 if ((Ty->isIntegerType() &&
8059 getContext().getIntWidth(Ty) < SlotSizeInBits) ||
8060 (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
8061 DidPromote = true;
8062 Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
8063 Ty->isSignedIntegerType());
8064 }
8065
8066 auto TyInfo = getContext().getTypeInfoInChars(Ty);
8067
8068 // The alignment of things in the argument area is never larger than
8069 // StackAlignInBytes.
8070 TyInfo.Align =
8071 std::min(TyInfo.Align, CharUnits::fromQuantity(StackAlignInBytes));
8072
8073 // MinABIStackAlignInBytes is the size of argument slots on the stack.
8074 CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
8075
8076 Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
8077 TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
8078
8079
8080 // If there was a promotion, "unpromote" into a temporary.
8081 // TODO: can we just use a pointer into a subset of the original slot?
8082 if (DidPromote) {
8083 Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
8084 llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
8085
8086 // Truncate down to the right width.
8087 llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
8088 : CGF.IntPtrTy);
8089 llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
8090 if (OrigTy->isPointerType())
8091 V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
8092
8093 CGF.Builder.CreateStore(V, Temp);
8094 Addr = Temp;
8095 }
8096
8097 return Addr;
8098}
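// Note (illustrative, not part of the source): on N32/N64 a 'short' vararg
// still occupies a full 8-byte slot; the code above loads the promoted 64-bit
// value, truncates it back to 16 bits, stores it into "vaarg.promotion-temp",
// and returns the address of that temporary instead of the slot itself.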
8099
8100ABIArgInfo MipsABIInfo::extendType(QualType Ty) const {
8101 int TySize = getContext().getTypeSize(Ty);
8102
8103 // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
8104 if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
8105 return ABIArgInfo::getSignExtend(Ty);
8106
8107 return ABIArgInfo::getExtend(Ty);
8108}
8109
8110bool
8111MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
8112 llvm::Value *Address) const {
8113 // This information comes from gcc's implementation, which seems to
8114 // be as canonical as it gets.
8115
8116 // Everything on MIPS is 4 bytes. Double-precision FP registers
8117 // are aliased to pairs of single-precision FP registers.
8118 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
8119
8120 // 0-31 are the general purpose registers, $0 - $31.
8121 // 32-63 are the floating-point registers, $f0 - $f31.
8122 // 64 and 65 are the multiply/divide registers, $hi and $lo.
8123 // 66 is the (notional, I think) register for signal-handler return.
8124 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
8125
8126 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
8127 // They are one bit wide and ignored here.
8128
8129 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
8130 // (coprocessor 1 is the FP unit)
8131 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
8132 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
8133 // 176-181 are the DSP accumulator registers.
8134 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
8135 return false;
8136}
8137
8138//===----------------------------------------------------------------------===//
8139// M68k ABI Implementation
8140//===----------------------------------------------------------------------===//
8141
8142namespace {
8143
8144class M68kTargetCodeGenInfo : public TargetCodeGenInfo {
8145public:
8146 M68kTargetCodeGenInfo(CodeGenTypes &CGT)
8147 : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
8148 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
8149 CodeGen::CodeGenModule &M) const override;
8150};
8151
8152} // namespace
8153
8154void M68kTargetCodeGenInfo::setTargetAttributes(
8155 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
8156 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
8157 if (const auto *attr = FD->getAttr<M68kInterruptAttr>()) {
8158 // Handle 'interrupt' attribute:
8159 llvm::Function *F = cast<llvm::Function>(GV);
8160
8161 // Step 1: Set ISR calling convention.
8162 F->setCallingConv(llvm::CallingConv::M68k_INTR);
8163
8164 // Step 2: Add attributes goodness.
8165 F->addFnAttr(llvm::Attribute::NoInline);
8166
8167 // Step 3: Emit ISR vector alias.
8168 unsigned Num = attr->getNumber() / 2;
8169 llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
8170 "__isr_" + Twine(Num), F);
8171 }
8172 }
8173}
8174
8175//===----------------------------------------------------------------------===//
8176// AVR ABI Implementation. Documented at
8177// https://gcc.gnu.org/wiki/avr-gcc#Calling_Convention
8178// https://gcc.gnu.org/wiki/avr-gcc#Reduced_Tiny
8179//===----------------------------------------------------------------------===//
8180
8181namespace {
8182class AVRABIInfo : public DefaultABIInfo {
8183public:
8184 AVRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
8185
8186 ABIArgInfo classifyReturnType(QualType Ty) const {
8187 // A return struct with size less than or equal to 8 bytes is returned
8188 // directly via registers R18-R25.
8189 if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) <= 64)
8190 return ABIArgInfo::getDirect();
8191 else
8192 return DefaultABIInfo::classifyReturnType(Ty);
8193 }
8194
8195 // Just copy the original implementation of DefaultABIInfo::computeInfo(),
8196 // since DefaultABIInfo::classify{Return,Argument}Type() are not virtual.
8197 void computeInfo(CGFunctionInfo &FI) const override {
8198 if (!getCXXABI().classifyReturnType(FI))
8199 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
8200 for (auto &I : FI.arguments())
8201 I.info = classifyArgumentType(I.type);
8202 }
8203};
8204
8205class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
8206public:
8207 AVRTargetCodeGenInfo(CodeGenTypes &CGT)
8208 : TargetCodeGenInfo(std::make_unique<AVRABIInfo>(CGT)) {}
8209
8210 LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
8211 const VarDecl *D) const override {
8212 // Check if a global/static variable is defined within address space 1
8213 // but not constant.
8214 LangAS AS = D->getType().getAddressSpace();
8215 if (isTargetAddressSpace(AS) && toTargetAddressSpace(AS) == 1 &&
8216 !D->getType().isConstQualified())
8217 CGM.getDiags().Report(D->getLocation(),
8218 diag::err_verify_nonconst_addrspace)
8219 << "__flash";
8220 return TargetCodeGenInfo::getGlobalVarAddressSpace(CGM, D);
8221 }
8222
8223 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
8224 CodeGen::CodeGenModule &CGM) const override {
8225 if (GV->isDeclaration())
8226 return;
8227 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
8228 if (!FD) return;
8229 auto *Fn = cast<llvm::Function>(GV);
8230
8231 if (FD->getAttr<AVRInterruptAttr>())
8232 Fn->addFnAttr("interrupt");
8233
8234 if (FD->getAttr<AVRSignalAttr>())
8235 Fn->addFnAttr("signal");
8236 }
8237};
8238}
8239
8240//===----------------------------------------------------------------------===//
8241// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
8242// Currently subclassed only to implement custom OpenCL C function attribute
8243// handling.
8244//===----------------------------------------------------------------------===//
8245
8246namespace {
8247
8248class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
8249public:
8250 TCETargetCodeGenInfo(CodeGenTypes &CGT)
8251 : DefaultTargetCodeGenInfo(CGT) {}
8252
8253 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
8254 CodeGen::CodeGenModule &M) const override;
8255};
8256
8257void TCETargetCodeGenInfo::setTargetAttributes(
8258 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
8259 if (GV->isDeclaration())
8260 return;
8261 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
8262 if (!FD) return;
8263
8264 llvm::Function *F = cast<llvm::Function>(GV);
8265
8266 if (M.getLangOpts().OpenCL) {
8267 if (FD->hasAttr<OpenCLKernelAttr>()) {
8268 // OpenCL C Kernel functions are not subject to inlining
8269 F->addFnAttr(llvm::Attribute::NoInline);
8270 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
8271 if (Attr) {
8272 // Convert the reqd_work_group_size() attributes to metadata.
8273 llvm::LLVMContext &Context = F->getContext();
8274 llvm::NamedMDNode *OpenCLMetadata =
8275 M.getModule().getOrInsertNamedMetadata(
8276 "opencl.kernel_wg_size_info");
8277
8278 SmallVector<llvm::Metadata *, 5> Operands;
8279 Operands.push_back(llvm::ConstantAsMetadata::get(F));
8280
8281 Operands.push_back(
8282 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
8283 M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
8284 Operands.push_back(
8285 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
8286 M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
8287 Operands.push_back(
8288 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
8289 M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));
8290
8291 // Add a boolean constant operand for "required" (true) or "hint"
8292 // (false) for implementing the work_group_size_hint attr later.
8293 // Currently always true as the hint is not yet implemented.
8294 Operands.push_back(
8295 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
8296 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
8297 }
8298 }
8299 }
8300}
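// Note (illustrative, not part of the source): an OpenCL kernel declared as
//   __kernel __attribute__((reqd_work_group_size(8, 4, 1))) void k(...) { ... }
// would produce an "opencl.kernel_wg_size_info" metadata node whose operands
// are the function, the constants 8, 4 and 1, and the boolean 'true'.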
8301
8302}
8303
8304//===----------------------------------------------------------------------===//
8305// Hexagon ABI Implementation
8306//===----------------------------------------------------------------------===//
8307
8308namespace {
8309
8310class HexagonABIInfo : public DefaultABIInfo {
8311public:
8312 HexagonABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
8313
8314private:
8315 ABIArgInfo classifyReturnType(QualType RetTy) const;
8316 ABIArgInfo classifyArgumentType(QualType RetTy) const;
8317 ABIArgInfo classifyArgumentType(QualType RetTy, unsigned *RegsLeft) const;
8318
8319 void computeInfo(CGFunctionInfo &FI) const override;
8320
8321 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8322 QualType Ty) const override;
8323 Address EmitVAArgFromMemory(CodeGenFunction &CFG, Address VAListAddr,
8324 QualType Ty) const;
8325 Address EmitVAArgForHexagon(CodeGenFunction &CFG, Address VAListAddr,
8326 QualType Ty) const;
8327 Address EmitVAArgForHexagonLinux(CodeGenFunction &CFG, Address VAListAddr,
8328 QualType Ty) const;
8329};
8330
8331class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
8332public:
8333 HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
8334 : TargetCodeGenInfo(std::make_unique<HexagonABIInfo>(CGT)) {}
8335
8336 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
8337 return 29;
8338 }
8339
8340 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
8341 CodeGen::CodeGenModule &GCM) const override {
8342 if (GV->isDeclaration())
8343 return;
8344 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
8345 if (!FD)
8346 return;
8347 }
8348};
8349
8350} // namespace
8351
8352void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
8353 unsigned RegsLeft = 6;
8354 if (!getCXXABI().classifyReturnType(FI))
8355 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
8356 for (auto &I : FI.arguments())
8357 I.info = classifyArgumentType(I.type, &RegsLeft);
8358}
8359
8360static bool HexagonAdjustRegsLeft(uint64_t Size, unsigned *RegsLeft) {
8361 assert(Size <= 64 && "Not expecting to pass arguments larger than 64 bits"((void)0)
8362 " through registers")((void)0);
8363
8364 if (*RegsLeft == 0)
8365 return false;
8366
8367 if (Size <= 32) {
8368 (*RegsLeft)--;
8369 return true;
8370 }
8371
8372 if (2 <= (*RegsLeft & (~1U))) {
8373 *RegsLeft = (*RegsLeft & (~1U)) - 2;
8374 return true;
8375 }
8376
8377 // Next available register was r5, but the candidate was larger than 32 bits,
8378 // so it has to go on the stack. However, we still consume r5.
8379 if (*RegsLeft == 1)
8380 *RegsLeft = 0;
8381
8382 return false;
8383}
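// Note (illustrative, not part of the source): Hexagon passes arguments in
// r0-r5. A value of 32 bits or less takes one register; a 64-bit value needs
// an aligned even/odd pair, so with 3 registers left it takes the pair and
// leaves 0, and with only 1 register left it goes on the stack but still
// consumes that last register.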
8384
8385ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
8386 unsigned *RegsLeft) const {
8387 if (!isAggregateTypeForABI(Ty)) {
8388 // Treat an enum type as its underlying type.
8389 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
8390 Ty = EnumTy->getDecl()->getIntegerType();
8391
8392 uint64_t Size = getContext().getTypeSize(Ty);
8393 if (Size <= 64)
8394 HexagonAdjustRegsLeft(Size, RegsLeft);
8395
8396 if (Size > 64 && Ty->isExtIntType())
8397 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
8398
8399 return isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
8400 : ABIArgInfo::getDirect();
8401 }
8402
8403 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
8404 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
8405
8406 // Ignore empty records.
8407 if (isEmptyRecord(getContext(), Ty, true))
8408 return ABIArgInfo::getIgnore();
8409
8410 uint64_t Size = getContext().getTypeSize(Ty);
8411 unsigned Align = getContext().getTypeAlign(Ty);
8412
8413 if (Size > 64)
8414 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
8415
8416 if (HexagonAdjustRegsLeft(Size, RegsLeft))
8417 Align = Size <= 32 ? 32 : 64;
8418 if (Size <= Align) {
8419 // Pass in the smallest viable integer type.
8420 if (!llvm::isPowerOf2_64(Size))
8421 Size = llvm::NextPowerOf2(Size);
8422 return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
8423 }
8424 return DefaultABIInfo::classifyArgumentType(Ty);
8425}
8426
8427ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
8428 if (RetTy->isVoidType())
8429 return ABIArgInfo::getIgnore();
8430
8431 const TargetInfo &T = CGT.getTarget();
8432 uint64_t Size = getContext().getTypeSize(RetTy);
8433
8434 if (RetTy->getAs<VectorType>()) {
8435 // HVX vectors are returned in vector registers or register pairs.
8436 if (T.hasFeature("hvx")) {
8437 assert(T.hasFeature("hvx-length64b") || T.hasFeature("hvx-length128b"))((void)0);
8438 uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64*8 : 128*8;
8439 if (Size == VecSize || Size == 2*VecSize)
8440 return ABIArgInfo::getDirectInReg();
8441 }
8442 // Large vector types should be returned via memory.
8443 if (Size > 64)
8444 return getNaturalAlignIndirect(RetTy);
8445 }
8446
8447 if (!isAggregateTypeForABI(RetTy)) {
8448 // Treat an enum type as its underlying type.
8449 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
8450 RetTy = EnumTy->getDecl()->getIntegerType();
8451
8452 if (Size > 64 && RetTy->isExtIntType())
8453 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
8454
8455 return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
8456 : ABIArgInfo::getDirect();
8457 }
8458
8459 if (isEmptyRecord(getContext(), RetTy, true))
8460 return ABIArgInfo::getIgnore();
8461
8462 // Aggregates <= 8 bytes are returned in registers, other aggregates
8463 // are returned indirectly.
8464 if (Size <= 64) {
8465 // Return in the smallest viable integer type.
8466 if (!llvm::isPowerOf2_64(Size))
8467 Size = llvm::NextPowerOf2(Size);
8468 return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
8469 }
8470 return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
8471}
8472
8473Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF,
8474 Address VAListAddr,
8475 QualType Ty) const {
8476 // Load the overflow area pointer.
8477 Address __overflow_area_pointer_p =
8478 CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
8479 llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
8480 __overflow_area_pointer_p, "__overflow_area_pointer");
8481
8482 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
8483 if (Align > 4) {
8484 // Alignment should be a power of 2.
8485 assert((Align & (Align - 1)) == 0 && "Alignment is not power of 2!")((void)0);
8486
8487 // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
8488 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
8489
8490 // Add offset to the current pointer to access the argument.
8491 __overflow_area_pointer =
8492 CGF.Builder.CreateGEP(CGF.Int8Ty, __overflow_area_pointer, Offset);
8493 llvm::Value *AsInt =
8494 CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
8495
8496 // Create a mask which should be "AND"ed
8497 // with (overflow_arg_area + align - 1)
8498 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -(int)Align);
8499 __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
8500 CGF.Builder.CreateAnd(AsInt, Mask), __overflow_area_pointer->getType(),
8501 "__overflow_area_pointer.align");
8502 }
8503
8504 // Get the type of the argument from memory and bitcast
8505 // overflow area pointer to the argument type.
8506 llvm::Type *PTy = CGF.ConvertTypeForMem(Ty);
8507 Address AddrTyped = CGF.Builder.CreateBitCast(
8508 Address(__overflow_area_pointer, CharUnits::fromQuantity(Align)),
8509 llvm::PointerType::getUnqual(PTy));
8510
8511 // Round up to the minimum stack alignment for varargs which is 4 bytes.
8512 uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
8513
8514 __overflow_area_pointer = CGF.Builder.CreateGEP(
8515 CGF.Int8Ty, __overflow_area_pointer,
8516 llvm::ConstantInt::get(CGF.Int32Ty, Offset),
8517 "__overflow_area_pointer.next");
8518 CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p);
8519
8520 return AddrTyped;
8521}
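// Note (illustrative, not part of the source): the masking above computes
// (ptr + align - 1) & -align; e.g. for an 8-byte-aligned argument with an
// overflow pointer of 0x1004, (0x1004 + 7) & -8 == 0x1008.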
8522
8523Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF,
8524 Address VAListAddr,
8525 QualType Ty) const {
8526 // FIXME: Need to handle alignment
8527 llvm::Type *BP = CGF.Int8PtrTy;
8528 llvm::Type *BPP = CGF.Int8PtrPtrTy;
8529 CGBuilderTy &Builder = CGF.Builder;
8530 Address VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
8531 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
8532 // Handle address alignment for type alignment > 32 bits
8533 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
8534 if (TyAlign > 4) {
8535 assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!")((void)0);
8536 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
8537 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
8538 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
8539 Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
8540 }
8541 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
8542 Address AddrTyped = Builder.CreateBitCast(
8543 Address(Addr, CharUnits::fromQuantity(TyAlign)), PTy);
8544
8545 uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
8546 llvm::Value *NextAddr = Builder.CreateGEP(
8547 CGF.Int8Ty, Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
8548 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
8549
8550 return AddrTyped;
8551}
8552
8553Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
8554 Address VAListAddr,
8555 QualType Ty) const {
8556 int ArgSize = CGF.getContext().getTypeSize(Ty) / 8;
8557
8558 if (ArgSize > 8)
8559 return EmitVAArgFromMemory(CGF, VAListAddr, Ty);
8560
8561 // Here we have to check if the argument is in the register area or
8562 // in the overflow area.
8563 // If the saved register area pointer + argsize rounded up to alignment >
8564 // saved register area end pointer, argument is in overflow area.
8565 unsigned RegsLeft = 6;
8566 Ty = CGF.getContext().getCanonicalType(Ty);
8567 (void)classifyArgumentType(Ty, &RegsLeft);
8568
8569 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
8570 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
8571 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
8572 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
8573
8574 // Get the rounded size of the argument. GCC does not allow a vararg of
8575 // size < 4 bytes. We follow the same logic here.
8576 ArgSize = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
8577 int ArgAlign = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
8578
8579 // Argument may be in saved register area
8580 CGF.EmitBlock(MaybeRegBlock);
8581
8582 // Load the current saved register area pointer.
8583 Address __current_saved_reg_area_pointer_p = CGF.Builder.CreateStructGEP(
8584 VAListAddr, 0, "__current_saved_reg_area_pointer_p");
8585 llvm::Value *__current_saved_reg_area_pointer = CGF.Builder.CreateLoad(
8586 __current_saved_reg_area_pointer_p, "__current_saved_reg_area_pointer");
8587
8588 // Load the saved register area end pointer.
8589 Address __saved_reg_area_end_pointer_p = CGF.Builder.CreateStructGEP(
8590 VAListAddr, 1, "__saved_reg_area_end_pointer_p");
8591 llvm::Value *__saved_reg_area_end_pointer = CGF.Builder.CreateLoad(
8592 __saved_reg_area_end_pointer_p, "__saved_reg_area_end_pointer");
8593
8594 // If the size of argument is > 4 bytes, check if the stack
8595 // location is aligned to 8 bytes
8596 if (ArgAlign > 4) {
8597
8598 llvm::Value *__current_saved_reg_area_pointer_int =
8599 CGF.Builder.CreatePtrToInt(__current_saved_reg_area_pointer,
8600 CGF.Int32Ty);
8601
8602 __current_saved_reg_area_pointer_int = CGF.Builder.CreateAdd(
8603 __current_saved_reg_area_pointer_int,
8604 llvm::ConstantInt::get(CGF.Int32Ty, (ArgAlign - 1)),
8605 "align_current_saved_reg_area_pointer");
8606
8607 __current_saved_reg_area_pointer_int =
8608 CGF.Builder.CreateAnd(__current_saved_reg_area_pointer_int,
8609 llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
8610 "align_current_saved_reg_area_pointer");
8611
8612 __current_saved_reg_area_pointer =
8613 CGF.Builder.CreateIntToPtr(__current_saved_reg_area_pointer_int,
8614 __current_saved_reg_area_pointer->getType(),
8615 "align_current_saved_reg_area_pointer");
8616 }
8617
8618 llvm::Value *__new_saved_reg_area_pointer =
8619 CGF.Builder.CreateGEP(CGF.Int8Ty, __current_saved_reg_area_pointer,
8620 llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
8621 "__new_saved_reg_area_pointer");
8622
8623 llvm::Value *UsingStack = 0;
8624 UsingStack = CGF.Builder.CreateICmpSGT(__new_saved_reg_area_pointer,
8625 __saved_reg_area_end_pointer);
8626
8627 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, InRegBlock);
8628
8629 // Argument in saved register area
8630 // Implement the block where argument is in register saved area
8631 CGF.EmitBlock(InRegBlock);
8632
8633 llvm::Type *PTy = CGF.ConvertType(Ty);
8634 llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast(
8635 __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy));
8636
8637 CGF.Builder.CreateStore(__new_saved_reg_area_pointer,
8638 __current_saved_reg_area_pointer_p);
8639
8640 CGF.EmitBranch(ContBlock);
8641
8642 // Argument in overflow area
8643 // Implement the block where the argument is in overflow area.
8644 CGF.EmitBlock(OnStackBlock);
8645
8646 // Load the overflow area pointer
8647 Address __overflow_area_pointer_p =
8648 CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
8649 llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
8650 __overflow_area_pointer_p, "__overflow_area_pointer");
8651
8652 // Align the overflow area pointer according to the alignment of the argument
8653 if (ArgAlign > 4) {
8654 llvm::Value *__overflow_area_pointer_int =
8655 CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
8656
8657 __overflow_area_pointer_int =
8658 CGF.Builder.CreateAdd(__overflow_area_pointer_int,
8659 llvm::ConstantInt::get(CGF.Int32Ty, ArgAlign - 1),
8660 "align_overflow_area_pointer");
8661
8662 __overflow_area_pointer_int =
8663 CGF.Builder.CreateAnd(__overflow_area_pointer_int,
8664 llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
8665 "align_overflow_area_pointer");
8666
8667 __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
8668 __overflow_area_pointer_int, __overflow_area_pointer->getType(),
8669 "align_overflow_area_pointer");
8670 }
8671
8672 // Get the pointer for next argument in overflow area and store it
8673 // to overflow area pointer.
8674 llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP(
8675 CGF.Int8Ty, __overflow_area_pointer,
8676 llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
8677 "__overflow_area_pointer.next");
8678
8679 CGF.Builder.CreateStore(__new_overflow_area_pointer,
8680 __overflow_area_pointer_p);
8681
8682 CGF.Builder.CreateStore(__new_overflow_area_pointer,
8683 __current_saved_reg_area_pointer_p);
8684
8685 // Bitcast the overflow area pointer to the type of argument.
8686 llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty);
8687 llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast(
8688 __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy));
8689
8690 CGF.EmitBranch(ContBlock);
8691
8692 // Get the correct pointer to load the variable argument
8693 // Implement the ContBlock
8694 CGF.EmitBlock(ContBlock);
8695
8696 llvm::Type *MemPTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
8697 llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr");
8698 ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock);
8699 ArgAddr->addIncoming(__overflow_area_p, OnStackBlock);
8700
8701 return Address(ArgAddr, CharUnits::fromQuantity(ArgAlign));
8702}
8703
8704Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8705 QualType Ty) const {
8706
8707 if (getTarget().getTriple().isMusl())
8708 return EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty);
8709
8710 return EmitVAArgForHexagon(CGF, VAListAddr, Ty);
8711}
8712
8713//===----------------------------------------------------------------------===//
8714// Lanai ABI Implementation
8715//===----------------------------------------------------------------------===//
8716
8717namespace {
8718class LanaiABIInfo : public DefaultABIInfo {
8719public:
8720 LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
8721
8722 bool shouldUseInReg(QualType Ty, CCState &State) const;
8723
8724 void computeInfo(CGFunctionInfo &FI) const override {
8725 CCState State(FI);
8726 // Lanai uses 4 registers to pass arguments unless the function has the
8727 // regparm attribute set.
8728 if (FI.getHasRegParm()) {
8729 State.FreeRegs = FI.getRegParm();
8730 } else {
8731 State.FreeRegs = 4;
8732 }
8733
8734 if (!getCXXABI().classifyReturnType(FI))
8735 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
8736 for (auto &I : FI.arguments())
8737 I.info = classifyArgumentType(I.type, State);
8738 }
8739
8740 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
8741 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
8742};
8743} // end anonymous namespace
8744
8745bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
8746 unsigned Size = getContext().getTypeSize(Ty);
8747 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
8748
8749 if (SizeInRegs == 0)
8750 return false;
8751
8752 if (SizeInRegs > State.FreeRegs) {
8753 State.FreeRegs = 0;
8754 return false;
8755 }
8756
8757 State.FreeRegs -= SizeInRegs;
8758
8759 return true;
8760}
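// Note (illustrative, not part of the source): with the default of 4 free
// registers, a 96-bit argument needs 3 of them (FreeRegs drops to 1); a
// following 64-bit argument needs 2, does not fit, zeroes FreeRegs and is
// passed on the stack.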
8761
8762ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
8763 CCState &State) const {
8764 if (!ByVal) {
8765 if (State.FreeRegs) {
8766 --State.FreeRegs; // Non-byval indirects just use one pointer.
8767 return getNaturalAlignIndirectInReg(Ty);
8768 }
8769 return getNaturalAlignIndirect(Ty, false);
8770 }
8771
8772 // Compute the byval alignment.
8773 const unsigned MinABIStackAlignInBytes = 4;
8774 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
8775 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
8776 /*Realign=*/TypeAlign >
8777 MinABIStackAlignInBytes);
8778}
8779
8780ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
8781 CCState &State) const {
8782 // Check with the C++ ABI first.
8783 const RecordType *RT = Ty->getAs<RecordType>();
8784 if (RT) {
8785 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
8786 if (RAA == CGCXXABI::RAA_Indirect) {
8787 return getIndirectResult(Ty, /*ByVal=*/false, State);
8788 } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
8789 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
8790 }
8791 }
8792
8793 if (isAggregateTypeForABI(Ty)) {
8794 // Structures with flexible arrays are always indirect.
8795 if (RT && RT->getDecl()->hasFlexibleArrayMember())
8796 return getIndirectResult(Ty, /*ByVal=*/true, State);
8797
8798 // Ignore empty structs/unions.
8799 if (isEmptyRecord(getContext(), Ty, true))
8800 return ABIArgInfo::getIgnore();
8801
8802 llvm::LLVMContext &LLVMContext = getVMContext();
8803 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
8804 if (SizeInRegs <= State.FreeRegs) {
8805 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
8806 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
8807 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
8808 State.FreeRegs -= SizeInRegs;
8809 return ABIArgInfo::getDirectInReg(Result);
8810 } else {
8811 State.FreeRegs = 0;
8812 }
8813 return getIndirectResult(Ty, true, State);
8814 }
8815
8816 // Treat an enum type as its underlying type.
8817 if (const auto *EnumTy = Ty->getAs<EnumType>())
8818 Ty = EnumTy->getDecl()->getIntegerType();
8819
8820 bool InReg = shouldUseInReg(Ty, State);
8821
8822 // Don't pass >64 bit integers in registers.
8823 if (const auto *EIT = Ty->getAs<ExtIntType>())
8824 if (EIT->getNumBits() > 64)
8825 return getIndirectResult(Ty, /*ByVal=*/true, State);
8826
8827 if (isPromotableIntegerTypeForABI(Ty)) {
8828 if (InReg)
8829 return ABIArgInfo::getDirectInReg();
8830 return ABIArgInfo::getExtend(Ty);
8831 }
8832 if (InReg)
8833 return ABIArgInfo::getDirectInReg();
8834 return ABIArgInfo::getDirect();
8835}
8836
8837namespace {
8838class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
8839public:
8840 LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
8841 : TargetCodeGenInfo(std::make_unique<LanaiABIInfo>(CGT)) {}
8842};
8843}
8844
8845//===----------------------------------------------------------------------===//
8846// AMDGPU ABI Implementation
8847//===----------------------------------------------------------------------===//
8848
8849namespace {
8850
8851class AMDGPUABIInfo final : public DefaultABIInfo {
8852private:
8853 static const unsigned MaxNumRegsForArgsRet = 16;
8854
8855 unsigned numRegsForType(QualType Ty) const;
8856
8857 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
8858 bool isHomogeneousAggregateSmallEnough(const Type *Base,
8859 uint64_t Members) const override;
8860
8861 // Coerce HIP scalar pointer arguments from generic pointers to global ones.
8862 llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
8863 unsigned ToAS) const {
8864 // Single value types.
8865 if (Ty->isPointerTy() && Ty->getPointerAddressSpace() == FromAS)
8866 return llvm::PointerType::get(
8867 cast<llvm::PointerType>(Ty)->getElementType(), ToAS);
8868 return Ty;
8869 }
8870
8871public:
8872 explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) :
8873 DefaultABIInfo(CGT) {}
8874
8875 ABIArgInfo classifyReturnType(QualType RetTy) const;
8876 ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
8877 ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const;
8878
8879 void computeInfo(CGFunctionInfo &FI) const override;
8880 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8881 QualType Ty) const override;
8882};
8883
8884bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
8885 return true;
8886}
8887
8888bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
8889 const Type *Base, uint64_t Members) const {
8890 uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;
8891
8892 // Homogeneous Aggregates may occupy at most 16 registers.
8893 return Members * NumRegs <= MaxNumRegsForArgsRet;
8894}
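// Note (illustrative, not part of the source): with a 'float' base type each
// member needs one 32-bit register, so homogeneous aggregates of up to 16
// floats qualify; with a 'double' base type each member needs two registers,
// so the limit drops to 8 members.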
8895
8896/// Estimate number of registers the type will use when passed in registers.
8897unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
8898 unsigned NumRegs = 0;
8899
8900 if (const VectorType *VT = Ty->getAs<VectorType>()) {
8901 // Compute from the number of elements. The reported size is based on the
8902 // in-memory size, which includes the padding 4th element for 3-vectors.
8903 QualType EltTy = VT->getElementType();
8904 unsigned EltSize = getContext().getTypeSize(EltTy);
8905
8906 // 16-bit element vectors should be passed as packed.
8907 if (EltSize == 16)
8908 return (VT->getNumElements() + 1) / 2;
8909
8910 unsigned EltNumRegs = (EltSize + 31) / 32;
8911 return EltNumRegs * VT->getNumElements();
8912 }
8913
8914 if (const RecordType *RT = Ty->getAs<RecordType>()) {
8915 const RecordDecl *RD = RT->getDecl();
8916 assert(!RD->hasFlexibleArrayMember())((void)0);
8917
8918 for (const FieldDecl *Field : RD->fields()) {
8919 QualType FieldTy = Field->getType();
8920 NumRegs += numRegsForType(FieldTy);
8921 }
8922
8923 return NumRegs;
8924 }
8925
8926 return (getContext().getTypeSize(Ty) + 31) / 32;
8927}
8928
8929void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
8930 llvm::CallingConv::ID CC = FI.getCallingConvention();
8931
8932 if (!getCXXABI().classifyReturnType(FI))
8933 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
8934
8935 unsigned NumRegsLeft = MaxNumRegsForArgsRet;
8936 for (auto &Arg : FI.arguments()) {
8937 if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
8938 Arg.info = classifyKernelArgumentType(Arg.type);
8939 } else {
8940 Arg.info = classifyArgumentType(Arg.type, NumRegsLeft);
8941 }
8942 }
8943}
8944
8945Address AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8946 QualType Ty) const {
8947  llvm_unreachable("AMDGPU does not support varargs");
8948}
8949
8950ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
8951 if (isAggregateTypeForABI(RetTy)) {
8952 // Records with non-trivial destructors/copy-constructors should not be
8953 // returned by value.
8954 if (!getRecordArgABI(RetTy, getCXXABI())) {
8955 // Ignore empty structs/unions.
8956 if (isEmptyRecord(getContext(), RetTy, true))
8957 return ABIArgInfo::getIgnore();
8958
8959 // Lower single-element structs to just return a regular value.
8960 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
8961 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
8962
8963 if (const RecordType *RT = RetTy->getAs<RecordType>()) {
8964 const RecordDecl *RD = RT->getDecl();
8965 if (RD->hasFlexibleArrayMember())
8966 return DefaultABIInfo::classifyReturnType(RetTy);
8967 }
8968
8969      // Pack aggregates <= 8 bytes into a single VGPR or pair.
8970 uint64_t Size = getContext().getTypeSize(RetTy);
8971 if (Size <= 16)
8972 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
8973
8974 if (Size <= 32)
8975 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
8976
8977 if (Size <= 64) {
8978 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
8979 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
8980 }
8981
8982 if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
8983 return ABIArgInfo::getDirect();
8984 }
8985 }
8986
8987 // Otherwise just do the default thing.
8988 return DefaultABIInfo::classifyReturnType(RetTy);
8989}
8990
8991/// For kernels all parameters are really passed in a special buffer. It doesn't
8992/// make sense to pass anything byval, so everything must be direct.
8993ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
8994 Ty = useFirstFieldIfTransparentUnion(Ty);
8995
8996 // TODO: Can we omit empty structs?
8997
8998 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
8999 Ty = QualType(SeltTy, 0);
9000
9001 llvm::Type *OrigLTy = CGT.ConvertType(Ty);
9002 llvm::Type *LTy = OrigLTy;
9003 if (getContext().getLangOpts().HIP) {
9004 LTy = coerceKernelArgumentType(
9005 OrigLTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
9006 /*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device));
9007 }
9008
9009 // FIXME: Should also use this for OpenCL, but it requires addressing the
9010 // problem of kernels being called.
9011 //
9012 // FIXME: This doesn't apply the optimization of coercing pointers in structs
9013 // to global address space when using byref. This would require implementing a
9014  // new kind of coercion of the in-memory type for indirect arguments.
9015 if (!getContext().getLangOpts().OpenCL && LTy == OrigLTy &&
9016 isAggregateTypeForABI(Ty)) {
9017 return ABIArgInfo::getIndirectAliased(
9018 getContext().getTypeAlignInChars(Ty),
9019 getContext().getTargetAddressSpace(LangAS::opencl_constant),
9020 false /*Realign*/, nullptr /*Padding*/);
9021 }
9022
9023 // If we set CanBeFlattened to true, CodeGen will expand the struct to its
9024 // individual elements, which confuses the Clover OpenCL backend; therefore we
9025 // have to set it to false here. Other args of getDirect() are just defaults.
9026 return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
9027}
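
A minimal sketch of the coercion above (not part of the listed source): the kernel below is hypothetical, and the address-space numbers assume the usual amdgcn layout.

// Sketch only. Assumes generic/flat = addrspace(0) and
// global (LangAS::cuda_device) = addrspace(1) on amdgcn.
//
//   __global__ void kern(float *p, int n);   // hypothetical HIP kernel
//
// 'p' is a pointer in FromAS, so OrigLTy 'float*' is rewritten by
// coerceKernelArgumentType() to 'float addrspace(1)*'; 'n' is not a
// pointer and passes through unchanged.
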
9028
9029ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty,
9030 unsigned &NumRegsLeft) const {
9031  assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");
9032
9033 Ty = useFirstFieldIfTransparentUnion(Ty);
9034
9035 if (isAggregateTypeForABI(Ty)) {
9036 // Records with non-trivial destructors/copy-constructors should not be
9037 // passed by value.
9038 if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
9039 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
9040
9041 // Ignore empty structs/unions.
9042 if (isEmptyRecord(getContext(), Ty, true))
9043 return ABIArgInfo::getIgnore();
9044
9045 // Lower single-element structs to just pass a regular value. TODO: We
9046 // could do reasonable-size multiple-element structs too, using getExpand(),
9047 // though watch out for things like bitfields.
9048 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
9049 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
9050
9051 if (const RecordType *RT = Ty->getAs<RecordType>()) {
9052 const RecordDecl *RD = RT->getDecl();
9053 if (RD->hasFlexibleArrayMember())
9054 return DefaultABIInfo::classifyArgumentType(Ty);
9055 }
9056
9057 // Pack aggregates <= 8 bytes into single VGPR or pair.
9058 uint64_t Size = getContext().getTypeSize(Ty);
9059 if (Size <= 64) {
9060 unsigned NumRegs = (Size + 31) / 32;
9061 NumRegsLeft -= std::min(NumRegsLeft, NumRegs);
9062
9063 if (Size <= 16)
9064 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
9065
9066 if (Size <= 32)
9067 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
9068
9069 // XXX: Should this be i64 instead, and should the limit increase?
9070 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
9071 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
9072 }
9073
9074 if (NumRegsLeft > 0) {
9075 unsigned NumRegs = numRegsForType(Ty);
9076 if (NumRegsLeft >= NumRegs) {
9077 NumRegsLeft -= NumRegs;
9078 return ABIArgInfo::getDirect();
9079 }
9080 }
9081 }
9082
9083 // Otherwise just do the default thing.
9084 ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
9085 if (!ArgInfo.isIndirect()) {
9086 unsigned NumRegs = numRegsForType(Ty);
9087 NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
9088 }
9089
9090 return ArgInfo;
9091}
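
A worked sketch of the register accounting above; the struct is hypothetical and the budget is assumed to start at MaxNumRegsForArgsRet (16).

// Sketch only -- not part of TargetInfo.cpp.
struct S6 { int a, b, c, d, e, f; };   // 192 bits -> numRegsForType() == 6
// S6 is an aggregate with Size > 64, so it reaches the numRegsForType()
// check: 16 >= 6, so the argument is classified getDirect() and
// NumRegsLeft drops from 16 to 10. Once fewer registers remain than an
// aggregate needs, classification falls through to
// DefaultABIInfo::classifyArgumentType().
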
9092
9093class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
9094public:
9095 AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
9096 : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {}
9097 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
9098 CodeGen::CodeGenModule &M) const override;
9099 unsigned getOpenCLKernelCallingConv() const override;
9100
9101 llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
9102 llvm::PointerType *T, QualType QT) const override;
9103
9104 LangAS getASTAllocaAddressSpace() const override {
9105 return getLangASFromTargetAS(
9106 getABIInfo().getDataLayout().getAllocaAddrSpace());
9107 }
9108 LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
9109 const VarDecl *D) const override;
9110 llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
9111 SyncScope Scope,
9112 llvm::AtomicOrdering Ordering,
9113 llvm::LLVMContext &Ctx) const override;
9114 llvm::Function *
9115 createEnqueuedBlockKernel(CodeGenFunction &CGF,
9116 llvm::Function *BlockInvokeFunc,
9117 llvm::Value *BlockLiteral) const override;
9118 bool shouldEmitStaticExternCAliases() const override;
9119 void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
9120};
9121}
9122
9123static bool requiresAMDGPUProtectedVisibility(const Decl *D,
9124 llvm::GlobalValue *GV) {
9125 if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
9126 return false;
9127
9128 return D->hasAttr<OpenCLKernelAttr>() ||
9129 (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
9130 (isa<VarDecl>(D) &&
9131 (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
9132 cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() ||
9133 cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType()));
9134}
9135
9136void AMDGPUTargetCodeGenInfo::setTargetAttributes(
9137 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
9138 if (requiresAMDGPUProtectedVisibility(D, GV)) {
9139 GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
9140 GV->setDSOLocal(true);
9141 }
9142
9143 if (GV->isDeclaration())
9144 return;
9145 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
9146 if (!FD)
9147 return;
9148
9149 llvm::Function *F = cast<llvm::Function>(GV);
9150
9151 const auto *ReqdWGS = M.getLangOpts().OpenCL ?
9152 FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
9153
9154
9155 const bool IsOpenCLKernel = M.getLangOpts().OpenCL &&
9156 FD->hasAttr<OpenCLKernelAttr>();
9157 const bool IsHIPKernel = M.getLangOpts().HIP &&
9158 FD->hasAttr<CUDAGlobalAttr>();
9159 if ((IsOpenCLKernel || IsHIPKernel) &&
9160 (M.getTriple().getOS() == llvm::Triple::AMDHSA))
9161 F->addFnAttr("amdgpu-implicitarg-num-bytes", "56");
9162
9163 if (IsHIPKernel)
9164 F->addFnAttr("uniform-work-group-size", "true");
9165
9166
9167 const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
9168 if (ReqdWGS || FlatWGS) {
9169 unsigned Min = 0;
9170 unsigned Max = 0;
9171 if (FlatWGS) {
9172 Min = FlatWGS->getMin()
9173 ->EvaluateKnownConstInt(M.getContext())
9174 .getExtValue();
9175 Max = FlatWGS->getMax()
9176 ->EvaluateKnownConstInt(M.getContext())
9177 .getExtValue();
9178 }
9179 if (ReqdWGS && Min == 0 && Max == 0)
9180 Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();
9181
9182 if (Min != 0) {
9183      assert(Min <= Max && "Min must be less than or equal Max");
9184
9185 std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
9186 F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
9187 } else
9188      assert(Max == 0 && "Max must be zero");
9189 } else if (IsOpenCLKernel || IsHIPKernel) {
9190 // By default, restrict the maximum size to a value specified by
9191 // --gpu-max-threads-per-block=n or its default value for HIP.
9192 const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
9193 const unsigned DefaultMaxWorkGroupSize =
9194 IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize
9195 : M.getLangOpts().GPUMaxThreadsPerBlock;
9196 std::string AttrVal =
9197 std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
9198 F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
9199 }
9200
9201 if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
9202 unsigned Min =
9203 Attr->getMin()->EvaluateKnownConstInt(M.getContext()).getExtValue();
9204 unsigned Max = Attr->getMax() ? Attr->getMax()
9205 ->EvaluateKnownConstInt(M.getContext())
9206 .getExtValue()
9207 : 0;
9208
9209 if (Min != 0) {
9210      assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");
9211
9212 std::string AttrVal = llvm::utostr(Min);
9213 if (Max != 0)
9214 AttrVal = AttrVal + "," + llvm::utostr(Max);
9215 F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
9216 } else
9217      assert(Max == 0 && "Max must be zero");
9218 }
9219
9220 if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
9221 unsigned NumSGPR = Attr->getNumSGPR();
9222
9223 if (NumSGPR != 0)
9224 F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
9225 }
9226
9227 if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
9228 uint32_t NumVGPR = Attr->getNumVGPR();
9229
9230 if (NumVGPR != 0)
9231 F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
9232 }
9233
9234 if (M.getContext().getTargetInfo().allowAMDGPUUnsafeFPAtomics())
9235 F->addFnAttr("amdgpu-unsafe-fp-atomics", "true");
9236
9237 if (!getABIInfo().getCodeGenOpts().EmitIEEENaNCompliantInsts)
9238 F->addFnAttr("amdgpu-ieee", "false");
9239}
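
A sketch of the attributes this routine would emit for a hypothetical HIP kernel with explicit bounds; the attribute spellings are the usual clang AMDGPU attributes, and exact values depend on language options and the target OS.

// Sketch only -- not part of TargetInfo.cpp.
//
//   __attribute__((amdgpu_flat_work_group_size(64, 256),
//                  amdgpu_waves_per_eu(2, 4)))
//   __global__ void k();                    // hypothetical HIP kernel
//
// would receive roughly:
//   "uniform-work-group-size"="true"
//   "amdgpu-flat-work-group-size"="64,256"
//   "amdgpu-waves-per-eu"="2,4"
//   "amdgpu-implicitarg-num-bytes"="56"     // only when the OS is AMDHSA
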
9240
9241unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
9242 return llvm::CallingConv::AMDGPU_KERNEL;
9243}
9244
9245// Currently LLVM assumes null pointers always have value 0,
9246// which results in incorrectly transformed IR. Therefore, instead of
9247// emitting null pointers in private and local address spaces, a null
9248// pointer in generic address space is emitted which is cast to a
9249// pointer in local or private address space.
9250llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
9251 const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
9252 QualType QT) const {
9253 if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
9254 return llvm::ConstantPointerNull::get(PT);
9255
9256 auto &Ctx = CGM.getContext();
9257 auto NPT = llvm::PointerType::get(PT->getElementType(),
9258 Ctx.getTargetAddressSpace(LangAS::opencl_generic));
9259 return llvm::ConstantExpr::getAddrSpaceCast(
9260 llvm::ConstantPointerNull::get(NPT), PT);
9261}
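
A sketch of the constant this produces when the null value is non-zero, assuming the common amdgcn address-space layout.

// For example, an i8* null in the private address space (addrspace(5) on
// amdgcn, where the null value is not 0) is emitted as the constant
//   addrspacecast (i8* null to i8 addrspace(5)*)
// while address spaces whose null value is 0 still get ConstantPointerNull.
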
9262
9263LangAS
9264AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
9265 const VarDecl *D) const {
9266  assert(!CGM.getLangOpts().OpenCL &&
9267         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
9268         "Address space agnostic languages only");
9269 LangAS DefaultGlobalAS = getLangASFromTargetAS(
9270 CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
9271 if (!D)
9272 return DefaultGlobalAS;
9273
9274 LangAS AddrSpace = D->getType().getAddressSpace();
9275  assert(AddrSpace == LangAS::Default || isTargetAddressSpace(AddrSpace));
9276 if (AddrSpace != LangAS::Default)
9277 return AddrSpace;
9278
9279 if (CGM.isTypeConstant(D->getType(), false)) {
9280 if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
9281 return ConstAS.getValue();
9282 }
9283 return DefaultGlobalAS;
9284}
9285
9286llvm::SyncScope::ID
9287AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
9288 SyncScope Scope,
9289 llvm::AtomicOrdering Ordering,
9290 llvm::LLVMContext &Ctx) const {
9291 std::string Name;
9292 switch (Scope) {
9293 case SyncScope::OpenCLWorkGroup:
9294 Name = "workgroup";
9295 break;
9296 case SyncScope::OpenCLDevice:
9297 Name = "agent";
9298 break;
9299 case SyncScope::OpenCLAllSVMDevices:
9300 Name = "";
9301 break;
9302 case SyncScope::OpenCLSubGroup:
9303 Name = "wavefront";
9304 }
9305
9306 if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
9307 if (!Name.empty())
9308 Name = Twine(Twine(Name) + Twine("-")).str();
9309
9310 Name = Twine(Twine(Name) + Twine("one-as")).str();
9311 }
9312
9313 return Ctx.getOrInsertSyncScopeID(Name);
9314}
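
The resulting sync-scope names, spelled out for a few (scope, ordering) pairs; this only restates the switch and the "-one-as" suffix logic above.

//   (OpenCLWorkGroup,     seq_cst)  -> "workgroup"
//   (OpenCLDevice,        acquire)  -> "agent-one-as"
//   (OpenCLAllSVMDevices, acquire)  -> "one-as"
//   (OpenCLAllSVMDevices, seq_cst)  -> ""          (default/system scope)
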
9315
9316bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
9317 return false;
9318}
9319
9320void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
9321 const FunctionType *&FT) const {
9322 FT = getABIInfo().getContext().adjustFunctionType(
9323 FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
9324}
9325
9326//===----------------------------------------------------------------------===//
9327// SPARC v8 ABI Implementation.
9328// Based on the SPARC Compliance Definition version 2.4.1.
9329//
9330// Ensures that complex values are passed in registers.
9331//
9332namespace {
9333class SparcV8ABIInfo : public DefaultABIInfo {
9334public:
9335 SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
9336
9337private:
9338 ABIArgInfo classifyReturnType(QualType RetTy) const;
9339 void computeInfo(CGFunctionInfo &FI) const override;
9340};
9341} // end anonymous namespace
9342
9343
9344ABIArgInfo
9345SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
9346 if (Ty->isAnyComplexType()) {
9347 return ABIArgInfo::getDirect();
9348 }
9349 else {
9350 return DefaultABIInfo::classifyReturnType(Ty);
9351 }
9352}
9353
9354void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
9355
9356 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
9357 for (auto &Arg : FI.arguments())
9358 Arg.info = classifyArgumentType(Arg.type);
9359}
9360
9361namespace {
9362class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
9363public:
9364 SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
9365 : TargetCodeGenInfo(std::make_unique<SparcV8ABIInfo>(CGT)) {}
9366};
9367} // end anonymous namespace
9368
9369//===----------------------------------------------------------------------===//
9370// SPARC v9 ABI Implementation.
9371// Based on the SPARC Compliance Definition version 2.4.1.
9372//
9373// Function arguments are mapped to a nominal "parameter array" and promoted to
9374// registers depending on their type. Each argument occupies 8 or 16 bytes in
9375// the array; structs larger than 16 bytes are passed indirectly.
9376//
9377// One case requires special care:
9378//
9379// struct mixed {
9380// int i;
9381// float f;
9382// };
9383//
9384// When a struct mixed is passed by value, it only occupies 8 bytes in the
9385// parameter array, but the int is passed in an integer register, and the float
9386// is passed in a floating point register. This is represented as two arguments
9387// with the LLVM IR inreg attribute:
9388//
9389// declare void f(i32 inreg %i, float inreg %f)
9390//
9391// The code generator will only allocate 4 bytes from the parameter array for
9392// the inreg arguments. All other arguments are allocated a multiple of 8
9393// bytes.
9394//
9395namespace {
9396class SparcV9ABIInfo : public ABIInfo {
9397public:
9398 SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
9399
9400private:
9401 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
9402 void computeInfo(CGFunctionInfo &FI) const override;
9403 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
9404 QualType Ty) const override;
9405
9406 // Coercion type builder for structs passed in registers. The coercion type
9407 // serves two purposes:
9408 //
9409 // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
9410 // in registers.
9411 // 2. Expose aligned floating point elements as first-level elements, so the
9412 // code generator knows to pass them in floating point registers.
9413 //
9414 // We also compute the InReg flag which indicates that the struct contains
9415 // aligned 32-bit floats.
9416 //
9417 struct CoerceBuilder {
9418 llvm::LLVMContext &Context;
9419 const llvm::DataLayout &DL;
9420 SmallVector<llvm::Type*, 8> Elems;
9421 uint64_t Size;
9422 bool InReg;
9423
9424 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
9425 : Context(c), DL(dl), Size(0), InReg(false) {}
9426
9427 // Pad Elems with integers until Size is ToSize.
9428 void pad(uint64_t ToSize) {
9429      assert(ToSize >= Size && "Cannot remove elements");
9430 if (ToSize == Size)
9431 return;
9432
9433 // Finish the current 64-bit word.
9434 uint64_t Aligned = llvm::alignTo(Size, 64);
9435 if (Aligned > Size && Aligned <= ToSize) {
9436 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
9437 Size = Aligned;
9438 }
9439
9440 // Add whole 64-bit words.
9441 while (Size + 64 <= ToSize) {
9442 Elems.push_back(llvm::Type::getInt64Ty(Context));
9443 Size += 64;
9444 }
9445
9446 // Final in-word padding.
9447 if (Size < ToSize) {
9448 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
9449 Size = ToSize;
9450 }
9451 }
9452
9453 // Add a floating point element at Offset.
9454 void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
9455 // Unaligned floats are treated as integers.
9456 if (Offset % Bits)
9457 return;
9458 // The InReg flag is only required if there are any floats < 64 bits.
9459 if (Bits < 64)
9460 InReg = true;
9461 pad(Offset);
9462 Elems.push_back(Ty);
9463 Size = Offset + Bits;
9464 }
9465
9466 // Add a struct type to the coercion type, starting at Offset (in bits).
9467 void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
9468 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
9469 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
9470 llvm::Type *ElemTy = StrTy->getElementType(i);
9471 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
9472 switch (ElemTy->getTypeID()) {
9473 case llvm::Type::StructTyID:
9474 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
9475 break;
9476 case llvm::Type::FloatTyID:
9477 addFloat(ElemOffset, ElemTy, 32);
9478 break;
9479 case llvm::Type::DoubleTyID:
9480 addFloat(ElemOffset, ElemTy, 64);
9481 break;
9482 case llvm::Type::FP128TyID:
9483 addFloat(ElemOffset, ElemTy, 128);
9484 break;
9485 case llvm::Type::PointerTyID:
9486 if (ElemOffset % 64 == 0) {
9487 pad(ElemOffset);
9488 Elems.push_back(ElemTy);
9489 Size += 64;
9490 }
9491 break;
9492 default:
9493 break;
9494 }
9495 }
9496 }
9497
9498 // Check if Ty is a usable substitute for the coercion type.
9499 bool isUsableType(llvm::StructType *Ty) const {
9500 return llvm::makeArrayRef(Elems) == Ty->elements();
9501 }
9502
9503 // Get the coercion type as a literal struct type.
9504 llvm::Type *getType() const {
9505 if (Elems.size() == 1)
9506 return Elems.front();
9507 else
9508 return llvm::StructType::get(Context, Elems);
9509 }
9510 };
9511};
9512} // end anonymous namespace
9513
9514ABIArgInfo
9515SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
9516 if (Ty->isVoidType())
9517 return ABIArgInfo::getIgnore();
9518
9519 uint64_t Size = getContext().getTypeSize(Ty);
9520
9521 // Anything too big to fit in registers is passed with an explicit indirect
9522 // pointer / sret pointer.
9523 if (Size > SizeLimit)
9524 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
9525
9526 // Treat an enum type as its underlying type.
9527 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
9528 Ty = EnumTy->getDecl()->getIntegerType();
9529
9530 // Integer types smaller than a register are extended.
9531 if (Size < 64 && Ty->isIntegerType())
9532 return ABIArgInfo::getExtend(Ty);
9533
9534 if (const auto *EIT = Ty->getAs<ExtIntType>())
9535 if (EIT->getNumBits() < 64)
9536 return ABIArgInfo::getExtend(Ty);
9537
9538 // Other non-aggregates go in registers.
9539 if (!isAggregateTypeForABI(Ty))
9540 return ABIArgInfo::getDirect();
9541
9542 // If a C++ object has either a non-trivial copy constructor or a non-trivial
9543 // destructor, it is passed with an explicit indirect pointer / sret pointer.
9544 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
9545 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
9546
9547 // This is a small aggregate type that should be passed in registers.
9548 // Build a coercion type from the LLVM struct type.
9549 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
9550 if (!StrTy)
9551 return ABIArgInfo::getDirect();
9552
9553 CoerceBuilder CB(getVMContext(), getDataLayout());
9554 CB.addStruct(0, StrTy);
9555 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));
9556
9557 // Try to use the original type for coercion.
9558 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
9559
9560 if (CB.InReg)
9561 return ABIArgInfo::getDirectInReg(CoerceTy);
9562 else
9563 return ABIArgInfo::getDirect(CoerceTy);
9564}
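
A worked sketch for the 'struct mixed' example from the SPARC v9 block comment, assuming 32-bit int and 32-bit float.

// Sketch only -- not part of TargetInfo.cpp.
struct mixed { int i; float f; };
// The int field is skipped by addStruct() (integers fall into the default
// case) and recreated by pad(32) when the aligned float is added, so the
// builder ends with Elems = { i32, float }, Size = 64, InReg = true.
// classifyType() therefore returns getDirectInReg({ i32, float }) -- the
// 'declare void f(i32 inreg %i, float inreg %f)' lowering described above.
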
9565
9566Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
9567 QualType Ty) const {
9568 ABIArgInfo AI = classifyType(Ty, 16 * 8);
9569 llvm::Type *ArgTy = CGT.ConvertType(Ty);
9570 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
9571 AI.setCoerceToType(ArgTy);
9572
9573 CharUnits SlotSize = CharUnits::fromQuantity(8);
9574
9575 CGBuilderTy &Builder = CGF.Builder;
9576 Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
9577 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
9578
9579 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
9580
9581 Address ArgAddr = Address::invalid();
9582 CharUnits Stride;
9583 switch (AI.getKind()) {
9584 case ABIArgInfo::Expand:
9585 case ABIArgInfo::CoerceAndExpand:
9586 case ABIArgInfo::InAlloca:
9587    llvm_unreachable("Unsupported ABI kind for va_arg");
9588
9589 case ABIArgInfo::Extend: {
9590 Stride = SlotSize;
9591 CharUnits Offset = SlotSize - TypeInfo.Width;
9592 ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
9593 break;
9594 }
9595
9596 case ABIArgInfo::Direct: {
9597 auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
9598 Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize);
9599 ArgAddr = Addr;
9600 break;
9601 }
9602
9603 case ABIArgInfo::Indirect:
9604 case ABIArgInfo::IndirectAliased:
9605 Stride = SlotSize;
9606 ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
9607 ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
9608 TypeInfo.Align);
9609 break;
9610
9611 case ABIArgInfo::Ignore:
9612 return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.Align);
9613 }
9614
9615 // Update VAList.
9616 Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next");
9617 Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
9618
9619 return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
9620}
9621
9622void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
9623 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
9624 for (auto &I : FI.arguments())
9625 I.info = classifyType(I.type, 16 * 8);
9626}
9627
9628namespace {
9629class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
9630public:
9631 SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
9632 : TargetCodeGenInfo(std::make_unique<SparcV9ABIInfo>(CGT)) {}
9633
9634 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
9635 return 14;
9636 }
9637
9638 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
9639 llvm::Value *Address) const override;
9640};
9641} // end anonymous namespace
9642
9643bool
9644SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
9645 llvm::Value *Address) const {
9646 // This is calculated from the LLVM and GCC tables and verified
9647 // against gcc output. AFAIK all ABIs use the same encoding.
9648
9649 CodeGen::CGBuilderTy &Builder = CGF.Builder;
9650
9651 llvm::IntegerType *i8 = CGF.Int8Ty;
9652 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
9653 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
9654
9655 // 0-31: the 8-byte general-purpose registers
9656 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
9657
9658 // 32-63: f0-31, the 4-byte floating-point registers
9659 AssignToArrayRange(Builder, Address, Four8, 32, 63);
9660
9661 // Y = 64
9662 // PSR = 65
9663 // WIM = 66
9664 // TBR = 67
9665 // PC = 68
9666 // NPC = 69
9667 // FSR = 70
9668 // CSR = 71
9669 AssignToArrayRange(Builder, Address, Eight8, 64, 71);
9670
9671 // 72-87: d0-15, the 8-byte floating-point registers
9672 AssignToArrayRange(Builder, Address, Eight8, 72, 87);
9673
9674 return false;
9675}
9676
9677// ARC ABI implementation.
9678namespace {
9679
9680class ARCABIInfo : public DefaultABIInfo {
9681public:
9682 using DefaultABIInfo::DefaultABIInfo;
9683
9684private:
9685 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
9686 QualType Ty) const override;
9687
9688 void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const {
9689 if (!State.FreeRegs)
9690 return;
9691 if (Info.isIndirect() && Info.getInReg())
9692 State.FreeRegs--;
9693 else if (Info.isDirect() && Info.getInReg()) {
9694 unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32;
9695 if (sz < State.FreeRegs)
9696 State.FreeRegs -= sz;
9697 else
9698 State.FreeRegs = 0;
9699 }
9700 }
9701
9702 void computeInfo(CGFunctionInfo &FI) const override {
9703 CCState State(FI);
9704 // ARC uses 8 registers to pass arguments.
9705 State.FreeRegs = 8;
9706
9707 if (!getCXXABI().classifyReturnType(FI))
9708 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
9709 updateState(FI.getReturnInfo(), FI.getReturnType(), State);
9710 for (auto &I : FI.arguments()) {
9711 I.info = classifyArgumentType(I.type, State.FreeRegs);
9712 updateState(I.info, I.type, State);
9713 }
9714 }
9715
9716 ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const;
9717 ABIArgInfo getIndirectByValue(QualType Ty) const;
9718 ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const;
9719 ABIArgInfo classifyReturnType(QualType RetTy) const;
9720};
9721
9722class ARCTargetCodeGenInfo : public TargetCodeGenInfo {
9723public:
9724 ARCTargetCodeGenInfo(CodeGenTypes &CGT)
9725 : TargetCodeGenInfo(std::make_unique<ARCABIInfo>(CGT)) {}
9726};
9727
9728
9729ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const {
9730 return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) :
9731 getNaturalAlignIndirect(Ty, false);
9732}
9733
9734ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const {
9735 // Compute the byval alignment.
9736 const unsigned MinABIStackAlignInBytes = 4;
9737 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
9738 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
9739 TypeAlign > MinABIStackAlignInBytes);
9740}
9741
9742Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
9743 QualType Ty) const {
9744 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
9745 getContext().getTypeInfoInChars(Ty),
9746 CharUnits::fromQuantity(4), true);
9747}
9748
9749ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
9750 uint8_t FreeRegs) const {
9751 // Handle the generic C++ ABI.
9752 const RecordType *RT = Ty->getAs<RecordType>();
9753 if (RT) {
9754 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
9755 if (RAA == CGCXXABI::RAA_Indirect)
9756 return getIndirectByRef(Ty, FreeRegs > 0);
9757
9758 if (RAA == CGCXXABI::RAA_DirectInMemory)
9759 return getIndirectByValue(Ty);
9760 }
9761
9762 // Treat an enum type as its underlying type.
9763 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
9764 Ty = EnumTy->getDecl()->getIntegerType();
9765
9766 auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32;
9767
9768 if (isAggregateTypeForABI(Ty)) {
9769 // Structures with flexible arrays are always indirect.
9770 if (RT && RT->getDecl()->hasFlexibleArrayMember())
9771 return getIndirectByValue(Ty);
9772
9773 // Ignore empty structs/unions.
9774 if (isEmptyRecord(getContext(), Ty, true))
9775 return ABIArgInfo::getIgnore();
9776
9777 llvm::LLVMContext &LLVMContext = getVMContext();
9778
9779 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
9780 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
9781 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
9782
9783 return FreeRegs >= SizeInRegs ?
9784 ABIArgInfo::getDirectInReg(Result) :
9785 ABIArgInfo::getDirect(Result, 0, nullptr, false);
9786 }
9787
9788 if (const auto *EIT = Ty->getAs<ExtIntType>())
9789 if (EIT->getNumBits() > 64)
9790 return getIndirectByValue(Ty);
9791
9792 return isPromotableIntegerTypeForABI(Ty)
9793 ? (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty)
9794 : ABIArgInfo::getExtend(Ty))
9795 : (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg()
9796 : ABIArgInfo::getDirect());
9797}
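
A small sketch of the ARC classification above; the argument types are hypothetical.

// Sketch only -- not part of TargetInfo.cpp.
struct P { int x, y, z; };   // SizeInRegs == 3
//   FreeRegs >= 3 : getDirectInReg({i32, i32, i32})
//   FreeRegs <  3 : getDirect({i32, i32, i32}, 0, nullptr,
//                             /*CanBeFlattened=*/false)
// A 'short' argument is a promotable integer, so it takes the
// getExtendInReg / getExtend path instead.
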
9798
9799ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const {
9800 if (RetTy->isAnyComplexType())
9801 return ABIArgInfo::getDirectInReg();
9802
9803  // Return values larger than 4 registers are returned indirectly.
9804 auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32;
9805 if (RetSize > 4)
9806 return getIndirectByRef(RetTy, /*HasFreeRegs*/ true);
9807
9808 return DefaultABIInfo::classifyReturnType(RetTy);
9809}
9810
9811} // End anonymous namespace.
9812
9813//===----------------------------------------------------------------------===//
9814// XCore ABI Implementation
9815//===----------------------------------------------------------------------===//
9816
9817namespace {
9818
9819/// A SmallStringEnc instance is used to build up the TypeString by passing
9820/// it by reference between functions that append to it.
9821typedef llvm::SmallString<128> SmallStringEnc;
9822
9823/// TypeStringCache caches the meta encodings of Types.
9824///
9825/// The reason for caching TypeStrings is two fold:
9826/// 1. To cache a type's encoding for later uses;
9827/// 2. As a means to break recursive member type inclusion.
9828///
9829/// A cache Entry can have a Status of:
9830/// NonRecursive: The type encoding is not recursive;
9831/// Recursive: The type encoding is recursive;
9832/// Incomplete: An incomplete TypeString;
9833/// IncompleteUsed: An incomplete TypeString that has been used in a
9834/// Recursive type encoding.
9835///
9836/// A NonRecursive entry will have all of its sub-members expanded as fully
9837/// as possible. Whilst it may contain types which are recursive, the type
9838/// itself is not recursive and thus its encoding may be safely used whenever
9839/// the type is encountered.
9840///
9841/// A Recursive entry will have all of its sub-members expanded as fully as
9842/// possible. The type itself is recursive and it may contain other types which
9843/// are recursive. The Recursive encoding must not be used during the expansion
9844/// of a recursive type's recursive branch. For simplicity the code uses
9845/// IncompleteCount to reject all usage of Recursive encodings for member types.
9846///
9847/// An Incomplete entry is always a RecordType and only encodes its
9848/// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
9849/// are placed into the cache during type expansion as a means to identify and
9850/// handle recursive inclusion of types as sub-members. If there is recursion
9851/// the entry becomes IncompleteUsed.
9852///
9853/// During the expansion of a RecordType's members:
9854///
9855/// If the cache contains a NonRecursive encoding for the member type, the
9856/// cached encoding is used;
9857///
9858/// If the cache contains a Recursive encoding for the member type, the
9859/// cached encoding is 'Swapped' out, as it may be incorrect, and...
9860///
9861/// If the member is a RecordType, an Incomplete encoding is placed into the
9862/// cache to break potential recursive inclusion of itself as a sub-member;
9863///
9864/// Once a member RecordType has been expanded, its temporary incomplete
9865/// entry is removed from the cache. If a Recursive encoding was swapped out
9866/// it is swapped back in;
9867///
9868/// If an incomplete entry is used to expand a sub-member, the incomplete
9869/// entry is marked as IncompleteUsed. The cache keeps count of how many
9870/// IncompleteUsed entries it currently contains in IncompleteUsedCount;
9871///
9872/// If a member's encoding is found to be a NonRecursive or Recursive viz:
9873/// IncompleteUsedCount==0, the member's encoding is added to the cache.
9874/// Else the member is part of a recursive type and thus the recursion has
9875/// been exited too soon for the encoding to be correct for the member.
9876///
9877class TypeStringCache {
9878 enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
9879 struct Entry {
9880 std::string Str; // The encoded TypeString for the type.
9881 enum Status State; // Information about the encoding in 'Str'.
9882 std::string Swapped; // A temporary place holder for a Recursive encoding
9883 // during the expansion of RecordType's members.
9884 };
9885 std::map<const IdentifierInfo *, struct Entry> Map;
9886 unsigned IncompleteCount; // Number of Incomplete entries in the Map.
9887 unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
9888public:
9889 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
9890 void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
9891 bool removeIncomplete(const IdentifierInfo *ID);
9892 void addIfComplete(const IdentifierInfo *ID, StringRef Str,
9893 bool IsRecursive);
9894 StringRef lookupStr(const IdentifierInfo *ID);
9895};
9896
9897/// TypeString encodings for enum & union fields must be ordered.
9898/// FieldEncoding is a helper for this ordering process.
9899class FieldEncoding {
9900 bool HasName;
9901 std::string Enc;
9902public:
9903 FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
9904 StringRef str() { return Enc; }
9905 bool operator<(const FieldEncoding &rhs) const {
9906 if (HasName != rhs.HasName) return HasName;
9907 return Enc < rhs.Enc;
9908 }
9909};
9910
9911class XCoreABIInfo : public DefaultABIInfo {
9912public:
9913 XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
9914 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
9915 QualType Ty) const override;
9916};
9917
9918class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
9919 mutable TypeStringCache TSC;
9920 void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
9921 const CodeGen::CodeGenModule &M) const;
9922
9923public:
9924 XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
9925 : TargetCodeGenInfo(std::make_unique<XCoreABIInfo>(CGT)) {}
9926 void emitTargetMetadata(CodeGen::CodeGenModule &CGM,
9927 const llvm::MapVector<GlobalDecl, StringRef>
9928 &MangledDeclNames) const override;
9929};
9930
9931} // End anonymous namespace.
9932
9933// TODO: this implementation is likely now redundant with the default
9934// EmitVAArg.
9935Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
9936 QualType Ty) const {
9937 CGBuilderTy &Builder = CGF.Builder;
9938
9939 // Get the VAList.
9940 CharUnits SlotSize = CharUnits::fromQuantity(4);
9941 Address AP(Builder.CreateLoad(VAListAddr), SlotSize);
9942
9943 // Handle the argument.
9944 ABIArgInfo AI = classifyArgumentType(Ty);
9945 CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
9946 llvm::Type *ArgTy = CGT.ConvertType(Ty);
9947 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
9948 AI.setCoerceToType(ArgTy);
9949 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
9950
9951 Address Val = Address::invalid();
9952 CharUnits ArgSize = CharUnits::Zero();
9953 switch (AI.getKind()) {
9954 case ABIArgInfo::Expand:
9955 case ABIArgInfo::CoerceAndExpand:
9956 case ABIArgInfo::InAlloca:
9957    llvm_unreachable("Unsupported ABI kind for va_arg");
9958 case ABIArgInfo::Ignore:
9959 Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
9960 ArgSize = CharUnits::Zero();
9961 break;
9962 case ABIArgInfo::Extend:
9963 case ABIArgInfo::Direct:
9964 Val = Builder.CreateBitCast(AP, ArgPtrTy);
9965 ArgSize = CharUnits::fromQuantity(
9966 getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
9967 ArgSize = ArgSize.alignTo(SlotSize);
9968 break;
9969 case ABIArgInfo::Indirect:
9970 case ABIArgInfo::IndirectAliased:
9971 Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
9972 Val = Address(Builder.CreateLoad(Val), TypeAlign);
9973 ArgSize = SlotSize;
9974 break;
9975 }
9976
9977 // Increment the VAList.
9978 if (!ArgSize.isZero()) {
9979 Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize);
9980 Builder.CreateStore(APN.getPointer(), VAListAddr);
9981 }
9982
9983 return Val;
9984}
9985
9986/// During the expansion of a RecordType, an incomplete TypeString is placed
9987/// into the cache as a means to identify and break recursion.
9988/// If there is a Recursive encoding in the cache, it is swapped out and will
9989/// be reinserted by removeIncomplete().
9990/// All other types of encoding should have been used rather than arriving here.
9991void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
9992 std::string StubEnc) {
9993 if (!ID)
9994 return;
9995 Entry &E = Map[ID];
9996  assert((E.Str.empty() || E.State == Recursive) &&
9997         "Incorrect use of addIncomplete");
9998  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
9999 E.Swapped.swap(E.Str); // swap out the Recursive
10000 E.Str.swap(StubEnc);
10001 E.State = Incomplete;
10002 ++IncompleteCount;
10003}
10004
10005/// Once the RecordType has been expanded, the temporary incomplete TypeString
10006/// must be removed from the cache.
10007/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
10008/// Returns true if the RecordType was defined recursively.
10009bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
10010 if (!ID)
10011 return false;
10012 auto I = Map.find(ID);
10013  assert(I != Map.end() && "Entry not present");
10014 Entry &E = I->second;
10015  assert((E.State == Incomplete ||
10016          E.State == IncompleteUsed) &&
10017         "Entry must be an incomplete type");
10018 bool IsRecursive = false;
10019 if (E.State == IncompleteUsed) {
10020 // We made use of our Incomplete encoding, thus we are recursive.
10021 IsRecursive = true;
10022 --IncompleteUsedCount;
10023 }
10024 if (E.Swapped.empty())
10025 Map.erase(I);
10026 else {
10027 // Swap the Recursive back.
10028 E.Swapped.swap(E.Str);
10029 E.Swapped.clear();
10030 E.State = Recursive;
10031 }
10032 --IncompleteCount;
10033 return IsRecursive;
10034}
10035
10036/// Add the encoded TypeString to the cache only if it is NonRecursive or
10037/// Recursive (viz: all sub-members were expanded as fully as possible).
10038void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
10039 bool IsRecursive) {
10040 if (!ID || IncompleteUsedCount)
10041    return; // No key or it is an incomplete sub-type so don't add.
10042 Entry &E = Map[ID];
10043 if (IsRecursive && !E.Str.empty()) {
10044    assert(E.State == Recursive && E.Str.size() == Str.size() &&
10045           "This is not the same Recursive entry");
10046 // The parent container was not recursive after all, so we could have used
10047    // this Recursive sub-member entry after all, but we assumed the worst when
10048 // we started viz: IncompleteCount!=0.
10049 return;
10050 }
10051  assert(E.Str.empty() && "Entry already present");
10052 E.Str = Str.str();
10053 E.State = IsRecursive? Recursive : NonRecursive;
10054}
10055
10056/// Return a cached TypeString encoding for the ID. If there isn't one, or we
10057/// are recursively expanding a type (IncompleteCount != 0) and the cached
10058/// encoding is Recursive, return an empty StringRef.
10059StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
10060 if (!ID)
10061 return StringRef(); // We have no key.
10062 auto I = Map.find(ID);
10063 if (I == Map.end())
10064 return StringRef(); // We have no encoding.
10065 Entry &E = I->second;
10066 if (E.State == Recursive && IncompleteCount)
10067 return StringRef(); // We don't use Recursive encodings for member types.
10068
10069 if (E.State == Incomplete) {
10070 // The incomplete type is being used to break out of recursion.
10071 E.State = IncompleteUsed;
10072 ++IncompleteUsedCount;
10073 }
10074 return E.Str;
10075}
10076
10077/// The XCore ABI includes a type information section that communicates symbol
10078/// type information to the linker. The linker uses this information to verify
10079/// safety/correctness of things such as array bounds and pointers et al.
10080/// The ABI only requires C (and XC) language modules to emit TypeStrings.
10081/// This type information (TypeString) is emitted into meta data for all global
10082/// symbols: definitions, declarations, functions & variables.
10083///
10084/// The TypeString carries type, qualifier, name, size & value details.
10085/// Please see 'Tools Development Guide' section 2.16.2 for format details:
10086/// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
10087/// The output is tested by test/CodeGen/xcore-stringtype.c.
10088///
10089static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
10090 const CodeGen::CodeGenModule &CGM,
10091 TypeStringCache &TSC);
10092
10093/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
10094void XCoreTargetCodeGenInfo::emitTargetMD(
10095 const Decl *D, llvm::GlobalValue *GV,
10096 const CodeGen::CodeGenModule &CGM) const {
10097 SmallStringEnc Enc;
10098 if (getTypeString(Enc, D, CGM, TSC)) {
10099 llvm::LLVMContext &Ctx = CGM.getModule().getContext();
10100 llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
10101 llvm::MDString::get(Ctx, Enc.str())};
10102 llvm::NamedMDNode *MD =
10103 CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
10104 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
10105 }
10106}
10107
10108void XCoreTargetCodeGenInfo::emitTargetMetadata(
10109 CodeGen::CodeGenModule &CGM,
10110 const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {
10111 // Warning, new MangledDeclNames may be appended within this loop.
10112 // We rely on MapVector insertions adding new elements to the end
10113 // of the container.
10114 for (unsigned I = 0; I != MangledDeclNames.size(); ++I) {
10115 auto Val = *(MangledDeclNames.begin() + I);
10116 llvm::GlobalValue *GV = CGM.GetGlobalValue(Val.second);
10117 if (GV) {
10118 const Decl *D = Val.first.getDecl()->getMostRecentDecl();
10119 emitTargetMD(D, GV, CGM);
10120 }
10121 }
10122}
10123//===----------------------------------------------------------------------===//
10124// SPIR ABI Implementation
10125//===----------------------------------------------------------------------===//
10126
10127namespace {
10128class SPIRABIInfo : public DefaultABIInfo {
10129public:
10130 SPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); }
10131
10132private:
10133 void setCCs();
10134};
10135} // end anonymous namespace
10136namespace {
10137class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
10138public:
10139 SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
10140 : TargetCodeGenInfo(std::make_unique<SPIRABIInfo>(CGT)) {}
10141
10142 LangAS getASTAllocaAddressSpace() const override {
10143 return getLangASFromTargetAS(
10144 getABIInfo().getDataLayout().getAllocaAddrSpace());
10145 }
10146
10147 unsigned getOpenCLKernelCallingConv() const override;
10148};
10149
10150} // End anonymous namespace.
10151void SPIRABIInfo::setCCs() {
10152  assert(getRuntimeCC() == llvm::CallingConv::C);
10153 RuntimeCC = llvm::CallingConv::SPIR_FUNC;
10154}
10155
10156namespace clang {
10157namespace CodeGen {
10158void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
10159 DefaultABIInfo SPIRABI(CGM.getTypes());
10160 SPIRABI.computeInfo(FI);
10161}
10162}
10163}
10164
10165unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
10166 return llvm::CallingConv::SPIR_KERNEL;
10167}
10168
10169static bool appendType(SmallStringEnc &Enc, QualType QType,
10170 const CodeGen::CodeGenModule &CGM,
10171 TypeStringCache &TSC);
10172
10173/// Helper function for appendRecordType().
10174/// Builds a SmallVector containing the encoded field types in declaration
10175/// order.
10176static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
10177 const RecordDecl *RD,
10178 const CodeGen::CodeGenModule &CGM,
10179 TypeStringCache &TSC) {
10180 for (const auto *Field : RD->fields()) {
10181 SmallStringEnc Enc;
10182 Enc += "m(";
10183 Enc += Field->getName();
10184 Enc += "){";
10185 if (Field->isBitField()) {
10186 Enc += "b(";
10187 llvm::raw_svector_ostream OS(Enc);
10188 OS << Field->getBitWidthValue(CGM.getContext());
10189 Enc += ':';
10190 }
10191 if (!appendType(Enc, Field->getType(), CGM, TSC))
10192 return false;
10193 if (Field->isBitField())
10194 Enc += ')';
10195 Enc += '}';
10196 FE.emplace_back(!Field->getName().empty(), Enc);
10197 }
10198 return true;
10199}
10200
10201/// Appends structure and union types to Enc and adds encoding to cache.
10202/// Recursively calls appendType (via extractFieldType) for each field.
10203/// Union types have their fields ordered according to the ABI.
10204static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
10205 const CodeGen::CodeGenModule &CGM,
10206 TypeStringCache &TSC, const IdentifierInfo *ID) {
10207 // Append the cached TypeString if we have one.
10208 StringRef TypeString = TSC.lookupStr(ID);
10209 if (!TypeString.empty()) {
10210 Enc += TypeString;
10211 return true;
10212 }
10213
10214 // Start to emit an incomplete TypeString.
10215 size_t Start = Enc.size();
10216 Enc += (RT->isUnionType()? 'u' : 's');
10217 Enc += '(';
10218 if (ID)
10219 Enc += ID->getName();
10220 Enc += "){";
10221
10222 // We collect all encoded fields and order as necessary.
10223 bool IsRecursive = false;
10224 const RecordDecl *RD = RT->getDecl()->getDefinition();
10225 if (RD && !RD->field_empty()) {
10226 // An incomplete TypeString stub is placed in the cache for this RecordType
10227 // so that recursive calls to this RecordType will use it whilst building a
10228 // complete TypeString for this RecordType.
10229 SmallVector<FieldEncoding, 16> FE;
10230 std::string StubEnc(Enc.substr(Start).str());
10231 StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
10232 TSC.addIncomplete(ID, std::move(StubEnc));
10233 if (!extractFieldType(FE, RD, CGM, TSC)) {
10234 (void) TSC.removeIncomplete(ID);
10235 return false;
10236 }
10237 IsRecursive = TSC.removeIncomplete(ID);
10238 // The ABI requires unions to be sorted but not structures.
10239 // See FieldEncoding::operator< for sort algorithm.
10240 if (RT->isUnionType())
10241 llvm::sort(FE);
10242 // We can now complete the TypeString.
10243 unsigned E = FE.size();
10244 for (unsigned I = 0; I != E; ++I) {
10245 if (I)
10246 Enc += ',';
10247 Enc += FE[I].str();
10248 }
10249 }
10250 Enc += '}';
10251 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
10252 return true;
10253}
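
A worked sketch of the recursion handling, derived from the stub logic above; the struct is hypothetical.

// Sketch only -- not part of TargetInfo.cpp.
struct S { struct S *next; };
// addIncomplete() caches the stub "s(S){}" before the fields are walked;
// the pointer member then reuses that stub, so the finished encoding is
//   "s(S){m(next){p(s(S){})}}"
// and removeIncomplete() reports the RecordType as recursive.
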
10254
10255/// Appends enum types to Enc and adds the encoding to the cache.
10256static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
10257 TypeStringCache &TSC,
10258 const IdentifierInfo *ID) {
10259 // Append the cached TypeString if we have one.
10260 StringRef TypeString = TSC.lookupStr(ID);
10261 if (!TypeString.empty()) {
10262 Enc += TypeString;
10263 return true;
10264 }
10265
10266 size_t Start = Enc.size();
10267 Enc += "e(";
10268 if (ID)
10269 Enc += ID->getName();
10270 Enc += "){";
10271
10272 // We collect all encoded enumerations and order them alphanumerically.
10273 if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
10274 SmallVector<FieldEncoding, 16> FE;
10275 for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
10276 ++I) {
10277 SmallStringEnc EnumEnc;
10278 EnumEnc += "m(";
10279 EnumEnc += I->getName();
10280 EnumEnc += "){";
10281 I->getInitVal().toString(EnumEnc);
10282 EnumEnc += '}';
10283 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
10284 }
10285 llvm::sort(FE);
10286 unsigned E = FE.size();
10287 for (unsigned I = 0; I != E; ++I) {
10288 if (I)
10289 Enc += ',';
10290 Enc += FE[I].str();
10291 }
10292 }
10293 Enc += '}';
10294 TSC.addIfComplete(ID, Enc.substr(Start), false);
10295 return true;
10296}
10297
10298/// Appends type's qualifier to Enc.
10299/// This is done prior to appending the type's encoding.
10300static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
10301 // Qualifiers are emitted in alphabetical order.
10302 static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"};
10303 int Lookup = 0;
10304 if (QT.isConstQualified())
10305 Lookup += 1<<0;
10306 if (QT.isRestrictQualified())
10307 Lookup += 1<<1;
10308 if (QT.isVolatileQualified())
10309 Lookup += 1<<2;
10310 Enc += Table[Lookup];
10311}
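
For illustration, a few qualifier prefixes produced by the table above, combined with the builtin encodings defined just below:

//   int                  ->  "si"
//   const int            ->  "c:si"
//   const volatile int   ->  "cv:si"
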
10312
10313/// Appends built-in types to Enc.
10314static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
10315 const char *EncType;
10316 switch (BT->getKind()) {
10317 case BuiltinType::Void:
10318 EncType = "0";
10319 break;
10320 case BuiltinType::Bool:
10321 EncType = "b";
10322 break;
10323 case BuiltinType::Char_U:
10324 EncType = "uc";
10325 break;
10326 case BuiltinType::UChar:
10327 EncType = "uc";
10328 break;
10329 case BuiltinType::SChar:
10330 EncType = "sc";
10331 break;
10332 case BuiltinType::UShort:
10333 EncType = "us";
10334 break;
10335 case BuiltinType::Short:
10336 EncType = "ss";
10337 break;
10338 case BuiltinType::UInt:
10339 EncType = "ui";
10340 break;
10341 case BuiltinType::Int:
10342 EncType = "si";
10343 break;
10344 case BuiltinType::ULong:
10345 EncType = "ul";
10346 break;
10347 case BuiltinType::Long:
10348 EncType = "sl";
10349 break;
10350 case BuiltinType::ULongLong:
10351 EncType = "ull";
10352 break;
10353 case BuiltinType::LongLong:
10354 EncType = "sll";
10355 break;
10356 case BuiltinType::Float:
10357 EncType = "ft";
10358 break;
10359 case BuiltinType::Double:
10360 EncType = "d";
10361 break;
10362 case BuiltinType::LongDouble:
10363 EncType = "ld";
10364 break;
10365 default:
10366 return false;
10367 }
10368 Enc += EncType;
10369 return true;
10370}
10371
10372/// Appends a pointer encoding to Enc before calling appendType for the pointee.
10373static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
10374 const CodeGen::CodeGenModule &CGM,
10375 TypeStringCache &TSC) {
10376 Enc += "p(";
10377 if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
10378 return false;
10379 Enc += ')';
10380 return true;
10381}
10382
10383/// Appends array encoding to Enc before calling appendType for the element.
10384static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
10385 const ArrayType *AT,
10386 const CodeGen::CodeGenModule &CGM,
10387 TypeStringCache &TSC, StringRef NoSizeEnc) {
10388 if (AT->getSizeModifier() != ArrayType::Normal)
10389 return false;
10390 Enc += "a(";
10391 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
10392 CAT->getSize().toStringUnsigned(Enc);
10393 else
10394 Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
10395 Enc += ':';
10396 // The Qualifiers should be attached to the type rather than the array.
10397 appendQualifier(Enc, QT);
10398 if (!appendType(Enc, AT->getElementType(), CGM, TSC))
10399 return false;
10400 Enc += ')';
10401 return true;
10402}
10403
10404/// Appends a function encoding to Enc, calling appendType for the return type
10405/// and the arguments.
10406static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
10407 const CodeGen::CodeGenModule &CGM,
10408 TypeStringCache &TSC) {
10409 Enc += "f{";
10410 if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
10411 return false;
10412 Enc += "}(";
10413 if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
10414 // N.B. we are only interested in the adjusted param types.
10415 auto I = FPT->param_type_begin();
10416 auto E = FPT->param_type_end();
10417 if (I != E) {
10418 do {
10419 if (!appendType(Enc, *I, CGM, TSC))
10420 return false;
10421 ++I;
10422 if (I != E)
10423 Enc += ',';
10424 } while (I != E);
10425 if (FPT->isVariadic())
10426 Enc += ",va";
10427 } else {
10428 if (FPT->isVariadic())
10429 Enc += "va";
10430 else
10431 Enc += '0';
10432 }
10433 }
10434 Enc += ')';
10435 return true;
10436}
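
A few complete TypeStrings assembled from the helpers above; the C declarations are hypothetical and assume the builtin encodings listed earlier.

/* Sketch only -- not part of TargetInfo.cpp. Expected encodings follow the
   appendType() helpers above. */
int  f(int, unsigned char *);   /* "f{si}(si,p(uc))" */
void g(void);                   /* "f{0}(0)"         */
int  a[10];                     /* "a(10:si)"        */
extern int b[];                 /* "a(*:si)" -- global array, unknown size */
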
10437
10438/// Handles the type's qualifier before dispatching a call to handle specific
10439/// type encodings.
10440static bool appendType(SmallStringEnc &Enc, QualType QType,
10441 const CodeGen::CodeGenModule &CGM,
10442 TypeStringCache &TSC) {
10443
10444 QualType QT = QType.getCanonicalType();
10445
10446 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
10447 // The Qualifiers should be attached to the type rather than the array.
10448 // Thus we don't call appendQualifier() here.
10449 return appendArrayType(Enc, QT, AT, CGM, TSC, "");
10450
10451 appendQualifier(Enc, QT);
10452
10453 if (const BuiltinType *BT = QT->getAs<BuiltinType>())
10454 return appendBuiltinType(Enc, BT);
10455
10456 if (const PointerType *PT = QT->getAs<PointerType>())
10457 return appendPointerType(Enc, PT, CGM, TSC);
10458
10459 if (const EnumType *ET = QT->getAs<EnumType>())
10460 return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());
10461
10462 if (const RecordType *RT = QT->getAsStructureType())
10463 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
10464
10465 if (const RecordType *RT = QT->getAsUnionType())
10466 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
10467
10468 if (const FunctionType *FT = QT->getAs<FunctionType>())
10469 return appendFunctionType(Enc, FT, CGM, TSC);
10470
10471 return false;
10472}
10473
10474static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
10475 const CodeGen::CodeGenModule &CGM,
10476 TypeStringCache &TSC) {
10477 if (!D)
10478 return false;
10479
10480 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
10481 if (FD->getLanguageLinkage() != CLanguageLinkage)
10482 return false;
10483 return appendType(Enc, FD->getType(), CGM, TSC);
10484 }
10485
10486 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
10487 if (VD->getLanguageLinkage() != CLanguageLinkage)
10488 return false;
10489 QualType QT = VD->getType().getCanonicalType();
10490 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
10491 // Global ArrayTypes are given a size of '*' if the size is unknown.
10492 // The Qualifiers should be attached to the type rather than the array.
10493 // Thus we don't call appendQualifier() here.
10494 return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
10495 }
10496 return appendType(Enc, QT, CGM, TSC);
10497 }
10498 return false;
10499}
10500
10501//===----------------------------------------------------------------------===//
10502// RISCV ABI Implementation
10503//===----------------------------------------------------------------------===//
10504
10505namespace {
10506class RISCVABIInfo : public DefaultABIInfo {
10507private:
10508 // Size of the integer ('x') registers in bits.
10509 unsigned XLen;
10510 // Size of the floating point ('f') registers in bits. Note that the target
10511 // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
10512 // with soft float ABI has FLen==0).
10513 unsigned FLen;
10514 static const int NumArgGPRs = 8;
10515 static const int NumArgFPRs = 8;
10516 bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
10517 llvm::Type *&Field1Ty,
10518 CharUnits &Field1Off,
10519 llvm::Type *&Field2Ty,
10520 CharUnits &Field2Off) const;
10521
10522public:
10523 RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen)
10524 : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {}
10525
10526 // DefaultABIInfo's classifyReturnType and classifyArgumentType are
10527 // non-virtual, but computeInfo is virtual, so we override it.
10528 void computeInfo(CGFunctionInfo &FI) const override;
10529
10530 ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
10531 int &ArgFPRsLeft) const;
10532 ABIArgInfo classifyReturnType(QualType RetTy) const;
10533
10534 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
10535 QualType Ty) const override;
10536
10537 ABIArgInfo extendType(QualType Ty) const;
10538
10539 bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
10540 CharUnits &Field1Off, llvm::Type *&Field2Ty,
10541 CharUnits &Field2Off, int &NeededArgGPRs,
10542 int &NeededArgFPRs) const;
10543 ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
10544 CharUnits Field1Off,
10545 llvm::Type *Field2Ty,
10546 CharUnits Field2Off) const;
10547};
10548} // end anonymous namespace
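// Illustrative only: getTargetCodeGenInfo (in the driver code further below)
// derives XLen from the pointer width and FLen from the named ABI string:
//   ilp32  / lp64   -> FLen = 0   (floats passed per the integer ABI)
//   ilp32f / lp64f  -> FLen = 32
//   ilp32d / lp64d  -> FLen = 64
//   XLen = 32 on riscv32, 64 on riscv64.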
10549
10550void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
10551 QualType RetTy = FI.getReturnType();
10552 if (!getCXXABI().classifyReturnType(FI))
10553 FI.getReturnInfo() = classifyReturnType(RetTy);
10554
10555 // IsRetIndirect is true if classifyArgumentType indicated the value should
10556 // be passed indirect, or if the type is a scalar larger than 2*XLen bits
10557 // and is not a complex type with elements <= FLen. E.g. fp128 is passed direct
10558 // in LLVM IR, relying on the backend lowering code to rewrite the argument
10559 // list and pass indirectly on RV32.
10560 bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
10561 if (!IsRetIndirect && RetTy->isScalarType() &&
10562 getContext().getTypeSize(RetTy) > (2 * XLen)) {
10563 if (RetTy->isComplexType() && FLen) {
10564 QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
10565 IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
10566 } else {
10567 // This is a normal scalar > 2*XLen, such as fp128 on RV32.
10568 IsRetIndirect = true;
10569 }
10570 }
10571
10572 // We must track the number of GPRs used in order to conform to the RISC-V
10573 // ABI, as integer scalars passed in registers should have signext/zeroext
10574 // when promoted, but are anyext if passed on the stack. As GPR usage is
10575 // different for variadic arguments, we must also track whether we are
10576 // examining a vararg or not.
10577 int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
10578 int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
10579 int NumFixedArgs = FI.getNumRequiredArgs();
10580
10581 int ArgNum = 0;
10582 for (auto &ArgInfo : FI.arguments()) {
10583 bool IsFixed = ArgNum < NumFixedArgs;
10584 ArgInfo.info =
10585 classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
10586 ArgNum++;
10587 }
10588}
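// Illustrative only: on RV32 (XLen == 32) a function returning 'long double'
// (an fp128 scalar, so 128 > 2*XLen bits) takes the IsRetIndirect path above,
// reserving one GPR for the sret pointer; its fixed arguments then start with
// ArgGPRsLeft == 7 rather than 8.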
10589
10590// Returns true if the struct is a potential candidate for the floating point
10591 // calling convention. If this function returns true, the caller is still
10592 // responsible for checking that, when only a single field was found, that
10593 // field is a float.
10594bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
10595 llvm::Type *&Field1Ty,
10596 CharUnits &Field1Off,
10597 llvm::Type *&Field2Ty,
10598 CharUnits &Field2Off) const {
10599 bool IsInt = Ty->isIntegralOrEnumerationType();
10600 bool IsFloat = Ty->isRealFloatingType();
10601
10602 if (IsInt || IsFloat) {
10603 uint64_t Size = getContext().getTypeSize(Ty);
10604 if (IsInt && Size > XLen)
10605 return false;
10606 // Can't be eligible if larger than the FP registers. Half precision isn't
10607 // currently supported on RISC-V and the ABI hasn't been confirmed, so
10608 // default to the integer ABI in that case.
10609 if (IsFloat && (Size > FLen || Size < 32))
10610 return false;
10611 // Can't be eligible if an integer type was already found (int+int pairs
10612 // are not eligible).
10613 if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
10614 return false;
10615 if (!Field1Ty) {
10616 Field1Ty = CGT.ConvertType(Ty);
10617 Field1Off = CurOff;
10618 return true;
10619 }
10620 if (!Field2Ty) {
10621 Field2Ty = CGT.ConvertType(Ty);
10622 Field2Off = CurOff;
10623 return true;
10624 }
10625 return false;
10626 }
10627
10628 if (auto CTy = Ty->getAs<ComplexType>()) {
10629 if (Field1Ty)
10630 return false;
10631 QualType EltTy = CTy->getElementType();
10632 if (getContext().getTypeSize(EltTy) > FLen)
10633 return false;
10634 Field1Ty = CGT.ConvertType(EltTy);
10635 Field1Off = CurOff;
10636 Field2Ty = Field1Ty;
10637 Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
10638 return true;
10639 }
10640
10641 if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
10642 uint64_t ArraySize = ATy->getSize().getZExtValue();
10643 QualType EltTy = ATy->getElementType();
10644 CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
10645 for (uint64_t i = 0; i < ArraySize; ++i) {
10646 bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
10647 Field1Off, Field2Ty, Field2Off);
10648 if (!Ret)
10649 return false;
10650 CurOff += EltSize;
10651 }
10652 return true;
10653 }
10654
10655 if (const auto *RTy = Ty->getAs<RecordType>()) {
10656 // Structures with either a non-trivial destructor or a non-trivial
10657 // copy constructor are not eligible for the FP calling convention.
10658 if (getRecordArgABI(Ty, CGT.getCXXABI()))
10659 return false;
10660 if (isEmptyRecord(getContext(), Ty, true))
10661 return true;
10662 const RecordDecl *RD = RTy->getDecl();
10663 // Unions aren't eligible unless they're empty (which is caught above).
10664 if (RD->isUnion())
10665 return false;
10666 int ZeroWidthBitFieldCount = 0;
10667 for (const FieldDecl *FD : RD->fields()) {
10668 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
10669 uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
10670 QualType QTy = FD->getType();
10671 if (FD->isBitField()) {
10672 unsigned BitWidth = FD->getBitWidthValue(getContext());
10673 // Allow a bitfield with a type greater than XLen as long as the
10674 // bitwidth is XLen or less.
10675 if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
10676 QTy = getContext().getIntTypeForBitwidth(XLen, false);
10677 if (BitWidth == 0) {
10678 ZeroWidthBitFieldCount++;
10679 continue;
10680 }
10681 }
10682
10683 bool Ret = detectFPCCEligibleStructHelper(
10684 QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
10685 Field1Ty, Field1Off, Field2Ty, Field2Off);
10686 if (!Ret)
10687 return false;
10688
10689 // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
10690 // or int+fp structs, but are ignored for a struct with an fp field and
10691 // any number of zero-width bitfields.
10692 if (Field2Ty && ZeroWidthBitFieldCount > 0)
10693 return false;
10694 }
10695 return Field1Ty != nullptr;
10696 }
10697
10698 return false;
10699}
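// Illustrative only, assuming an lp64d target (XLen == 64, FLen == 64):
//   struct A { float f; };            // candidate: single fp field
//   struct B { double d; float f; };  // candidate: fp + fp
//   struct C { int i; double d; };    // candidate: int + fp
//   struct D { int a; int b; };       // rejected: int + int pair
//   struct E { float x, y, z; };      // rejected: more than two fields
//   struct F { long double v; };      // rejected: fp128 is wider than FLen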
10700
10701// Determine if a struct is eligible for passing according to the floating
10702// point calling convention (i.e., when flattened it contains a single fp
10703// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
10704// NeededArgGPRs are incremented appropriately.
10705bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
10706 CharUnits &Field1Off,
10707 llvm::Type *&Field2Ty,
10708 CharUnits &Field2Off,
10709 int &NeededArgGPRs,
10710 int &NeededArgFPRs) const {
10711 Field1Ty = nullptr;
10712 Field2Ty = nullptr;
10713 NeededArgGPRs = 0;
10714 NeededArgFPRs = 0;
10715 bool IsCandidate = detectFPCCEligibleStructHelper(
10716 Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
10717 // Not really a candidate if we have a single int but no float.
10718 if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
10719 return false;
10720 if (!IsCandidate)
10721 return false;
10722 if (Field1Ty && Field1Ty->isFloatingPointTy())
10723 NeededArgFPRs++;
10724 else if (Field1Ty)
10725 NeededArgGPRs++;
10726 if (Field2Ty && Field2Ty->isFloatingPointTy())
10727 NeededArgFPRs++;
10728 else if (Field2Ty)
10729 NeededArgGPRs++;
10730 return true;
10731}
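// Illustrative only: for the lp64d candidates sketched above,
//   struct B { double d; float f; }  ->  NeededArgFPRs = 2, NeededArgGPRs = 0
//   struct C { int i; double d; }    ->  NeededArgGPRs = 1, NeededArgFPRs = 1
// while a bare  struct { int i; }  is rejected here because its only
// flattened field is an integer rather than a float.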
10732
10733// Call getCoerceAndExpand for the two-element flattened struct described by
10734// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
10735// appropriate coerceToType and unpaddedCoerceToType.
10736ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
10737 llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
10738 CharUnits Field2Off) const {
10739 SmallVector<llvm::Type *, 3> CoerceElts;
10740 SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
10741 if (!Field1Off.isZero())
10742 CoerceElts.push_back(llvm::ArrayType::get(
10743 llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));
10744
10745 CoerceElts.push_back(Field1Ty);
10746 UnpaddedCoerceElts.push_back(Field1Ty);
10747
10748 if (!Field2Ty) {
10749 return ABIArgInfo::getCoerceAndExpand(
10750 llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
10751 UnpaddedCoerceElts[0]);
10752 }
10753
10754 CharUnits Field2Align =
10755 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(Field2Ty));
10756 CharUnits Field1End = Field1Off +
10757 CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
10758 CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);
10759
10760 CharUnits Padding = CharUnits::Zero();
10761 if (Field2Off > Field2OffNoPadNoPack)
10762 Padding = Field2Off - Field2OffNoPadNoPack;
10763 else if (Field2Off != Field2Align && Field2Off > Field1End)
10764 Padding = Field2Off - Field1End;
10765
10766 bool IsPacked = !Field2Off.isMultipleOf(Field2Align);
10767
10768 if (!Padding.isZero())
10769 CoerceElts.push_back(llvm::ArrayType::get(
10770 llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));
10771
10772 CoerceElts.push_back(Field2Ty);
10773 UnpaddedCoerceElts.push_back(Field2Ty);
10774
10775 auto CoerceToType =
10776 llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
10777 auto UnpaddedCoerceToType =
10778 llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);
10779
10780 return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
10781}
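// Illustrative only: for  struct { float f; double d; }  on lp64d the helpers
// above report Field1Ty = float at offset 0 and Field2Ty = double at offset 8,
// so this routine returns a coerce-and-expand ABIArgInfo whose coerceToType is
// the LLVM struct  { float, double }; an [N x i8] padding member (and possibly
// a packed struct) is only emitted when the second field sits away from its
// naturally aligned slot.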
10782
10783ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
10784 int &ArgGPRsLeft,
10785 int &ArgFPRsLeft) const {
10786 assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow")((void)0);
10787 Ty = useFirstFieldIfTransparentUnion(Ty);
10788
10789 // Structures with either a non-trivial destructor or a non-trivial
10790 // copy constructor are always passed indirectly.
10791 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
10792 if (ArgGPRsLeft)
10793 ArgGPRsLeft -= 1;
10794 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
10795 CGCXXABI::RAA_DirectInMemory);
10796 }
10797
10798 // Ignore empty structs/unions.
10799 if (isEmptyRecord(getContext(), Ty, true))
10800 return ABIArgInfo::getIgnore();
10801
10802 uint64_t Size = getContext().getTypeSize(Ty);
10803
10804 // Pass floating point values via FPRs if possible.
10805 if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
10806 FLen >= Size && ArgFPRsLeft) {
10807 ArgFPRsLeft--;
10808 return ABIArgInfo::getDirect();
10809 }
10810
10811 // Complex types for the hard float ABI must be passed direct rather than
10812 // using CoerceAndExpand.
10813 if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
10814 QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
10815 if (getContext().getTypeSize(EltTy) <= FLen) {
10816 ArgFPRsLeft -= 2;
10817 return ABIArgInfo::getDirect();
10818 }
10819 }
10820
10821 if (IsFixed && FLen && Ty->isStructureOrClassType()) {
10822 llvm::Type *Field1Ty = nullptr;
10823 llvm::Type *Field2Ty = nullptr;
10824 CharUnits Field1Off = CharUnits::Zero();
10825 CharUnits Field2Off = CharUnits::Zero();
10826 int NeededArgGPRs = 0;
10827 int NeededArgFPRs = 0;
10828 bool IsCandidate =
10829 detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
10830 NeededArgGPRs, NeededArgFPRs);
10831 if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
10832 NeededArgFPRs <= ArgFPRsLeft) {
10833 ArgGPRsLeft -= NeededArgGPRs;
10834 ArgFPRsLeft -= NeededArgFPRs;
10835 return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
10836 Field2Off);
10837 }
10838 }
10839
10840 uint64_t NeededAlign = getContext().getTypeAlign(Ty);
10841 bool MustUseStack = false;
10842 // Determine the number of GPRs needed to pass the current argument
10843 // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
10844 // register pairs, so may consume 3 registers.
10845 int NeededArgGPRs = 1;
10846 if (!IsFixed && NeededAlign == 2 * XLen)
10847 NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
10848 else if (Size > XLen && Size <= 2 * XLen)
10849 NeededArgGPRs = 2;
10850
10851 if (NeededArgGPRs > ArgGPRsLeft) {
10852 MustUseStack = true;
10853 NeededArgGPRs = ArgGPRsLeft;
10854 }
10855
10856 ArgGPRsLeft -= NeededArgGPRs;
10857
10858 if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
10859 // Treat an enum type as its underlying type.
10860 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
10861 Ty = EnumTy->getDecl()->getIntegerType();
10862
10863 // All integral types are promoted to XLen width, unless passed on the
10864 // stack.
10865 if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) {
10866 return extendType(Ty);
10867 }
10868
10869 if (const auto *EIT = Ty->getAs<ExtIntType>()) {
10870 if (EIT->getNumBits() < XLen && !MustUseStack)
10871 return extendType(Ty);
10872 if (EIT->getNumBits() > 128 ||
10873 (!getContext().getTargetInfo().hasInt128Type() &&
10874 EIT->getNumBits() > 64))
10875 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
10876 }
10877
10878 return ABIArgInfo::getDirect();
10879 }
10880
10881 // Aggregates which are <= 2*XLen will be passed in registers if possible,
10882 // so coerce to integers.
10883 if (Size <= 2 * XLen) {
10884 unsigned Alignment = getContext().getTypeAlign(Ty);
10885
10886 // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
10887 // required, and a 2-element XLen array if only XLen alignment is required.
10888 if (Size <= XLen) {
10889 return ABIArgInfo::getDirect(
10890 llvm::IntegerType::get(getVMContext(), XLen));
10891 } else if (Alignment == 2 * XLen) {
10892 return ABIArgInfo::getDirect(
10893 llvm::IntegerType::get(getVMContext(), 2 * XLen));
10894 } else {
10895 return ABIArgInfo::getDirect(llvm::ArrayType::get(
10896 llvm::IntegerType::get(getVMContext(), XLen), 2));
10897 }
10898 }
10899 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
10900}
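// Illustrative only: small integer-only aggregates on RV64 (XLen == 64), none
// of which qualify for the FP calling convention above:
//   struct S8  { int32_t a, b; };              //  64 bits            -> i64
//   struct S12 { int32_t a, b, c; };           //  96 bits            -> [2 x i64]
//   struct alignas(16) S16 { int64_t a, b; };  // 128 bits, align 128 -> i128
//   struct S24 { int64_t a, b, c; };           // 192 bits            -> indirect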
10901
10902ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
10903 if (RetTy->isVoidType())
10904 return ABIArgInfo::getIgnore();
10905
10906 int ArgGPRsLeft = 2;
10907 int ArgFPRsLeft = FLen ? 2 : 0;
10908
10909 // The rules for return and argument types are the same, so defer to
10910 // classifyArgumentType.
10911 return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
10912 ArgFPRsLeft);
10913}
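// Illustrative only: return values may use at most two GPRs and (with a hard
// float ABI) two FPRs, so on lp64d a  struct { double a; double b; }  return
// is coerce-and-expanded into two FPRs, while a three-double struct falls
// back to an indirect (sret) return.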
10914
10915Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
10916 QualType Ty) const {
10917 CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);
10918
10919 // Empty records are ignored for parameter passing purposes.
10920 if (isEmptyRecord(getContext(), Ty, true)) {
10921 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
10922 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
10923 return Addr;
10924 }
10925
10926 auto TInfo = getContext().getTypeInfoInChars(Ty);
10927
10928 // Arguments bigger than 2*XLen bits are passed indirectly.
10929 bool IsIndirect = TInfo.Width > 2 * SlotSize;
10930
10931 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo,
10932 SlotSize, /*AllowHigherAlign=*/true);
10933}
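// Illustrative only: on RV64 the slot size is 8 bytes, so a 24-byte struct
// read through va_arg (wider than 2 * SlotSize) is fetched indirectly: the
// va_list slot holds a pointer to the argument rather than the argument
// itself.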
10934
10935ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
10936 int TySize = getContext().getTypeSize(Ty);
10937 // The RV64 ABI requires unsigned 32-bit integers to be sign-extended.
10938 if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
10939 return ABIArgInfo::getSignExtend(Ty);
10940 return ABIArgInfo::getExtend(Ty);
10941}
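// Illustrative only: on RV64 an 'unsigned int' (32-bit) argument is therefore
// marked signext rather than zeroext, matching the RV64 convention of keeping
// 32-bit values sign-extended in 64-bit registers; other sub-XLen integers
// get the extension implied by their signedness.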
10942
10943namespace {
10944class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
10945public:
10946 RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
10947 unsigned FLen)
10948 : TargetCodeGenInfo(std::make_unique<RISCVABIInfo>(CGT, XLen, FLen)) {}
10949
10950 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
10951 CodeGen::CodeGenModule &CGM) const override {
10952 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
10953 if (!FD) return;
10954
10955 const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
10956 if (!Attr)
10957 return;
10958
10959 const char *Kind;
10960 switch (Attr->getInterrupt()) {
10961 case RISCVInterruptAttr::user: Kind = "user"; break;
10962 case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
10963 case RISCVInterruptAttr::machine: Kind = "machine"; break;
10964 }
10965
10966 auto *Fn = cast<llvm::Function>(GV);
10967
10968 Fn->addFnAttr("interrupt", Kind);
10969 }
10970};
10971} // namespace
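// Illustrative only: a handler declared as (hypothetical)
//   __attribute__((interrupt("machine"))) void isr(void);
// receives the string function attribute  "interrupt"="machine"  via
// setTargetAttributes above.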
10972
10973//===----------------------------------------------------------------------===//
10974// VE ABI Implementation.
10975//===----------------------------------------------------------------------===//
10976namespace {
10977class VEABIInfo : public DefaultABIInfo {
10978public:
10979 VEABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
10980
10981private:
10982 ABIArgInfo classifyReturnType(QualType RetTy) const;
10983 ABIArgInfo classifyArgumentType(QualType RetTy) const;
10984 void computeInfo(CGFunctionInfo &FI) const override;
10985};
10986} // end anonymous namespace
10987
10988ABIArgInfo VEABIInfo::classifyReturnType(QualType Ty) const {
10989 if (Ty->isAnyComplexType())
10990 return ABIArgInfo::getDirect();
10991 uint64_t Size = getContext().getTypeSize(Ty);
10992 if (Size < 64 && Ty->isIntegerType())
10993 return ABIArgInfo::getExtend(Ty);
10994 return DefaultABIInfo::classifyReturnType(Ty);
10995}
10996
10997ABIArgInfo VEABIInfo::classifyArgumentType(QualType Ty) const {
10998 if (Ty->isAnyComplexType())
10999 return ABIArgInfo::getDirect();
11000 uint64_t Size = getContext().getTypeSize(Ty);
11001 if (Size < 64 && Ty->isIntegerType())
11002 return ABIArgInfo::getExtend(Ty);
11003 return DefaultABIInfo::classifyArgumentType(Ty);
11004}
11005
11006void VEABIInfo::computeInfo(CGFunctionInfo &FI) const {
11007 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
11008 for (auto &Arg : FI.arguments())
11009 Arg.info = classifyArgumentType(Arg.type);
11010}
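// Illustrative only: under these rules a 'short' or 'int' argument or return
// value (narrower than 64 bits) is sign-extended to the full register and the
// unsigned counterparts are zero-extended, while 64-bit and wider types fall
// through to DefaultABIInfo.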
11011
11012namespace {
11013class VETargetCodeGenInfo : public TargetCodeGenInfo {
11014public:
11015 VETargetCodeGenInfo(CodeGenTypes &CGT)
11016 : TargetCodeGenInfo(std::make_unique<VEABIInfo>(CGT)) {}
11017 // The VE ABI requires that the arguments of variadic and prototype-less
11018 // functions be passed in both registers and memory.
11019 bool isNoProtoCallVariadic(const CallArgList &args,
11020 const FunctionNoProtoType *fnType) const override {
11021 return true;
11022 }
11023};
11024} // end anonymous namespace
11025
11026//===----------------------------------------------------------------------===//
11027// Driver code
11028//===----------------------------------------------------------------------===//
11029
11030bool CodeGenModule::supportsCOMDAT() const {
11031 return getTriple().supportsCOMDAT();
11032}
11033
11034const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
11035 if (TheTargetCodeGenInfo)
11036 return *TheTargetCodeGenInfo;
11037
11038 // Helper to set the unique_ptr while still keeping the return value.
11039 auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
11040 this->TheTargetCodeGenInfo.reset(P);
11041 return *P;
11042 };
11043
11044 const llvm::Triple &Triple = getTarget().getTriple();
11045 switch (Triple.getArch()) {
11046 default:
11047 return SetCGInfo(new DefaultTargetCodeGenInfo(Types));
11048
11049 case llvm::Triple::le32:
11050 return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
11051 case llvm::Triple::m68k:
11052 return SetCGInfo(new M68kTargetCodeGenInfo(Types));
11053 case llvm::Triple::mips:
11054 case llvm::Triple::mipsel:
11055 if (Triple.getOS() == llvm::Triple::NaCl)
11056 return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
11057 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true));
11058
11059 case llvm::Triple::mips64:
11060 case llvm::Triple::mips64el:
11061 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false));
11062
11063 case llvm::Triple::avr:
11064 return SetCGInfo(new AVRTargetCodeGenInfo(Types));
11065
11066 case llvm::Triple::aarch64:
11067 case llvm::Triple::aarch64_32:
11068 case llvm::Triple::aarch64_be: {
11069 AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
11070 if (getTarget().getABI() == "darwinpcs")
11071 Kind = AArch64ABIInfo::DarwinPCS;
11072 else if (Triple.isOSWindows())
11073 return SetCGInfo(
11074 new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64));
11075
11076 return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
11077 }
11078
11079 case llvm::Triple::wasm32:
11080 case llvm::Triple::wasm64: {
11081 WebAssemblyABIInfo::ABIKind Kind = WebAssemblyABIInfo::MVP;
11082 if (getTarget().getABI() == "experimental-mv")
11083 Kind = WebAssemblyABIInfo::ExperimentalMV;
11084 return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types, Kind));
11085 }
11086
11087 case llvm::Triple::arm:
11088 case llvm::Triple::armeb:
11089 case llvm::Triple::thumb:
11090 case llvm::Triple::thumbeb: {
11091 if (Triple.getOS() == llvm::Triple::Win32) {
11092 return SetCGInfo(
11093 new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
11094 }
11095
11096 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
11097 StringRef ABIStr = getTarget().getABI();
11098 if (ABIStr == "apcs-gnu")
11099 Kind = ARMABIInfo::APCS;
11100 else if (ABIStr == "aapcs16")
11101 Kind = ARMABIInfo::AAPCS16_VFP;
11102 else if (CodeGenOpts.FloatABI == "hard" ||
11103 (CodeGenOpts.FloatABI != "soft" &&
11104 (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
11105 Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
11106 Triple.getEnvironment() == llvm::Triple::EABIHF)))
11107 Kind = ARMABIInfo::AAPCS_VFP;
11108
11109 return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
11110 }
11111
11112 case llvm::Triple::ppc: {
11113 if (Triple.isOSAIX())
11114 return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ false));
11115
11116 bool IsSoftFloat =
11117 CodeGenOpts.FloatABI == "soft" || getTarget().hasFeature("spe");
11118 bool RetSmallStructInRegABI =
11119 PPC32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
11120 return SetCGInfo(
11121 new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI));
11122 }
11123 case llvm::Triple::ppcle: {
11124 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
11125 bool RetSmallStructInRegABI =
11126 PPC32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
11127 return SetCGInfo(
11128 new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI));
11129 }
11130 case llvm::Triple::ppc64:
11131 if (Triple.isOSAIX())
11132 return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ true));
11133
11134 if (Triple.isOSBinFormatELF()) {
11135 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
11136 if (getTarget().getABI() == "elfv2")
11137 Kind = PPC64_SVR4_ABIInfo::ELFv2;
11138 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
11139
11140 return SetCGInfo(
11141 new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, IsSoftFloat));
11142 }
11143 return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
11144 case llvm::Triple::ppc64le: {
11145 assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!")((void)0);
11146 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
11147 if (getTarget().getABI() == "elfv1")
11148 Kind = PPC64_SVR4_ABIInfo::ELFv1;
11149 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
11150
11151 return SetCGInfo(
11152 new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, IsSoftFloat));
11153 }
11154
11155 case llvm::Triple::nvptx:
11156 case llvm::Triple::nvptx64:
11157 return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));
11158
11159 case llvm::Triple::msp430:
11160 return SetCGInfo(new MSP430TargetCodeGenInfo(Types));
11161
11162 case llvm::Triple::riscv32:
11163 case llvm::Triple::riscv64: {
11164 StringRef ABIStr = getTarget().getABI();
11165 unsigned XLen = getTarget().getPointerWidth(0);
11166 unsigned ABIFLen = 0;
11167 if (ABIStr.endswith("f"))
11168 ABIFLen = 32;
11169 else if (ABIStr.endswith("d"))
11170 ABIFLen = 64;
11171 return SetCGInfo(new RISCVTargetCodeGenInfo(Types, XLen, ABIFLen));
11172 }
11173
11174 case llvm::Triple::systemz: {
11175 bool SoftFloat = CodeGenOpts.FloatABI == "soft";
11176 bool HasVector = !SoftFloat && getTarget().getABI() == "vector";
11177 return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector, SoftFloat));
11178 }
11179
11180 case llvm::Triple::tce:
11181 case llvm::Triple::tcele:
11182 return SetCGInfo(new TCETargetCodeGenInfo(Types));
11183
11184 case llvm::Triple::x86: {
11185 bool IsDarwinVectorABI = Triple.isOSDarwin();
11186 bool RetSmallStructInRegABI =
11187 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
11188 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
11189
11190 if (Triple.getOS() == llvm::Triple::Win32) {
11191 return SetCGInfo(new WinX86_32TargetCodeGenInfo(
11192 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
11193 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
11194 } else {
11195 return SetCGInfo(new X86_32TargetCodeGenInfo(
11196 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
11197 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
11198 CodeGenOpts.FloatABI == "soft"));
11199 }
11200 }
11201
11202 case llvm::Triple::x86_64: {
11203 StringRef ABI = getTarget().getABI();
11204 X86AVXABILevel AVXLevel =
11205 (ABI == "avx512"
11206 ? X86AVXABILevel::AVX512
11207 : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None);
11208
11209 switch (Triple.getOS()) {
11210 case llvm::Triple::Win32:
11211 return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
11212 default:
11213 return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
11214 }
11215 }
11216 case llvm::Triple::hexagon:
11217 return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
11218 case llvm::Triple::lanai:
11219 return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
11220 case llvm::Triple::r600:
11221 return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
11222 case llvm::Triple::amdgcn:
11223 return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
11224 case llvm::Triple::sparc:
11225 return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
11226 case llvm::Triple::sparcv9:
11227 return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
11228 case llvm::Triple::xcore:
11229 return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
11230 case llvm::Triple::arc:
11231 return SetCGInfo(new ARCTargetCodeGenInfo(Types));
11232 case llvm::Triple::spir:
11233 case llvm::Triple::spir64:
11234 return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
11235 case llvm::Triple::ve:
11236 return SetCGInfo(new VETargetCodeGenInfo(Types));
11237 }
11238}
11239
11240/// Create an OpenCL kernel for an enqueued block.
11241///
11242/// The kernel has the same function type as the block invoke function. Its
11243/// name is the name of the block invoke function postfixed with "_kernel".
11244/// It simply calls the block invoke function then returns.
11245llvm::Function *
11246TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
11247 llvm::Function *Invoke,
11248 llvm::Value *BlockLiteral) const {
11249 auto *InvokeFT = Invoke->getFunctionType();
11250 llvm::SmallVector<llvm::Type *, 2> ArgTys;
11251 for (auto &P : InvokeFT->params())
11252 ArgTys.push_back(P);
11253 auto &C = CGF.getLLVMContext();
11254 std::string Name = Invoke->getName().str() + "_kernel";
11255 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
11256 auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
11257 &CGF.CGM.getModule());
11258 auto IP = CGF.Builder.saveIP();
11259 auto *BB = llvm::BasicBlock::Create(C, "entry", F);
11260 auto &Builder = CGF.Builder;
11261 Builder.SetInsertPoint(BB);
11262 llvm::SmallVector<llvm::Value *, 2> Args;
11263 for (auto &A : F->args())
11264 Args.push_back(&A);
11265 llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
11266 call->setCallingConv(Invoke->getCallingConv());
11267 Builder.CreateRetVoid();
11268 Builder.restoreIP(IP);
11269 return F;
11270}
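// Illustrative only (hypothetical names): for a block invoke function
//   define internal void @foo_block_invoke(i8* %literal)
// the wrapper emitted above looks like
//   define internal void @foo_block_invoke_kernel(i8* %literal) {
//   entry:
//     call void @foo_block_invoke(i8* %literal)
//     ret void
//   }
// with the callee's calling convention copied onto the call instruction.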
11271
11272/// Create an OpenCL kernel for an enqueued block.
11273///
11274/// The type of the first argument (the block literal) is the struct type
11275/// of the block literal instead of a pointer type. The first argument
11276/// (block literal) is passed directly by value to the kernel. The kernel
11277/// allocates a struct of the same type on the stack, stores the block literal
11278/// into it, and passes its address to the block invoke function. The kernel
11279/// has the "enqueued-block" function attribute and kernel argument metadata.
11280llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
11281 CodeGenFunction &CGF, llvm::Function *Invoke,
11282 llvm::Value *BlockLiteral) const {
11283 auto &Builder = CGF.Builder;
11284 auto &C = CGF.getLLVMContext();
11285
11286 auto *BlockTy = BlockLiteral->getType()->getPointerElementType();
11287 auto *InvokeFT = Invoke->getFunctionType();
11288 llvm::SmallVector<llvm::Type *, 2> ArgTys;
11289 llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
11290 llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
11291 llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
11292 llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
11293 llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
11294 llvm::SmallVector<llvm::Metadata *, 8> ArgNames;
11295
11296 ArgTys.push_back(BlockTy);
11297 ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
11298 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
11299 ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
11300 ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
11301 AccessQuals.push_back(llvm::MDString::get(C, "none"));
11302 ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
11303 for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
11304 ArgTys.push_back(InvokeFT->getParamType(I));
11305 ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
11306 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
11307 AccessQuals.push_back(llvm::MDString::get(C, "none"));
11308 ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
11309 ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
11310 ArgNames.push_back(
11311 llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
11312 }
11313 std::string Name = Invoke->getName().str() + "_kernel";
11314 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
11315 auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
11316 &CGF.CGM.getModule());
11317 F->addFnAttr("enqueued-block");
11318 auto IP = CGF.Builder.saveIP();
11319 auto *BB = llvm::BasicBlock::Create(C, "entry", F);
11320 Builder.SetInsertPoint(BB);
11321 const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy);
11322 auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
11323 BlockPtr->setAlignment(BlockAlign);
11324 Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
11325 auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
11326 llvm::SmallVector<llvm::Value *, 2> Args;
11327 Args.push_back(Cast);
11328 for (auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
11329 Args.push_back(I);
11330 llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
11331 call->setCallingConv(Invoke->getCallingConv());
11332 Builder.CreateRetVoid();
11333 Builder.restoreIP(IP);
11334
11335 F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
11336 F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
11337 F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
11338 F->setMetadata("kernel_arg_base_type",
11339 llvm::MDNode::get(C, ArgBaseTypeNames));
11340 F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
11341 if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
11342 F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));
11343
11344 return F;
11345}
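// Illustrative only (hypothetical names and types): where the invoke function
// expects a pointer to the block literal, the kernel built above instead
// takes the literal struct by value and forwards its address:
//   define internal void @foo_block_invoke_kernel(%blk %lit, i8* %extra) #0 {
//   entry:
//     %tmp = alloca %blk
//     store %blk %lit, %blk* %tmp
//     %ptr = <pointer cast of %tmp to the invoke's first parameter type>
//     call void @foo_block_invoke(<ty> %ptr, i8* %extra)
//     ret void
//   }
// where #0 carries the "enqueued-block" attribute and the function also gets
// one !kernel_arg_* metadata entry per parameter.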