1 | //===---- TargetInfo.cpp - Encapsulate target details ---------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-Exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // These classes wrap the information about a call or function |
10 | // definition used to handle ABI compliancy. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "TargetInfo.h" |
15 | #include "ABIInfo.h" |
16 | #include "CGBlocks.h" |
17 | #include "CGCXXABI.h" |
18 | #include "CGValue.h" |
19 | #include "CodeGenFunction.h" |
20 | #include "clang/AST/Attr.h" |
21 | #include "clang/AST/RecordLayout.h" |
22 | #include "clang/Basic/CodeGenOptions.h" |
23 | #include "clang/Basic/DiagnosticFrontend.h" |
24 | #include "clang/Basic/Builtins.h" |
25 | #include "clang/CodeGen/CGFunctionInfo.h" |
26 | #include "clang/CodeGen/SwiftCallingConv.h" |
27 | #include "llvm/ADT/SmallBitVector.h" |
28 | #include "llvm/ADT/StringExtras.h" |
29 | #include "llvm/ADT/StringSwitch.h" |
30 | #include "llvm/ADT/Triple.h" |
31 | #include "llvm/ADT/Twine.h" |
32 | #include "llvm/IR/DataLayout.h" |
33 | #include "llvm/IR/IntrinsicsNVPTX.h" |
34 | #include "llvm/IR/IntrinsicsS390.h" |
35 | #include "llvm/IR/Type.h" |
36 | #include "llvm/Support/raw_ostream.h" |
37 | #include <algorithm> // std::sort |
38 | |
39 | using namespace clang; |
40 | using namespace CodeGen; |
41 | |
42 | |
43 | |
44 | |
45 | |
46 | |
47 | |
48 | |
49 | |
50 | |
51 | |
52 | |
53 | |
54 | |
55 | |
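// Coerce a type to an array of integers whose element width equals the
// type's ABI alignment (in bits) and whose element count covers the type's
// full size, padding included.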
56 | static ABIArgInfo coerceToIntArray(QualType Ty, |
57 | ASTContext &Context, |
58 | llvm::LLVMContext &LLVMContext) { |
59 | |
60 | const uint64_t Size = Context.getTypeSize(Ty); |
61 | const uint64_t Alignment = Context.getTypeAlign(Ty); |
62 | llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment); |
63 | const uint64_t NumElements = (Size + Alignment - 1) / Alignment; |
64 | return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements)); |
65 | } |
66 | |
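// Store Value into every byte cell of Array in the inclusive index range
// [FirstIndex, LastIndex], one byte-aligned store per cell.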
67 | static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, |
68 | llvm::Value *Array, |
69 | llvm::Value *Value, |
70 | unsigned FirstIndex, |
71 | unsigned LastIndex) { |
72 | |
73 | for (unsigned I = FirstIndex; I <= LastIndex; ++I) { |
74 | llvm::Value *Cell = |
75 | Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I); |
76 | Builder.CreateAlignedStore(Value, Cell, CharUnits::One()); |
77 | } |
78 | } |
79 | |
80 | static bool isAggregateTypeForABI(QualType T) { |
81 | return !CodeGenFunction::hasScalarEvaluationKind(T) || |
82 | T->isMemberFunctionPointerType(); |
83 | } |
84 | |
85 | ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByVal, |
86 | bool Realign, |
87 | llvm::Type *Padding) const { |
88 | return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), ByVal, |
89 | Realign, Padding); |
90 | } |
91 | |
92 | ABIArgInfo |
93 | ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const { |
94 | return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty), |
95 | false, Realign); |
96 | } |
97 | |
98 | Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, |
99 | QualType Ty) const { |
100 | return Address::invalid(); |
101 | } |
102 | |
103 | bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const { |
104 | if (Ty->isPromotableIntegerType()) |
105 | return true; |
106 | |
107 | if (const auto *EIT = Ty->getAs<ExtIntType>()) |
108 | if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy)) |
109 | return true; |
110 | |
111 | return false; |
112 | } |
113 | |
114 | ABIInfo::~ABIInfo() {} |
115 | |
116 | |
117 | |
118 | |
119 | |
120 | |
121 | |
122 | |
123 | |
124 | |
125 | |
126 | |
127 | |
128 | |
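// Return true if lowering the given scalar sequence needs more than
// maxAllRegisters registers: pointers and integers are counted against
// pointer-sized integer registers, vectors and floats against FP registers.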
129 | static bool occupiesMoreThan(CodeGenTypes &cgt, |
130 | ArrayRef<llvm::Type*> scalarTypes, |
131 | unsigned maxAllRegisters) { |
132 | unsigned intCount = 0, fpCount = 0; |
133 | for (llvm::Type *type : scalarTypes) { |
134 | if (type->isPointerTy()) { |
135 | intCount++; |
136 | } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) { |
137 | auto ptrWidth = cgt.getTarget().getPointerWidth(0); |
138 | intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth; |
139 | } else { |
140 | assert(type->isVectorTy() || type->isFloatingPointTy()); |
141 | fpCount++; |
142 | } |
143 | } |
144 | |
145 | return (intCount + fpCount > maxAllRegisters); |
146 | } |
147 | |
148 | bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize, |
149 | llvm::Type *eltTy, |
150 | unsigned numElts) const { |
151 | |
152 | |
153 | return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16); |
154 | } |
155 | |
156 | static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, |
157 | CGCXXABI &CXXABI) { |
158 | const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); |
159 | if (!RD) { |
160 | if (!RT->getDecl()->canPassInRegisters()) |
161 | return CGCXXABI::RAA_Indirect; |
162 | return CGCXXABI::RAA_Default; |
163 | } |
164 | return CXXABI.getRecordArgABI(RD); |
165 | } |
166 | |
167 | static CGCXXABI::RecordArgABI getRecordArgABI(QualType T, |
168 | CGCXXABI &CXXABI) { |
169 | const RecordType *RT = T->getAs<RecordType>(); |
170 | if (!RT) |
171 | return CGCXXABI::RAA_Default; |
172 | return getRecordArgABI(RT, CXXABI); |
173 | } |
174 | |
175 | static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, |
176 | const ABIInfo &Info) { |
177 | QualType Ty = FI.getReturnType(); |
178 | |
179 | if (const auto *RT = Ty->getAs<RecordType>()) |
180 | if (!isa<CXXRecordDecl>(RT->getDecl()) && |
181 | !RT->getDecl()->canPassInRegisters()) { |
182 | FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty); |
183 | return true; |
184 | } |
185 | |
186 | return CXXABI.classifyReturnType(FI); |
187 | } |
188 | |
189 | |
190 | |
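// If Ty is a transparent union, arguments are passed as if they had the type
// of its first field; otherwise Ty is returned unchanged.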
191 | static QualType useFirstFieldIfTransparentUnion(QualType Ty) { |
192 | if (const RecordType *UT = Ty->getAsUnionType()) { |
193 | const RecordDecl *UD = UT->getDecl(); |
194 | if (UD->hasAttr<TransparentUnionAttr>()) { |
195 | assert(!UD->field_empty() && "sema created an empty transparent union"); |
196 | return UD->field_begin()->getType(); |
197 | } |
198 | } |
199 | return Ty; |
200 | } |
201 | |
202 | CGCXXABI &ABIInfo::getCXXABI() const { |
203 | return CGT.getCXXABI(); |
204 | } |
205 | |
206 | ASTContext &ABIInfo::getContext() const { |
207 | return CGT.getContext(); |
208 | } |
209 | |
210 | llvm::LLVMContext &ABIInfo::getVMContext() const { |
211 | return CGT.getLLVMContext(); |
212 | } |
213 | |
214 | const llvm::DataLayout &ABIInfo::getDataLayout() const { |
215 | return CGT.getDataLayout(); |
216 | } |
217 | |
218 | const TargetInfo &ABIInfo::getTarget() const { |
219 | return CGT.getTarget(); |
220 | } |
221 | |
222 | const CodeGenOptions &ABIInfo::getCodeGenOpts() const { |
223 | return CGT.getCodeGenOpts(); |
224 | } |
225 | |
226 | bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); } |
227 | |
228 | bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { |
229 | return false; |
230 | } |
231 | |
232 | bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, |
233 | uint64_t Members) const { |
234 | return false; |
235 | } |
236 | |
237 | LLVM_DUMP_METHOD void ABIArgInfo::dump() const { |
238 | raw_ostream &OS = llvm::errs(); |
239 | OS << "(ABIArgInfo Kind="; |
240 | switch (TheKind) { |
241 | case Direct: |
242 | OS << "Direct Type="; |
243 | if (llvm::Type *Ty = getCoerceToType()) |
244 | Ty->print(OS); |
245 | else |
246 | OS << "null"; |
247 | break; |
248 | case Extend: |
249 | OS << "Extend"; |
250 | break; |
251 | case Ignore: |
252 | OS << "Ignore"; |
253 | break; |
254 | case InAlloca: |
255 | OS << "InAlloca Offset=" << getInAllocaFieldIndex(); |
256 | break; |
257 | case Indirect: |
258 | OS << "Indirect Align=" << getIndirectAlign().getQuantity() |
259 | << " ByVal=" << getIndirectByVal() |
260 | << " Realign=" << getIndirectRealign(); |
261 | break; |
262 | case IndirectAliased: |
263 | OS << "Indirect Align=" << getIndirectAlign().getQuantity() |
264 |        << " AddrSpace=" << getIndirectAddrSpace() |
265 | << " Realign=" << getIndirectRealign(); |
266 | break; |
267 | case Expand: |
268 | OS << "Expand"; |
269 | break; |
270 | case CoerceAndExpand: |
271 | OS << "CoerceAndExpand Type="; |
272 | getCoerceAndExpandType()->print(OS); |
273 | break; |
274 | } |
275 | OS << ")\n"; |
276 | } |
277 | |
278 | |
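// Round Ptr up to Align by computing (ptr + align - 1) & ~(align - 1) on the
// pointer reinterpreted as an integer.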
279 | static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF, |
280 | llvm::Value *Ptr, |
281 | CharUnits Align) { |
282 | llvm::Value *PtrAsInt = Ptr; |
283 | |
284 | PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy); |
285 | PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt, |
286 | llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1)); |
287 | PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt, |
288 | llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity())); |
289 | PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt, |
290 | Ptr->getType(), |
291 | Ptr->getName() + ".aligned"); |
292 | return PtrAsInt; |
293 | } |
294 | |
295 | |
296 | |
297 | |
298 | |
299 | |
300 | |
301 | |
302 | |
303 | |
304 | |
305 | |
306 | |
307 | |
308 | |
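// Emit va_arg for the common "void *" va_list layout: load the current
// argument pointer, optionally round it up to DirectAlign, advance the list
// by the slot-aligned size, and on big-endian targets step past the padding
// so small arguments are read from the high end of their slot.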
309 | static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, |
310 | Address VAListAddr, |
311 | llvm::Type *DirectTy, |
312 | CharUnits DirectSize, |
313 | CharUnits DirectAlign, |
314 | CharUnits SlotSize, |
315 | bool AllowHigherAlign) { |
316 | |
317 | |
318 | if (VAListAddr.getElementType() != CGF.Int8PtrTy) |
319 | VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy); |
320 | |
321 | llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur"); |
322 | |
323 | |
324 | Address Addr = Address::invalid(); |
325 | if (AllowHigherAlign && DirectAlign > SlotSize) { |
326 | Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign), |
327 | DirectAlign); |
328 | } else { |
329 | Addr = Address(Ptr, SlotSize); |
330 | } |
331 | |
332 | |
333 | CharUnits FullDirectSize = DirectSize.alignTo(SlotSize); |
334 | Address NextPtr = |
335 | CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next"); |
336 | CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr); |
337 | |
338 | |
339 | |
340 | if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() && |
341 | !DirectTy->isStructTy()) { |
342 | Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize); |
343 | } |
344 | |
345 | Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy); |
346 | return Addr; |
347 | } |
348 | |
349 | |
350 | |
351 | |
352 | |
353 | |
354 | |
355 | |
356 | |
357 | |
358 | |
359 | |
360 | |
361 | |
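// Wrapper around emitVoidPtrDirectVAArg: when IsIndirect, the va_list slot
// holds a pointer to the value, which is loaded and used as the result
// address instead of the slot itself.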
362 | static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, |
363 | QualType ValueTy, bool IsIndirect, |
364 | TypeInfoChars ValueInfo, |
365 | CharUnits SlotSizeAndAlign, |
366 | bool AllowHigherAlign) { |
367 | |
368 | CharUnits DirectSize, DirectAlign; |
369 | if (IsIndirect) { |
370 | DirectSize = CGF.getPointerSize(); |
371 | DirectAlign = CGF.getPointerAlign(); |
372 | } else { |
373 | DirectSize = ValueInfo.Width; |
374 | DirectAlign = ValueInfo.Align; |
375 | } |
376 | |
377 | |
378 | llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy); |
379 | if (IsIndirect) |
380 | DirectTy = DirectTy->getPointerTo(0); |
381 | |
382 | Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy, |
383 | DirectSize, DirectAlign, |
384 | SlotSizeAndAlign, |
385 | AllowHigherAlign); |
386 | |
387 | if (IsIndirect) { |
388 | Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.Align); |
389 | } |
390 | |
391 | return Addr; |
392 | |
393 | } |
394 | |
395 | static Address emitMergePHI(CodeGenFunction &CGF, |
396 | Address Addr1, llvm::BasicBlock *Block1, |
397 | Address Addr2, llvm::BasicBlock *Block2, |
398 | const llvm::Twine &Name = "") { |
399 | assert(Addr1.getType() == Addr2.getType()); |
400 | llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name); |
401 | PHI->addIncoming(Addr1.getPointer(), Block1); |
402 | PHI->addIncoming(Addr2.getPointer(), Block2); |
403 | CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment()); |
404 | return Address(PHI, Align); |
405 | } |
406 | |
407 | TargetCodeGenInfo::~TargetCodeGenInfo() = default; |
408 | |
409 | |
410 | |
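// Size in bytes of the generic Itanium ABI _Unwind_Exception header
// (exception_class, exception_cleanup, private_1, private_2); targets with
// their own unwind ABI override this.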
411 | unsigned TargetCodeGenInfo::getSizeOfUnwindException() const { |
412 | |
413 | |
414 | |
415 | |
416 | |
417 | |
418 | return 32; |
419 | } |
420 | |
421 | bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args, |
422 | const FunctionNoProtoType *fnType) const { |
423 | |
424 | |
425 | |
426 | |
427 | return false; |
428 | } |
429 | |
430 | void |
431 | TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib, |
432 | llvm::SmallString<24> &Opt) const { |
433 | |
434 | |
435 | |
436 | Opt = "-l"; |
437 | Opt += Lib; |
438 | } |
439 | |
440 | unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const { |
441 | |
442 | |
443 | |
444 | |
445 | |
446 | |
447 | |
448 | |
449 | |
450 | |
451 | return llvm::CallingConv::SPIR_KERNEL; |
452 | } |
453 | |
454 | llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM, |
455 | llvm::PointerType *T, QualType QT) const { |
456 | return llvm::ConstantPointerNull::get(T); |
457 | } |
458 | |
459 | LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM, |
460 | const VarDecl *D) const { |
461 | assert(!CGM.getLangOpts().OpenCL && |
462 | !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) && |
463 | "Address space agnostic languages only"); |
464 | return D ? D->getType().getAddressSpace() : LangAS::Default; |
465 | } |
466 | |
467 | llvm::Value *TargetCodeGenInfo::performAddrSpaceCast( |
468 | CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr, |
469 | LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const { |
470 | |
471 | |
472 | if (auto *C = dyn_cast<llvm::Constant>(Src)) |
473 | return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy); |
474 | |
475 | return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( |
476 | Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : ""); |
477 | } |
478 | |
479 | llvm::Constant * |
480 | TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src, |
481 | LangAS SrcAddr, LangAS DestAddr, |
482 | llvm::Type *DestTy) const { |
483 | |
484 | |
485 | return llvm::ConstantExpr::getPointerCast(Src, DestTy); |
486 | } |
487 | |
488 | llvm::SyncScope::ID |
489 | TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts, |
490 | SyncScope Scope, |
491 | llvm::AtomicOrdering Ordering, |
492 | llvm::LLVMContext &Ctx) const { |
493 | return Ctx.getOrInsertSyncScopeID(""); |
494 | } |
495 | |
496 | static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays); |
497 | |
498 | |
499 | |
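// A field is "empty" (ignorable for ABI purposes) if it is an unnamed
// bit-field, a zero-length array, or an (array of) empty record; a field of
// C++ record type only qualifies when it is marked no_unique_address and is
// not wrapped in an array, since it otherwise occupies storage.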
500 | static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, |
501 | bool AllowArrays) { |
502 | if (FD->isUnnamedBitfield()) |
503 | return true; |
504 | |
505 | QualType FT = FD->getType(); |
506 | |
507 | |
508 | |
509 | bool WasArray = false; |
510 | if (AllowArrays) |
511 | while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { |
512 | if (AT->getSize() == 0) |
513 | return true; |
514 | FT = AT->getElementType(); |
515 | |
516 | |
517 | WasArray = true; |
518 | } |
519 | |
520 | const RecordType *RT = FT->getAs<RecordType>(); |
521 | if (!RT) |
522 | return false; |
523 | |
524 | |
525 | |
526 | |
527 | |
528 | |
529 | |
530 | |
531 | |
532 | |
533 | |
534 | if (isa<CXXRecordDecl>(RT->getDecl()) && |
535 | (WasArray || !FD->hasAttr<NoUniqueAddressAttr>())) |
536 | return false; |
537 | |
538 | return isEmptyRecord(Context, FT, AllowArrays); |
539 | } |
540 | |
541 | |
542 | |
543 | |
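// A record is "empty" if it has no flexible array member, all of its C++
// bases are empty, and every field is empty in the isEmptyField sense.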
544 | static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) { |
545 | const RecordType *RT = T->getAs<RecordType>(); |
546 | if (!RT) |
547 | return false; |
548 | const RecordDecl *RD = RT->getDecl(); |
549 | if (RD->hasFlexibleArrayMember()) |
550 | return false; |
551 | |
552 | |
553 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) |
554 | for (const auto &I : CXXRD->bases()) |
555 | if (!isEmptyRecord(Context, I.getType(), true)) |
556 | return false; |
557 | |
558 | for (const auto *I : RD->fields()) |
559 | if (!isEmptyField(Context, I, AllowArrays)) |
560 | return false; |
561 | return true; |
562 | } |
563 | |
564 | |
565 | |
566 | |
567 | |
568 | |
569 | |
570 | |
571 | |
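// If T is a struct that, after ignoring empty fields and bases and looking
// through single-element arrays, contains exactly one scalar element whose
// size equals the size of the whole struct, return that element's type;
// otherwise return null.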
572 | static const Type *isSingleElementStruct(QualType T, ASTContext &Context) { |
573 | const RecordType *RT = T->getAs<RecordType>(); |
574 | if (!RT) |
575 | return nullptr; |
576 | |
577 | const RecordDecl *RD = RT->getDecl(); |
578 | if (RD->hasFlexibleArrayMember()) |
579 | return nullptr; |
580 | |
581 | const Type *Found = nullptr; |
582 | |
583 | |
584 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { |
585 | for (const auto &I : CXXRD->bases()) { |
586 | |
587 | if (isEmptyRecord(Context, I.getType(), true)) |
588 | continue; |
589 | |
590 | |
591 | if (Found) |
592 | return nullptr; |
593 | |
594 | |
595 | |
596 | Found = isSingleElementStruct(I.getType(), Context); |
597 | if (!Found) |
598 | return nullptr; |
599 | } |
600 | } |
601 | |
602 | |
603 | for (const auto *FD : RD->fields()) { |
604 | QualType FT = FD->getType(); |
605 | |
606 | |
607 | if (isEmptyField(Context, FD, true)) |
608 | continue; |
609 | |
610 | |
611 | |
612 | if (Found) |
613 | return nullptr; |
614 | |
615 | |
616 | while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { |
617 | if (AT->getSize().getZExtValue() != 1) |
618 | break; |
619 | FT = AT->getElementType(); |
620 | } |
621 | |
622 | if (!isAggregateTypeForABI(FT)) { |
623 | Found = FT.getTypePtr(); |
624 | } else { |
625 | Found = isSingleElementStruct(FT, Context); |
626 | if (!Found) |
627 | return nullptr; |
628 | } |
629 | } |
630 | |
631 | |
632 | |
633 | if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T)) |
634 | return nullptr; |
635 | |
636 | return Found; |
637 | } |
638 | |
639 | namespace { |
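// Fallback va_arg lowering that simply emits the LLVM "va_arg" instruction.
// It handles only plain direct/extend and indirect cases; the asserts below
// reject padding, realignment, in-register passing and coercion.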
640 | Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty, |
641 | const ABIArgInfo &AI) { |
642 | |
643 | |
644 | |
645 | |
646 | |
647 | |
648 | |
649 | |
650 | |
651 | llvm::Value *Val; |
652 | |
653 | if (AI.isIndirect()) { |
654 | assert(!AI.getPaddingType() && |
655 | "Unexpected PaddingType seen in arginfo in generic VAArg emitter!"); |
656 | assert( |
657 | !AI.getIndirectRealign() && |
658 | "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!"); |
659 | |
660 | auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty); |
661 | CharUnits TyAlignForABI = TyInfo.Align; |
662 | |
663 | llvm::Type *BaseTy = |
664 | llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty)); |
665 | llvm::Value *Addr = |
666 | CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy); |
667 | return Address(Addr, TyAlignForABI); |
668 | } else { |
669 | assert((AI.isDirect() || AI.isExtend()) && |
670 | "Unexpected ArgInfo Kind in generic VAArg emitter!"); |
671 | |
672 | assert(!AI.getInReg() && |
673 | "Unexpected InReg seen in arginfo in generic VAArg emitter!"); |
674 | assert(!AI.getPaddingType() && |
675 | "Unexpected PaddingType seen in arginfo in generic VAArg emitter!"); |
676 | assert(!AI.getDirectOffset() && |
677 | "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!"); |
678 | assert(!AI.getCoerceToType() && |
679 | "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!"); |
680 | |
681 | Address Temp = CGF.CreateMemTemp(Ty, "varet"); |
682 | Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty)); |
683 | CGF.Builder.CreateStore(Val, Temp); |
684 | return Temp; |
685 | } |
686 | } |
687 | |
688 | |
689 | |
690 | |
691 | |
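// The default ABI implementation: produces self-consistent LLVM IR without
// trying to match any particular target's calling convention in detail.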
692 | class DefaultABIInfo : public ABIInfo { |
693 | public: |
694 | DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} |
695 | |
696 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
697 | ABIArgInfo classifyArgumentType(QualType RetTy) const; |
698 | |
699 | void computeInfo(CGFunctionInfo &FI) const override { |
700 | if (!getCXXABI().classifyReturnType(FI)) |
701 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
702 | for (auto &I : FI.arguments()) |
703 | I.info = classifyArgumentType(I.type); |
704 | } |
705 | |
706 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
707 | QualType Ty) const override { |
708 | return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty)); |
709 | } |
710 | }; |
711 | |
712 | class DefaultTargetCodeGenInfo : public TargetCodeGenInfo { |
713 | public: |
714 | DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) |
715 | : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {} |
716 | }; |
717 | |
718 | ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const { |
719 | Ty = useFirstFieldIfTransparentUnion(Ty); |
720 | |
721 | if (isAggregateTypeForABI(Ty)) { |
722 | |
723 | |
724 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) |
725 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
726 | |
727 | return getNaturalAlignIndirect(Ty); |
728 | } |
729 | |
730 | |
731 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
732 | Ty = EnumTy->getDecl()->getIntegerType(); |
733 | |
734 | ASTContext &Context = getContext(); |
735 | if (const auto *EIT = Ty->getAs<ExtIntType>()) |
736 | if (EIT->getNumBits() > |
737 | Context.getTypeSize(Context.getTargetInfo().hasInt128Type() |
738 | ? Context.Int128Ty |
739 | : Context.LongLongTy)) |
740 | return getNaturalAlignIndirect(Ty); |
741 | |
742 | return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) |
743 | : ABIArgInfo::getDirect()); |
744 | } |
745 | |
746 | ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const { |
747 | if (RetTy->isVoidType()) |
748 | return ABIArgInfo::getIgnore(); |
749 | |
750 | if (isAggregateTypeForABI(RetTy)) |
751 | return getNaturalAlignIndirect(RetTy); |
752 | |
753 | |
754 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) |
755 | RetTy = EnumTy->getDecl()->getIntegerType(); |
756 | |
757 | if (const auto *EIT = RetTy->getAs<ExtIntType>()) |
758 | if (EIT->getNumBits() > |
759 | getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type() |
760 | ? getContext().Int128Ty |
761 | : getContext().LongLongTy)) |
762 | return getNaturalAlignIndirect(RetTy); |
763 | |
764 | return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) |
765 | : ABIArgInfo::getDirect()); |
766 | } |
767 | |
768 | |
769 | |
770 | |
771 | |
772 | |
773 | |
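// WebAssembly ABI implementation: mostly defers to DefaultABIInfo, with
// special handling for single-element structs and, under the experimental
// multivalue ABI, for aggregates without bit-fields.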
774 | class WebAssemblyABIInfo final : public SwiftABIInfo { |
775 | public: |
776 | enum ABIKind { |
777 | MVP = 0, |
778 | ExperimentalMV = 1, |
779 | }; |
780 | |
781 | private: |
782 | DefaultABIInfo defaultInfo; |
783 | ABIKind Kind; |
784 | |
785 | public: |
786 | explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind) |
787 | : SwiftABIInfo(CGT), defaultInfo(CGT), Kind(Kind) {} |
788 | |
789 | private: |
790 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
791 | ABIArgInfo classifyArgumentType(QualType Ty) const; |
792 | |
793 | |
794 | |
795 | |
796 | void computeInfo(CGFunctionInfo &FI) const override { |
797 | if (!getCXXABI().classifyReturnType(FI)) |
798 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
799 | for (auto &Arg : FI.arguments()) |
800 | Arg.info = classifyArgumentType(Arg.type); |
801 | } |
802 | |
803 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
804 | QualType Ty) const override; |
805 | |
806 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, |
807 | bool asReturnValue) const override { |
808 | return occupiesMoreThan(CGT, scalars, 4); |
809 | } |
810 | |
811 | bool isSwiftErrorInRegister() const override { |
812 | return false; |
813 | } |
814 | }; |
815 | |
816 | class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo { |
817 | public: |
818 | explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, |
819 | WebAssemblyABIInfo::ABIKind K) |
820 | : TargetCodeGenInfo(std::make_unique<WebAssemblyABIInfo>(CGT, K)) {} |
821 | |
822 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
823 | CodeGen::CodeGenModule &CGM) const override { |
824 | TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); |
825 | if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) { |
826 | if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) { |
827 | llvm::Function *Fn = cast<llvm::Function>(GV); |
828 | llvm::AttrBuilder B; |
829 | B.addAttribute("wasm-import-module", Attr->getImportModule()); |
830 | Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); |
831 | } |
832 | if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) { |
833 | llvm::Function *Fn = cast<llvm::Function>(GV); |
834 | llvm::AttrBuilder B; |
835 | B.addAttribute("wasm-import-name", Attr->getImportName()); |
836 | Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); |
837 | } |
838 | if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) { |
839 | llvm::Function *Fn = cast<llvm::Function>(GV); |
840 | llvm::AttrBuilder B; |
841 | B.addAttribute("wasm-export-name", Attr->getExportName()); |
842 | Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); |
843 | } |
844 | } |
845 | |
846 | if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) { |
847 | llvm::Function *Fn = cast<llvm::Function>(GV); |
848 | if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype()) |
849 | Fn->addFnAttr("no-prototype"); |
850 | } |
851 | } |
852 | }; |
853 | |
854 | |
855 | ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const { |
856 | Ty = useFirstFieldIfTransparentUnion(Ty); |
857 | |
858 | if (isAggregateTypeForABI(Ty)) { |
859 | |
860 | |
861 | if (auto RAA = getRecordArgABI(Ty, getCXXABI())) |
862 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
863 | |
864 | if (isEmptyRecord(getContext(), Ty, true)) |
865 | return ABIArgInfo::getIgnore(); |
866 | |
867 | |
868 | |
869 | if (const Type *SeltTy = isSingleElementStruct(Ty, getContext())) |
870 | return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); |
871 | |
872 | if (Kind == ABIKind::ExperimentalMV) { |
873 | const RecordType *RT = Ty->getAs<RecordType>(); |
874 | assert(RT); |
875 | bool HasBitField = false; |
876 | for (auto *Field : RT->getDecl()->fields()) { |
877 | if (Field->isBitField()) { |
878 | HasBitField = true; |
879 | break; |
880 | } |
881 | } |
882 | if (!HasBitField) |
883 | return ABIArgInfo::getExpand(); |
884 | } |
885 | } |
886 | |
887 | |
888 | return defaultInfo.classifyArgumentType(Ty); |
889 | } |
890 | |
891 | ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const { |
892 | if (isAggregateTypeForABI(RetTy)) { |
893 | |
894 | |
895 | if (!getRecordArgABI(RetTy, getCXXABI())) { |
896 | |
897 | if (isEmptyRecord(getContext(), RetTy, true)) |
898 | return ABIArgInfo::getIgnore(); |
899 | |
900 | |
901 | |
902 | if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) |
903 | return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); |
904 | |
905 | if (Kind == ABIKind::ExperimentalMV) |
906 | return ABIArgInfo::getDirect(); |
907 | } |
908 | } |
909 | |
910 | |
911 | return defaultInfo.classifyReturnType(RetTy); |
912 | } |
913 | |
914 | Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
915 | QualType Ty) const { |
916 | bool IsIndirect = isAggregateTypeForABI(Ty) && |
917 | !isEmptyRecord(getContext(), Ty, true) && |
918 | !isSingleElementStruct(Ty, getContext()); |
919 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, |
920 | getContext().getTypeInfoInChars(Ty), |
921 | CharUnits::fromQuantity(4), |
922 | true); |
923 | } |
924 | |
925 | |
926 | |
927 | |
928 | |
929 | |
930 | |
931 | |
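// PNaCl bitcode ABI implementation: aggregates are passed indirectly and
// scalars directly (with integer promotion); _ExtInt types wider than 64
// bits are passed indirectly.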
932 | class PNaClABIInfo : public ABIInfo { |
933 | public: |
934 | PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} |
935 | |
936 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
937 | ABIArgInfo classifyArgumentType(QualType RetTy) const; |
938 | |
939 | void computeInfo(CGFunctionInfo &FI) const override; |
940 | Address EmitVAArg(CodeGenFunction &CGF, |
941 | Address VAListAddr, QualType Ty) const override; |
942 | }; |
943 | |
944 | class PNaClTargetCodeGenInfo : public TargetCodeGenInfo { |
945 | public: |
946 | PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) |
947 | : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {} |
948 | }; |
949 | |
950 | void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const { |
951 | if (!getCXXABI().classifyReturnType(FI)) |
952 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
953 | |
954 | for (auto &I : FI.arguments()) |
955 | I.info = classifyArgumentType(I.type); |
956 | } |
957 | |
958 | Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
959 | QualType Ty) const { |
960 | |
961 | |
962 | |
963 | |
964 | |
965 | |
966 | return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect()); |
967 | } |
968 | |
969 | |
970 | ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const { |
971 | if (isAggregateTypeForABI(Ty)) { |
972 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) |
973 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
974 | return getNaturalAlignIndirect(Ty); |
975 | } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) { |
976 | |
977 | Ty = EnumTy->getDecl()->getIntegerType(); |
978 | } else if (Ty->isFloatingType()) { |
979 | |
980 | return ABIArgInfo::getDirect(); |
981 | } else if (const auto *EIT = Ty->getAs<ExtIntType>()) { |
982 | |
983 | if (EIT->getNumBits() > 64) |
984 | return getNaturalAlignIndirect(Ty); |
985 | return ABIArgInfo::getDirect(); |
986 | } |
987 | |
988 | return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) |
989 | : ABIArgInfo::getDirect()); |
990 | } |
991 | |
992 | ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const { |
993 | if (RetTy->isVoidType()) |
994 | return ABIArgInfo::getIgnore(); |
995 | |
996 | |
997 | if (isAggregateTypeForABI(RetTy)) |
998 | return getNaturalAlignIndirect(RetTy); |
999 | |
1000 | |
1001 | if (const auto *EIT = RetTy->getAs<ExtIntType>()) { |
1002 | if (EIT->getNumBits() > 64) |
1003 | return getNaturalAlignIndirect(RetTy); |
1004 | return ABIArgInfo::getDirect(); |
1005 | } |
1006 | |
1007 | |
1008 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) |
1009 | RetTy = EnumTy->getDecl()->getIntegerType(); |
1010 | |
1011 | return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) |
1012 | : ABIArgInfo::getDirect()); |
1013 | } |
1014 | |
1015 | |
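// An MMX candidate is a 64-bit vector of integer elements whose element
// width is itself smaller than 64 bits (e.g. v8i8, v4i16, v2i32).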
1016 | bool IsX86_MMXType(llvm::Type *IRType) { |
1017 | |
1018 | return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 && |
1019 | cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() && |
1020 | IRType->getScalarSizeInBits() != 64; |
1021 | } |
1022 | |
1023 | static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF, |
1024 | StringRef Constraint, |
1025 | llvm::Type* Ty) { |
1026 | bool IsMMXCons = llvm::StringSwitch<bool>(Constraint) |
1027 | .Cases("y", "&y", "^Ym", true) |
1028 | .Default(false); |
1029 | if (IsMMXCons && Ty->isVectorTy()) { |
1030 | if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedSize() != |
1031 | 64) { |
1032 | |
1033 | return nullptr; |
1034 | } |
1035 | |
1036 | return llvm::Type::getX86_MMXTy(CGF.getLLVMContext()); |
1037 | } |
1038 | |
1039 | |
1040 | return Ty; |
1041 | } |
1042 | |
1043 | |
1044 | |
1045 | static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) { |
1046 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { |
1047 | if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) { |
1048 | if (BT->getKind() == BuiltinType::LongDouble) { |
1049 | if (&Context.getTargetInfo().getLongDoubleFormat() == |
1050 | &llvm::APFloat::x87DoubleExtended()) |
1051 | return false; |
1052 | } |
1053 | return true; |
1054 | } |
1055 | } else if (const VectorType *VT = Ty->getAs<VectorType>()) { |
1056 | |
1057 | |
1058 | unsigned VecSize = Context.getTypeSize(VT); |
1059 | if (VecSize == 128 || VecSize == 256 || VecSize == 512) |
1060 | return true; |
1061 | } |
1062 | return false; |
1063 | } |
1064 | |
1065 | |
1066 | |
1067 | static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) { |
1068 | return NumMembers <= 4; |
1069 | } |
1070 | |
1071 | |
1072 | static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) { |
1073 | auto AI = ABIArgInfo::getDirect(T); |
1074 | AI.setInReg(true); |
1075 | AI.setCanBeFlattened(false); |
1076 | return AI; |
1077 | } |
1078 | |
1079 | |
1080 | |
1081 | |
1082 | |
1083 | |
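// Per-call-site classification state for the 32-bit x86 ABIs: the calling
// convention, the remaining free integer and SSE registers, and which
// vectorcall arguments were pre-assigned in the first pass.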
1084 | struct CCState { |
1085 | CCState(CGFunctionInfo &FI) |
1086 | : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()) {} |
1087 | |
1088 | llvm::SmallBitVector IsPreassigned; |
1089 | unsigned CC = CallingConv::CC_C; |
1090 | unsigned FreeRegs = 0; |
1091 | unsigned FreeSSERegs = 0; |
1092 | }; |
1093 | |
1094 | |
1095 | class X86_32ABIInfo : public SwiftABIInfo { |
1096 | enum Class { |
1097 | Integer, |
1098 | Float |
1099 | }; |
1100 | |
1101 | static const unsigned MinABIStackAlignInBytes = 4; |
1102 | |
1103 | bool IsDarwinVectorABI; |
1104 | bool IsRetSmallStructInRegABI; |
1105 | bool IsWin32StructABI; |
1106 | bool IsSoftFloatABI; |
1107 | bool IsMCUABI; |
1108 | bool IsLinuxABI; |
1109 | unsigned DefaultNumRegisterParameters; |
1110 | |
1111 | static bool isRegisterSize(unsigned Size) { |
1112 | return (Size == 8 || Size == 16 || Size == 32 || Size == 64); |
1113 | } |
1114 | |
1115 | bool isHomogeneousAggregateBaseType(QualType Ty) const override { |
1116 | |
1117 | return isX86VectorTypeForVectorCall(getContext(), Ty); |
1118 | } |
1119 | |
1120 | bool isHomogeneousAggregateSmallEnough(const Type *Ty, |
1121 | uint64_t NumMembers) const override { |
1122 | |
1123 | return isX86VectorCallAggregateSmallEnough(NumMembers); |
1124 | } |
1125 | |
1126 | bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const; |
1127 | |
1128 | |
1129 | |
1130 | ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const; |
1131 | |
1132 | ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const; |
1133 | |
1134 | |
1135 | unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const; |
1136 | |
1137 | Class classify(QualType Ty) const; |
1138 | ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const; |
1139 | ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const; |
1140 | |
1141 | |
1142 | |
1143 | bool updateFreeRegs(QualType Ty, CCState &State) const; |
1144 | |
1145 | bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg, |
1146 | bool &NeedsPadding) const; |
1147 | bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const; |
1148 | |
1149 | bool canExpandIndirectArgument(QualType Ty) const; |
1150 | |
1151 | |
1152 | |
1153 | void rewriteWithInAlloca(CGFunctionInfo &FI) const; |
1154 | |
1155 | void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields, |
1156 | CharUnits &StackOffset, ABIArgInfo &Info, |
1157 | QualType Type) const; |
1158 | void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const; |
1159 | |
1160 | public: |
1161 | |
1162 | void computeInfo(CGFunctionInfo &FI) const override; |
1163 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
1164 | QualType Ty) const override; |
1165 | |
1166 | X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI, |
1167 | bool RetSmallStructInRegABI, bool Win32StructABI, |
1168 | unsigned NumRegisterParameters, bool SoftFloatABI) |
1169 | : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI), |
1170 | IsRetSmallStructInRegABI(RetSmallStructInRegABI), |
1171 | IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI), |
1172 | IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()), |
1173 | IsLinuxABI(CGT.getTarget().getTriple().isOSLinux()), |
1174 | DefaultNumRegisterParameters(NumRegisterParameters) {} |
1175 | |
1176 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, |
1177 | bool asReturnValue) const override { |
1178 | |
1179 | |
1180 | |
1181 | |
1182 | return occupiesMoreThan(CGT, scalars, 3); |
1183 | } |
1184 | |
1185 | bool isSwiftErrorInRegister() const override { |
1186 | |
1187 | return false; |
1188 | } |
1189 | }; |
1190 | |
1191 | class X86_32TargetCodeGenInfo : public TargetCodeGenInfo { |
1192 | public: |
1193 | X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI, |
1194 | bool RetSmallStructInRegABI, bool Win32StructABI, |
1195 | unsigned NumRegisterParameters, bool SoftFloatABI) |
1196 | : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>( |
1197 | CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI, |
1198 | NumRegisterParameters, SoftFloatABI)) {} |
1199 | |
1200 | static bool isStructReturnInRegABI( |
1201 | const llvm::Triple &Triple, const CodeGenOptions &Opts); |
1202 | |
1203 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
1204 | CodeGen::CodeGenModule &CGM) const override; |
1205 | |
1206 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { |
1207 | |
1208 | if (CGM.getTarget().getTriple().isOSDarwin()) return 5; |
1209 | return 4; |
1210 | } |
1211 | |
1212 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
1213 | llvm::Value *Address) const override; |
1214 | |
1215 | llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, |
1216 | StringRef Constraint, |
1217 | llvm::Type* Ty) const override { |
1218 | return X86AdjustInlineAsmType(CGF, Constraint, Ty); |
1219 | } |
1220 | |
1221 | void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue, |
1222 | std::string &Constraints, |
1223 | std::vector<llvm::Type *> &ResultRegTypes, |
1224 | std::vector<llvm::Type *> &ResultTruncRegTypes, |
1225 | std::vector<LValue> &ResultRegDests, |
1226 | std::string &AsmString, |
1227 | unsigned NumOutputs) const override; |
1228 | |
1229 | llvm::Constant * |
1230 | getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override { |
1231 | unsigned Sig = (0xeb << 0) | |
1232 | (0x06 << 8) | |
1233 | ('v' << 16) | |
1234 | ('2' << 24); |
1235 | return llvm::ConstantInt::get(CGM.Int32Ty, Sig); |
1236 | } |
1237 | |
1238 | StringRef getARCRetainAutoreleasedReturnValueMarker() const override { |
1239 | return "movl\t%ebp, %ebp" |
1240 | "\t\t// marker for objc_retainAutoreleaseReturnValue"; |
1241 | } |
1242 | }; |
1243 | |
1244 | } |
1245 | |
1246 | |
1247 | |
1248 | |
1249 | |
1250 | |
1251 | |
1252 | |
1253 | |
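// After NumNewOuts extra outputs have been added to an inline-asm statement,
// renumber every operand reference ($N or ${N:...}) in AsmString that
// referred to operand FirstIn or later.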
1254 | static void rewriteInputConstraintReferences(unsigned FirstIn, |
1255 | unsigned NumNewOuts, |
1256 | std::string &AsmString) { |
1257 | std::string Buf; |
1258 | llvm::raw_string_ostream OS(Buf); |
1259 | size_t Pos = 0; |
1260 | while (Pos < AsmString.size()) { |
1261 | size_t DollarStart = AsmString.find('$', Pos); |
1262 | if (DollarStart == std::string::npos) |
1263 | DollarStart = AsmString.size(); |
1264 | size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart); |
1265 | if (DollarEnd == std::string::npos) |
1266 | DollarEnd = AsmString.size(); |
1267 | OS << StringRef(&AsmString[Pos], DollarEnd - Pos); |
1268 | Pos = DollarEnd; |
1269 | size_t NumDollars = DollarEnd - DollarStart; |
1270 | if (NumDollars % 2 != 0 && Pos < AsmString.size()) { |
1271 | |
1272 | size_t DigitStart = Pos; |
1273 | if (AsmString[DigitStart] == '{') { |
1274 | OS << '{'; |
1275 | ++DigitStart; |
1276 | } |
1277 | size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart); |
1278 | if (DigitEnd == std::string::npos) |
1279 | DigitEnd = AsmString.size(); |
1280 | StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart); |
1281 | unsigned OperandIndex; |
1282 | if (!OperandStr.getAsInteger(10, OperandIndex)) { |
1283 | if (OperandIndex >= FirstIn) |
1284 | OperandIndex += NumNewOuts; |
1285 | OS << OperandIndex; |
1286 | } else { |
1287 | OS << OperandStr; |
1288 | } |
1289 | Pos = DigitEnd; |
1290 | } |
1291 | } |
1292 | AsmString = std::move(OS.str()); |
1293 | } |
1294 | |
1295 | |
1296 | void X86_32TargetCodeGenInfo::addReturnRegisterOutputs( |
1297 | CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints, |
1298 | std::vector<llvm::Type *> &ResultRegTypes, |
1299 | std::vector<llvm::Type *> &ResultTruncRegTypes, |
1300 | std::vector<LValue> &ResultRegDests, std::string &AsmString, |
1301 | unsigned NumOutputs) const { |
1302 | uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType()); |
1303 | |
1304 | |
1305 | |
1306 | if (!Constraints.empty()) |
1307 | Constraints += ','; |
1308 | if (RetWidth <= 32) { |
1309 | Constraints += "={eax}"; |
1310 | ResultRegTypes.push_back(CGF.Int32Ty); |
1311 | } else { |
1312 | |
1313 | Constraints += "=A"; |
1314 | ResultRegTypes.push_back(CGF.Int64Ty); |
1315 | } |
1316 | |
1317 | |
1318 | llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth); |
1319 | ResultTruncRegTypes.push_back(CoerceTy); |
1320 | |
1321 | |
1322 | ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(CGF), |
1323 | CoerceTy->getPointerTo())); |
1324 | ResultRegDests.push_back(ReturnSlot); |
1325 | |
1326 | rewriteInputConstraintReferences(NumOutputs, 1, AsmString); |
1327 | } |
1328 | |
1329 | |
1330 | |
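// Decide whether a type is returned in registers under the i386 "small
// struct in registers" return ABI: it must be of register size (8/16/32/64
// bits, or at most 64 bits for the MCU ABI) and every non-empty field must
// itself be returnable in registers.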
1331 | bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, |
1332 | ASTContext &Context) const { |
1333 | uint64_t Size = Context.getTypeSize(Ty); |
1334 | |
1335 | |
1336 | |
1337 | if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size))) |
1338 | return false; |
1339 | |
1340 | if (Ty->isVectorType()) { |
1341 | |
1342 | |
1343 | if (Size == 64 || Size == 128) |
1344 | return false; |
1345 | |
1346 | return true; |
1347 | } |
1348 | |
1349 | |
1350 | |
1351 | if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() || |
1352 | Ty->isAnyComplexType() || Ty->isEnumeralType() || |
1353 | Ty->isBlockPointerType() || Ty->isMemberPointerType()) |
1354 | return true; |
1355 | |
1356 | |
1357 | if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) |
1358 | return shouldReturnTypeInRegister(AT->getElementType(), Context); |
1359 | |
1360 | |
1361 | const RecordType *RT = Ty->getAs<RecordType>(); |
1362 | if (!RT) return false; |
1363 | |
1364 | |
1365 | |
1366 | |
1367 | |
1368 | for (const auto *FD : RT->getDecl()->fields()) { |
1369 | |
1370 | if (isEmptyField(Context, FD, true)) |
1371 | continue; |
1372 | |
1373 | |
1374 | if (!shouldReturnTypeInRegister(FD->getType(), Context)) |
1375 | return false; |
1376 | } |
1377 | return true; |
1378 | } |
1379 | |
1380 | static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) { |
1381 | |
1382 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) |
1383 | Ty = CTy->getElementType(); |
1384 | |
1385 | |
1386 | |
1387 | |
1388 | if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() && |
1389 | !Ty->isEnumeralType() && !Ty->isBlockPointerType()) |
1390 | return false; |
1391 | |
1392 | uint64_t Size = Context.getTypeSize(Ty); |
1393 | return Size == 32 || Size == 64; |
1394 | } |
1395 | |
1396 | static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, |
1397 | uint64_t &Size) { |
1398 | for (const auto *FD : RD->fields()) { |
1399 | |
1400 | |
1401 | |
1402 | if (!is32Or64BitBasicType(FD->getType(), Context)) |
1403 | return false; |
1404 | |
1405 | |
1406 | |
1407 | |
1408 | if (FD->isBitField()) |
1409 | return false; |
1410 | |
1411 | Size += Context.getTypeSize(FD->getType()); |
1412 | } |
1413 | return true; |
1414 | } |
1415 | |
1416 | static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, |
1417 | uint64_t &Size) { |
1418 | |
1419 | for (const CXXBaseSpecifier &Base : RD->bases()) { |
1420 | if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(), |
1421 | Size)) |
1422 | return false; |
1423 | } |
1424 | if (!addFieldSizes(Context, RD, Size)) |
1425 | return false; |
1426 | return true; |
1427 | } |
1428 | |
1429 | |
1430 | |
1431 | |
1432 | |
1433 | bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const { |
1434 | |
1435 | const RecordType *RT = Ty->getAs<RecordType>(); |
1436 | if (!RT) |
1437 | return false; |
1438 | const RecordDecl *RD = RT->getDecl(); |
1439 | uint64_t Size = 0; |
1440 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { |
1441 | if (!IsWin32StructABI) { |
1442 | |
1443 | |
1444 | if (!CXXRD->isCLike()) |
1445 | return false; |
1446 | } else { |
1447 | |
1448 | if (CXXRD->isDynamicClass()) |
1449 | return false; |
1450 | } |
1451 | if (!addBaseAndFieldSizes(getContext(), CXXRD, Size)) |
1452 | return false; |
1453 | } else { |
1454 | if (!addFieldSizes(getContext(), RD, Size)) |
1455 | return false; |
1456 | } |
1457 | |
1458 | |
1459 | return Size == getContext().getTypeSize(Ty); |
1460 | } |
1461 | |
1462 | ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const { |
1463 | |
1464 | |
1465 | if (State.FreeRegs) { |
1466 | --State.FreeRegs; |
1467 | if (!IsMCUABI) |
1468 | return getNaturalAlignIndirectInReg(RetTy); |
1469 | } |
1470 | return getNaturalAlignIndirect(RetTy, false); |
1471 | } |
1472 | |
1473 | ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, |
1474 | CCState &State) const { |
1475 | if (RetTy->isVoidType()) |
1476 | return ABIArgInfo::getIgnore(); |
1477 | |
1478 | const Type *Base = nullptr; |
1479 | uint64_t NumElts = 0; |
1480 | if ((State.CC == llvm::CallingConv::X86_VectorCall || |
1481 | State.CC == llvm::CallingConv::X86_RegCall) && |
1482 | isHomogeneousAggregate(RetTy, Base, NumElts)) { |
1483 | |
1484 | return ABIArgInfo::getDirect(); |
1485 | } |
1486 | |
1487 | if (const VectorType *VT = RetTy->getAs<VectorType>()) { |
1488 | |
1489 | if (IsDarwinVectorABI) { |
1490 | uint64_t Size = getContext().getTypeSize(RetTy); |
1491 | |
1492 | |
1493 | |
1494 | |
1495 | if (Size == 128) |
1496 | return ABIArgInfo::getDirect(llvm::FixedVectorType::get( |
1497 | llvm::Type::getInt64Ty(getVMContext()), 2)); |
1498 | |
1499 | |
1500 | |
1501 | if ((Size == 8 || Size == 16 || Size == 32) || |
1502 | (Size == 64 && VT->getNumElements() == 1)) |
1503 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), |
1504 | Size)); |
1505 | |
1506 | return getIndirectReturnResult(RetTy, State); |
1507 | } |
1508 | |
1509 | return ABIArgInfo::getDirect(); |
1510 | } |
1511 | |
1512 | if (isAggregateTypeForABI(RetTy)) { |
1513 | if (const RecordType *RT = RetTy->getAs<RecordType>()) { |
1514 | |
1515 | if (RT->getDecl()->hasFlexibleArrayMember()) |
1516 | return getIndirectReturnResult(RetTy, State); |
1517 | } |
1518 | |
1519 | |
1520 | if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType()) |
1521 | return getIndirectReturnResult(RetTy, State); |
1522 | |
1523 | |
1524 | if (isEmptyRecord(getContext(), RetTy, true)) |
1525 | return ABIArgInfo::getIgnore(); |
1526 | |
1527 | |
1528 | |
1529 | if (shouldReturnTypeInRegister(RetTy, getContext())) { |
1530 | uint64_t Size = getContext().getTypeSize(RetTy); |
1531 | |
1532 | |
1533 | |
1534 | |
1535 | |
1536 | |
1537 | if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) |
1538 | if ((!IsWin32StructABI && SeltTy->isRealFloatingType()) |
1539 | || SeltTy->hasPointerRepresentation()) |
1540 | return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); |
1541 | |
1542 | |
1543 | |
1544 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size)); |
1545 | } |
1546 | |
1547 | return getIndirectReturnResult(RetTy, State); |
1548 | } |
1549 | |
1550 | |
1551 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) |
1552 | RetTy = EnumTy->getDecl()->getIntegerType(); |
1553 | |
1554 | if (const auto *EIT = RetTy->getAs<ExtIntType>()) |
1555 | if (EIT->getNumBits() > 64) |
1556 | return getIndirectReturnResult(RetTy, State); |
1557 | |
1558 | return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) |
1559 | : ABIArgInfo::getDirect()); |
1560 | } |
1561 | |
1562 | static bool isSIMDVectorType(ASTContext &Context, QualType Ty) { |
1563 | return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128; |
1564 | } |
1565 | |
1566 | static bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) { |
1567 | const RecordType *RT = Ty->getAs<RecordType>(); |
1568 | if (!RT) |
1569 |     return false; |
1570 | const RecordDecl *RD = RT->getDecl(); |
1571 | |
1572 | |
1573 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) |
1574 | for (const auto &I : CXXRD->bases()) |
1575 | if (!isRecordWithSIMDVectorType(Context, I.getType())) |
1576 | return false; |
1577 | |
1578 | for (const auto *i : RD->fields()) { |
1579 | QualType FT = i->getType(); |
1580 | |
1581 | if (isSIMDVectorType(Context, FT)) |
1582 | return true; |
1583 | |
1584 | if (isRecordWithSIMDVectorType(Context, FT)) |
1585 | return true; |
1586 | } |
1587 | |
1588 | return false; |
1589 | } |
1590 | |
1591 | unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty, |
1592 | unsigned Align) const { |
1593 | |
1594 | |
1595 | if (Align <= MinABIStackAlignInBytes) |
1596 | return 0; |
1597 | |
1598 | if (IsLinuxABI) { |
1599 | |
1600 | |
1601 | |
1602 | |
1603 | if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64)) |
1604 | return Align; |
1605 | } |
1606 | |
1607 | if (!IsDarwinVectorABI) { |
1608 | |
1609 | return MinABIStackAlignInBytes; |
1610 | } |
1611 | |
1612 | |
1613 | if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) || |
1614 | isRecordWithSIMDVectorType(getContext(), Ty))) |
1615 | return 16; |
1616 | |
1617 | return MinABIStackAlignInBytes; |
1618 | } |
1619 | |
1620 | ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal, |
1621 | CCState &State) const { |
1622 | if (!ByVal) { |
1623 | if (State.FreeRegs) { |
1624 | --State.FreeRegs; |
1625 | if (!IsMCUABI) |
1626 | return getNaturalAlignIndirectInReg(Ty); |
1627 | } |
1628 | return getNaturalAlignIndirect(Ty, false); |
1629 | } |
1630 | |
1631 | |
1632 | unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; |
1633 | unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign); |
1634 | if (StackAlign == 0) |
1635 | return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), true); |
1636 | |
1637 | |
1638 | |
1639 | bool Realign = TypeAlign > StackAlign; |
1640 | return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign), |
1641 | true, Realign); |
1642 | } |
1643 | |
1644 | X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const { |
1645 | const Type *T = isSingleElementStruct(Ty, getContext()); |
1646 | if (!T) |
1647 | T = Ty.getTypePtr(); |
1648 | |
1649 | if (const BuiltinType *BT = T->getAs<BuiltinType>()) { |
1650 | BuiltinType::Kind K = BT->getKind(); |
1651 | if (K == BuiltinType::Float || K == BuiltinType::Double) |
1652 | return Float; |
1653 | } |
1654 | return Integer; |
1655 | } |
1656 | |
1657 | bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const { |
1658 | if (!IsSoftFloatABI) { |
1659 | Class C = classify(Ty); |
1660 | if (C == Float) |
1661 | return false; |
1662 | } |
1663 | |
1664 | unsigned Size = getContext().getTypeSize(Ty); |
1665 | unsigned SizeInRegs = (Size + 31) / 32; |
1666 | |
1667 | if (SizeInRegs == 0) |
1668 | return false; |
1669 | |
1670 | if (!IsMCUABI) { |
1671 | if (SizeInRegs > State.FreeRegs) { |
1672 | State.FreeRegs = 0; |
1673 | return false; |
1674 | } |
1675 | } else { |
1676 | |
1677 | |
1678 | |
1679 | |
1680 | if (SizeInRegs > State.FreeRegs || SizeInRegs > 2) |
1681 | return false; |
1682 | } |
1683 | |
1684 | State.FreeRegs -= SizeInRegs; |
1685 | return true; |
1686 | } |
1687 | |
1688 | bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State, |
1689 | bool &InReg, |
1690 | bool &NeedsPadding) const { |
1691 | |
1692 | |
1693 | |
1694 | if (IsWin32StructABI && isAggregateTypeForABI(Ty)) |
1695 | return false; |
1696 | |
1697 | NeedsPadding = false; |
1698 | InReg = !IsMCUABI; |
1699 | |
1700 | if (!updateFreeRegs(Ty, State)) |
1701 | return false; |
1702 | |
1703 | if (IsMCUABI) |
1704 | return true; |
1705 | |
1706 | if (State.CC == llvm::CallingConv::X86_FastCall || |
1707 | State.CC == llvm::CallingConv::X86_VectorCall || |
1708 | State.CC == llvm::CallingConv::X86_RegCall) { |
1709 | if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs) |
1710 | NeedsPadding = true; |
1711 | |
1712 | return false; |
1713 | } |
1714 | |
1715 | return true; |
1716 | } |
1717 | |
1718 | bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const { |
1719 | if (!updateFreeRegs(Ty, State)) |
1720 | return false; |
1721 | |
1722 | if (IsMCUABI) |
1723 | return false; |
1724 | |
1725 | if (State.CC == llvm::CallingConv::X86_FastCall || |
1726 | State.CC == llvm::CallingConv::X86_VectorCall || |
1727 | State.CC == llvm::CallingConv::X86_RegCall) { |
1728 | if (getContext().getTypeSize(Ty) > 32) |
1729 | return false; |
1730 | |
1731 | return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() || |
1732 | Ty->isReferenceType()); |
1733 | } |
1734 | |
1735 | return true; |
1736 | } |
1737 | |
1738 | void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const { |
1739 | |
1740 | |
1741 | |
1742 | |
1743 | |
1744 | |
1745 | |
1746 | |
1747 | |
1748 | MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments(); |
1749 | for (int I = 0, E = Args.size(); I < E; ++I) { |
1750 | const Type *Base = nullptr; |
1751 | uint64_t NumElts = 0; |
1752 | const QualType &Ty = Args[I].type; |
1753 | if ((Ty->isVectorType() || Ty->isBuiltinType()) && |
1754 | isHomogeneousAggregate(Ty, Base, NumElts)) { |
1755 | if (State.FreeSSERegs >= NumElts) { |
1756 | State.FreeSSERegs -= NumElts; |
1757 | Args[I].info = ABIArgInfo::getDirectInReg(); |
1758 | State.IsPreassigned.set(I); |
1759 | } |
1760 | } |
1761 | } |
1762 | } |
1763 | |
1764 | ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, |
1765 | CCState &State) const { |
1766 | |
1767 | bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall; |
1768 | bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall; |
1769 | bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall; |
1770 | |
1771 | Ty = useFirstFieldIfTransparentUnion(Ty); |
1772 | TypeInfo TI = getContext().getTypeInfo(Ty); |
1773 | |
1774 | |
1775 | const RecordType *RT = Ty->getAs<RecordType>(); |
1776 | if (RT) { |
1777 | CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); |
1778 | if (RAA == CGCXXABI::RAA_Indirect) { |
1779 | return getIndirectResult(Ty, false, State); |
1780 | } else if (RAA == CGCXXABI::RAA_DirectInMemory) { |
1781 | |
1782 | return ABIArgInfo::getInAlloca(0); |
1783 | } |
1784 | } |
1785 | |
1786 | |
1787 | |
1788 | const Type *Base = nullptr; |
1789 | uint64_t NumElts = 0; |
1790 | if ((IsRegCall || IsVectorCall) && |
1791 | isHomogeneousAggregate(Ty, Base, NumElts)) { |
1792 | if (State.FreeSSERegs >= NumElts) { |
1793 | State.FreeSSERegs -= NumElts; |
1794 | |
1795 | |
1796 | |
1797 | if (IsVectorCall) |
1798 | return getDirectX86Hva(); |
1799 | |
1800 | if (Ty->isBuiltinType() || Ty->isVectorType()) |
1801 | return ABIArgInfo::getDirect(); |
1802 | return ABIArgInfo::getExpand(); |
1803 | } |
1804 | return getIndirectResult(Ty, false, State); |
1805 | } |
1806 | |
1807 | if (isAggregateTypeForABI(Ty)) { |
1808 | |
1809 | |
1810 | if (RT && RT->getDecl()->hasFlexibleArrayMember()) |
1811 | return getIndirectResult(Ty, true, State); |
1812 | |
1813 | |
1814 | if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true)) |
1815 | return ABIArgInfo::getIgnore(); |
1816 | |
1817 | llvm::LLVMContext &LLVMContext = getVMContext(); |
1818 | llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); |
1819 | bool NeedsPadding = false; |
1820 | bool InReg; |
1821 | if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) { |
1822 | unsigned SizeInRegs = (TI.Width + 31) / 32; |
1823 | SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32); |
1824 | llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); |
1825 | if (InReg) |
1826 | return ABIArgInfo::getDirectInReg(Result); |
1827 | else |
1828 | return ABIArgInfo::getDirect(Result); |
1829 | } |
1830 | llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr; |
1831 | |
1832 | |
1833 | |
1834 | if (IsWin32StructABI && TI.AlignIsRequired && TI.Align > 32) |
1835 | return getIndirectResult(Ty, false, State); |
1836 | |
1837 | |
1838 | |
1839 | |
1840 | |
1841 | |
1842 | |
1843 | if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) && |
1844 | canExpandIndirectArgument(Ty)) |
1845 | return ABIArgInfo::getExpandWithPadding( |
1846 | IsFastCall || IsVectorCall || IsRegCall, PaddingType); |
1847 | |
1848 | return getIndirectResult(Ty, true, State); |
1849 | } |
1850 | |
1851 | if (const VectorType *VT = Ty->getAs<VectorType>()) { |
1852 | |
1853 | |
1854 | |
1855 | if (IsWin32StructABI) { |
1856 | if (TI.Width <= 512 && State.FreeSSERegs > 0) { |
1857 | --State.FreeSSERegs; |
1858 | return ABIArgInfo::getDirectInReg(); |
1859 | } |
1860 | return getIndirectResult(Ty, false, State); |
1861 | } |
1862 | |
1863 | |
1864 | |
1865 | if (IsDarwinVectorABI) { |
1866 | if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) || |
1867 | (TI.Width == 64 && VT->getNumElements() == 1)) |
1868 | return ABIArgInfo::getDirect( |
1869 | llvm::IntegerType::get(getVMContext(), TI.Width)); |
1870 | } |
1871 | |
1872 | if (IsX86_MMXType(CGT.ConvertType(Ty))) |
1873 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64)); |
1874 | |
1875 | return ABIArgInfo::getDirect(); |
1876 | } |
1877 | |
1878 | |
1879 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
1880 | Ty = EnumTy->getDecl()->getIntegerType(); |
1881 | |
1882 | bool InReg = shouldPrimitiveUseInReg(Ty, State); |
1883 | |
1884 | if (isPromotableIntegerTypeForABI(Ty)) { |
1885 | if (InReg) |
1886 | return ABIArgInfo::getExtendInReg(Ty); |
1887 | return ABIArgInfo::getExtend(Ty); |
1888 | } |
1889 | |
1890 | if (const auto * EIT = Ty->getAs<ExtIntType>()) { |
1891 | if (EIT->getNumBits() <= 64) { |
1892 | if (InReg) |
1893 | return ABIArgInfo::getDirectInReg(); |
1894 | return ABIArgInfo::getDirect(); |
1895 | } |
1896 | return getIndirectResult(Ty, false, State); |
1897 | } |
1898 | |
1899 | if (InReg) |
1900 | return ABIArgInfo::getDirectInReg(); |
1901 | return ABIArgInfo::getDirect(); |
1902 | } |
1903 | |
1904 | void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const { |
1905 | CCState State(FI); |
1906 | if (IsMCUABI) |
1907 | State.FreeRegs = 3; |
1908 | else if (State.CC == llvm::CallingConv::X86_FastCall) { |
1909 | State.FreeRegs = 2; |
1910 | State.FreeSSERegs = 3; |
1911 | } else if (State.CC == llvm::CallingConv::X86_VectorCall) { |
1912 | State.FreeRegs = 2; |
1913 | State.FreeSSERegs = 6; |
1914 | } else if (FI.getHasRegParm()) |
1915 | State.FreeRegs = FI.getRegParm(); |
1916 | else if (State.CC == llvm::CallingConv::X86_RegCall) { |
1917 | State.FreeRegs = 5; |
1918 | State.FreeSSERegs = 8; |
1919 | } else if (IsWin32StructABI) { |
1920 | |
1921 | |
1922 | State.FreeRegs = DefaultNumRegisterParameters; |
1923 | State.FreeSSERegs = 3; |
1924 | } else |
1925 | State.FreeRegs = DefaultNumRegisterParameters; |
1926 | |
1927 | if (!::classifyReturnType(getCXXABI(), FI, *this)) { |
1928 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State); |
1929 | } else if (FI.getReturnInfo().isIndirect()) { |
1930 | |
1931 | |
1932 | if (State.FreeRegs) { |
1933 | --State.FreeRegs; |
1934 | if (!IsMCUABI) |
1935 | FI.getReturnInfo().setInReg(true); |
1936 | } |
1937 | } |
1938 | |
1939 | |
1940 | if (FI.isChainCall()) |
1941 | ++State.FreeRegs; |
1942 | |
1943 | |
1944 | |
1945 | if (State.CC == llvm::CallingConv::X86_VectorCall) |
1946 | runVectorCallFirstPass(FI, State); |
1947 | |
1948 | bool UsedInAlloca = false; |
1949 | MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments(); |
1950 | for (int I = 0, E = Args.size(); I < E; ++I) { |
1951 | |
1952 | if (State.IsPreassigned.test(I)) |
1953 | continue; |
1954 | |
1955 | Args[I].info = classifyArgumentType(Args[I].type, State); |
1956 | UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca); |
1957 | } |
1958 | |
1959 | |
1960 | |
1961 | if (UsedInAlloca) |
1962 | rewriteWithInAlloca(FI); |
1963 | } |
1964 | |
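[Editor's sketch — not part of TargetInfo.cpp] computeInfo seeds the per-call register budgets from the calling convention before any argument is classified. For reference, a standalone tabulation of the initial budgets exactly as set above; the struct, the function name and the string keys are illustrative only, and Default stands for DefaultNumRegisterParameters (the regparm(N) case is omitted):

    #include <cstdio>
    #include <cstring>

    struct X86_32RegBudget { unsigned FreeRegs, FreeSSERegs; };

    // Values copied from X86_32ABIInfo::computeInfo above.
    static X86_32RegBudget initialBudget(const char *CC, unsigned Default) {
      if (!std::strcmp(CC, "mcu"))        return {3, 0};
      if (!std::strcmp(CC, "fastcall"))   return {2, 3};
      if (!std::strcmp(CC, "vectorcall")) return {2, 6};
      if (!std::strcmp(CC, "regcall"))    return {5, 8};
      if (!std::strcmp(CC, "win32"))      return {Default, 3};
      return {Default, 0};                // plain cdecl and friends
    }

    int main() {
      X86_32RegBudget B = initialBudget("fastcall", /*Default=*/0);
      std::printf("fastcall: %u GPRs, %u SSE registers\n",
                  B.FreeRegs, B.FreeSSERegs);
    }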
1965 | void |
1966 | X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields, |
1967 | CharUnits &StackOffset, ABIArgInfo &Info, |
1968 | QualType Type) const { |
1969 | |
1970 | CharUnits WordSize = CharUnits::fromQuantity(4); |
1971 | assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct"); |
1972 | |
1973 | |
1974 | |
1975 | |
1976 | bool IsIndirect = false; |
1977 | if (Info.isIndirect() && !Info.getIndirectByVal()) |
1978 | IsIndirect = true; |
1979 | Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect); |
1980 | llvm::Type *LLTy = CGT.ConvertTypeForMem(Type); |
1981 | if (IsIndirect) |
1982 | LLTy = LLTy->getPointerTo(0); |
1983 | FrameFields.push_back(LLTy); |
1984 | StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type); |
1985 | |
1986 | |
1987 | CharUnits FieldEnd = StackOffset; |
1988 | StackOffset = FieldEnd.alignTo(WordSize); |
1989 | if (StackOffset != FieldEnd) { |
1990 | CharUnits NumBytes = StackOffset - FieldEnd; |
1991 | llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext()); |
1992 | Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity()); |
1993 | FrameFields.push_back(Ty); |
1994 | } |
1995 | } |
1996 | |
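[Editor's sketch — not part of TargetInfo.cpp] Every inalloca field is followed by enough i8 padding to keep the running stack offset aligned to the 4-byte stack word. A standalone restatement of just that padding step, with CharUnits replaced by plain byte counts:

    #include <cstdio>

    // Returns the number of i8 padding bytes addFieldToArgStruct would append
    // after a field of FieldSize bytes, updating the running offset in place.
    static unsigned paddingAfterField(unsigned &StackOffset, unsigned FieldSize) {
      StackOffset += FieldSize;
      unsigned FieldEnd = StackOffset;
      StackOffset = (StackOffset + 3) & ~3u;  // align up to the 4-byte word
      return StackOffset - FieldEnd;
    }

    int main() {
      unsigned Off = 0;
      unsigned Pad = paddingAfterField(Off, 6);
      std::printf("6-byte field: %u pad bytes, next offset %u\n", Pad, Off);  // 2, 8
    }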
1997 | static bool isArgInAlloca(const ABIArgInfo &Info) { |
1998 | |
1999 | switch (Info.getKind()) { |
2000 | case ABIArgInfo::InAlloca: |
2001 | return true; |
2002 | case ABIArgInfo::Ignore: |
2003 | case ABIArgInfo::IndirectAliased: |
2004 | return false; |
2005 | case ABIArgInfo::Indirect: |
2006 | case ABIArgInfo::Direct: |
2007 | case ABIArgInfo::Extend: |
2008 | return !Info.getInReg(); |
2009 | case ABIArgInfo::Expand: |
2010 | case ABIArgInfo::CoerceAndExpand: |
2011 | |
2012 | |
2013 | return true; |
2014 | } |
2015 | llvm_unreachable("invalid enum"); |
2016 | } |
2017 | |
2018 | void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const { |
2019 | assert(IsWin32StructABI && "inalloca only supported on win32"); |
2020 | |
2021 | |
2022 | SmallVector<llvm::Type *, 6> FrameFields; |
2023 | |
2024 | |
2025 | CharUnits StackAlign = CharUnits::fromQuantity(4); |
2026 | |
2027 | CharUnits StackOffset; |
2028 | CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end(); |
2029 | |
2030 | |
2031 | bool IsThisCall = |
2032 | FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall; |
2033 | ABIArgInfo &Ret = FI.getReturnInfo(); |
2034 | if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall && |
2035 | isArgInAlloca(I->info)) { |
2036 | addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); |
2037 | ++I; |
2038 | } |
2039 | |
2040 | |
2041 | if (Ret.isIndirect() && !Ret.getInReg()) { |
2042 | addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType()); |
2043 | |
2044 | Ret.setInAllocaSRet(IsWin32StructABI); |
2045 | } |
2046 | |
2047 | |
2048 | if (IsThisCall) |
2049 | ++I; |
2050 | |
2051 | |
2052 | for (; I != E; ++I) { |
2053 | if (isArgInAlloca(I->info)) |
2054 | addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); |
2055 | } |
2056 | |
2057 | FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields, |
2058 | true), |
2059 | StackAlign); |
2060 | } |
2061 | |
2062 | Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF, |
2063 | Address VAListAddr, QualType Ty) const { |
2064 | |
2065 | auto TypeInfo = getContext().getTypeInfoInChars(Ty); |
2066 | |
2067 | |
2068 | |
2069 | |
2070 | |
2071 | TypeInfo.Align = CharUnits::fromQuantity( |
2072 | getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity())); |
2073 | |
2074 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, false, |
2075 | TypeInfo, CharUnits::fromQuantity(4), |
2076 | true); |
2077 | } |
2078 | |
2079 | bool X86_32TargetCodeGenInfo::isStructReturnInRegABI( |
2080 | const llvm::Triple &Triple, const CodeGenOptions &Opts) { |
2081 | assert(Triple.getArch() == llvm::Triple::x86); |
2082 | |
2083 | switch (Opts.getStructReturnConvention()) { |
2084 | case CodeGenOptions::SRCK_Default: |
2085 | break; |
2086 | case CodeGenOptions::SRCK_OnStack: |
2087 | return false; |
2088 | case CodeGenOptions::SRCK_InRegs: |
2089 | return true; |
2090 | } |
2091 | |
2092 | if (Triple.isOSDarwin() || Triple.isOSIAMCU()) |
2093 | return true; |
2094 | |
2095 | switch (Triple.getOS()) { |
2096 | case llvm::Triple::DragonFly: |
2097 | case llvm::Triple::FreeBSD: |
2098 | case llvm::Triple::OpenBSD: |
2099 | case llvm::Triple::Win32: |
2100 | return true; |
2101 | default: |
2102 | return false; |
2103 | } |
2104 | } |
2105 | |
2106 | static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV, |
2107 | CodeGen::CodeGenModule &CGM) { |
2108 | if (!FD->hasAttr<AnyX86InterruptAttr>()) |
2109 | return; |
2110 | |
2111 | llvm::Function *Fn = cast<llvm::Function>(GV); |
2112 | Fn->setCallingConv(llvm::CallingConv::X86_INTR); |
2113 | if (FD->getNumParams() == 0) |
2114 | return; |
2115 | |
2116 | auto PtrTy = cast<PointerType>(FD->getParamDecl(0)->getType()); |
2117 | llvm::Type *ByValTy = CGM.getTypes().ConvertType(PtrTy->getPointeeType()); |
2118 | llvm::Attribute NewAttr = llvm::Attribute::getWithByValType( |
2119 | Fn->getContext(), ByValTy); |
2120 | Fn->addParamAttr(0, NewAttr); |
2121 | } |
2122 | |
2123 | void X86_32TargetCodeGenInfo::setTargetAttributes( |
2124 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { |
2125 | if (GV->isDeclaration()) |
2126 | return; |
2127 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { |
2128 | if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { |
2129 | llvm::Function *Fn = cast<llvm::Function>(GV); |
2130 | Fn->addFnAttr("stackrealign"); |
2131 | } |
2132 | |
2133 | addX86InterruptAttrs(FD, GV, CGM); |
2134 | } |
2135 | } |
2136 | |
2137 | bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( |
2138 | CodeGen::CodeGenFunction &CGF, |
2139 | llvm::Value *Address) const { |
2140 | CodeGen::CGBuilderTy &Builder = CGF.Builder; |
2141 | |
2142 | llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); |
2143 | |
2144 | |
2145 | |
2146 | |
2147 | AssignToArrayRange(Builder, Address, Four8, 0, 8); |
2148 | |
2149 | if (CGF.CGM.getTarget().getTriple().isOSDarwin()) { |
2150 | |
2151 | |
2152 | |
2153 | llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16); |
2154 | AssignToArrayRange(Builder, Address, Sixteen8, 12, 16); |
2155 | |
2156 | } else { |
2157 | |
2158 | |
2159 | Builder.CreateAlignedStore( |
2160 | Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9), |
2161 | CharUnits::One()); |
2162 | |
2163 | |
2164 | |
2165 | |
2166 | llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12); |
2167 | AssignToArrayRange(Builder, Address, Twelve8, 11, 16); |
2168 | } |
2169 | |
2170 | return false; |
2171 | } |
2172 | |
2173 | |
2174 | |
2175 | |
2176 | |
2177 | |
2178 | namespace { |
2179 | |
2180 | enum class X86AVXABILevel { |
2181 | None, |
2182 | AVX, |
2183 | AVX512 |
2184 | }; |
2185 | |
2186 | |
2187 | static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) { |
2188 | switch (AVXLevel) { |
2189 | case X86AVXABILevel::AVX512: |
2190 | return 512; |
2191 | case X86AVXABILevel::AVX: |
2192 | return 256; |
2193 | case X86AVXABILevel::None: |
2194 | return 128; |
2195 | } |
2196 | llvm_unreachable("Unknown AVXLevel"); |
2197 | } |
2198 | |
2199 | |
2200 | class X86_64ABIInfo : public SwiftABIInfo { |
2201 | enum Class { |
2202 | Integer = 0, |
2203 | SSE, |
2204 | SSEUp, |
2205 | X87, |
2206 | X87Up, |
2207 | ComplexX87, |
2208 | NoClass, |
2209 | Memory |
2210 | }; |
2211 | |
2212 | |
2213 | |
2214 | |
2215 | |
2216 | |
2217 | |
2218 | |
2219 | |
2220 | |
2221 | static Class merge(Class Accum, Class Field); |
2222 | |
2223 | |
2224 | |
2225 | |
2226 | |
2227 | |
2228 | |
2229 | |
2230 | |
2231 | |
2232 | |
2233 | |
2234 | |
2235 | |
2236 | |
2237 | void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const; |
2238 | |
2239 | |
2240 | |
2241 | |
2242 | |
2243 | |
2244 | |
2245 | |
2246 | |
2247 | |
2248 | |
2249 | |
2250 | |
2251 | |
2252 | |
2253 | |
2254 | |
2255 | |
2256 | |
2257 | |
2258 | |
2259 | |
2260 | |
2261 | |
2262 | |
2263 | void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi, |
2264 | bool isNamedArg) const; |
2265 | |
2266 | llvm::Type *GetByteVectorType(QualType Ty) const; |
2267 | llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType, |
2268 | unsigned IROffset, QualType SourceTy, |
2269 | unsigned SourceOffset) const; |
2270 | llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType, |
2271 | unsigned IROffset, QualType SourceTy, |
2272 | unsigned SourceOffset) const; |
2273 | |
2274 | |
2275 | |
2276 | ABIArgInfo getIndirectReturnResult(QualType Ty) const; |
2277 | |
2278 | |
2279 | |
2280 | |
2281 | |
2282 | |
2283 | ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const; |
2284 | |
2285 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
2286 | |
2287 | ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs, |
2288 | unsigned &neededInt, unsigned &neededSSE, |
2289 | bool isNamedArg) const; |
2290 | |
2291 | ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt, |
2292 | unsigned &NeededSSE) const; |
2293 | |
2294 | ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt, |
2295 | unsigned &NeededSSE) const; |
2296 | |
2297 | bool IsIllegalVectorType(QualType Ty) const; |
2298 | |
2299 | |
2300 | |
2301 | |
2302 | |
2303 | |
2304 | bool honorsRevision0_98() const { |
2305 | return !getTarget().getTriple().isOSDarwin(); |
2306 | } |
2307 | |
2308 | |
2309 | |
2310 | bool classifyIntegerMMXAsSSE() const { |
2311 | |
2312 | if (getContext().getLangOpts().getClangABICompat() <= |
2313 | LangOptions::ClangABI::Ver3_8) |
2314 | return false; |
2315 | |
2316 | const llvm::Triple &Triple = getTarget().getTriple(); |
2317 | if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4) |
2318 | return false; |
2319 | if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10) |
2320 | return false; |
2321 | return true; |
2322 | } |
2323 | |
2324 | |
2325 | bool passInt128VectorsInMem() const { |
2326 | |
2327 | if (getContext().getLangOpts().getClangABICompat() <= |
2328 | LangOptions::ClangABI::Ver9) |
2329 | return false; |
2330 | |
2331 | const llvm::Triple &T = getTarget().getTriple(); |
2332 | return T.isOSLinux() || T.isOSNetBSD(); |
2333 | } |
2334 | |
2335 | X86AVXABILevel AVXLevel; |
2336 | |
2337 | |
2338 | bool Has64BitPointers; |
2339 | |
2340 | public: |
2341 | X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) : |
2342 | SwiftABIInfo(CGT), AVXLevel(AVXLevel), |
2343 | Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) { |
2344 | } |
2345 | |
2346 | bool isPassedUsingAVXType(QualType type) const { |
2347 | unsigned neededInt, neededSSE; |
2348 | |
2349 | ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE, |
2350 | true); |
2351 | if (info.isDirect()) { |
2352 | llvm::Type *ty = info.getCoerceToType(); |
2353 | if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty)) |
2354 | return vectorTy->getPrimitiveSizeInBits().getFixedSize() > 128; |
2355 | } |
2356 | return false; |
2357 | } |
2358 | |
2359 | void computeInfo(CGFunctionInfo &FI) const override; |
2360 | |
2361 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
2362 | QualType Ty) const override; |
2363 | Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, |
2364 | QualType Ty) const override; |
2365 | |
2366 | bool has64BitPointers() const { |
2367 | return Has64BitPointers; |
2368 | } |
2369 | |
2370 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, |
2371 | bool asReturnValue) const override { |
2372 | return occupiesMoreThan(CGT, scalars, 4); |
2373 | } |
2374 | bool isSwiftErrorInRegister() const override { |
2375 | return true; |
2376 | } |
2377 | }; |
2378 | |
2379 | |
2380 | class WinX86_64ABIInfo : public SwiftABIInfo { |
2381 | public: |
2382 | WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) |
2383 | : SwiftABIInfo(CGT), AVXLevel(AVXLevel), |
2384 | IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {} |
2385 | |
2386 | void computeInfo(CGFunctionInfo &FI) const override; |
2387 | |
2388 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
2389 | QualType Ty) const override; |
2390 | |
2391 | bool isHomogeneousAggregateBaseType(QualType Ty) const override { |
2392 | |
2393 | return isX86VectorTypeForVectorCall(getContext(), Ty); |
2394 | } |
2395 | |
2396 | bool isHomogeneousAggregateSmallEnough(const Type *Ty, |
2397 | uint64_t NumMembers) const override { |
2398 | |
2399 | return isX86VectorCallAggregateSmallEnough(NumMembers); |
2400 | } |
2401 | |
2402 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars, |
2403 | bool asReturnValue) const override { |
2404 | return occupiesMoreThan(CGT, scalars, 4); |
2405 | } |
2406 | |
2407 | bool isSwiftErrorInRegister() const override { |
2408 | return true; |
2409 | } |
2410 | |
2411 | private: |
2412 | ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType, |
2413 | bool IsVectorCall, bool IsRegCall) const; |
2414 | ABIArgInfo reclassifyHvaArgForVectorCall(QualType Ty, unsigned &FreeSSERegs, |
2415 | const ABIArgInfo &current) const; |
2416 | |
2417 | X86AVXABILevel AVXLevel; |
2418 | |
2419 | bool IsMingw64; |
2420 | }; |
2421 | |
2422 | class X86_64TargetCodeGenInfo : public TargetCodeGenInfo { |
2423 | public: |
2424 | X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) |
2425 | : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {} |
2426 | |
2427 | const X86_64ABIInfo &getABIInfo() const { |
2428 | return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo()); |
2429 | } |
2430 | |
2431 | |
2432 | |
2433 | bool markARCOptimizedReturnCallsAsNoTail() const override { return true; } |
2434 | |
2435 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { |
2436 | return 7; |
2437 | } |
2438 | |
2439 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
2440 | llvm::Value *Address) const override { |
2441 | llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); |
2442 | |
2443 | |
2444 | |
2445 | AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); |
2446 | return false; |
2447 | } |
2448 | |
2449 | llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, |
2450 | StringRef Constraint, |
2451 | llvm::Type* Ty) const override { |
2452 | return X86AdjustInlineAsmType(CGF, Constraint, Ty); |
2453 | } |
2454 | |
2455 | bool isNoProtoCallVariadic(const CallArgList &args, |
2456 | const FunctionNoProtoType *fnType) const override { |
2457 | |
2458 | |
2459 | |
2460 | |
2461 | |
2462 | |
2463 | if (fnType->getCallConv() == CC_C) { |
2464 | bool HasAVXType = false; |
2465 | for (CallArgList::const_iterator |
2466 | it = args.begin(), ie = args.end(); it != ie; ++it) { |
2467 | if (getABIInfo().isPassedUsingAVXType(it->Ty)) { |
2468 | HasAVXType = true; |
2469 | break; |
2470 | } |
2471 | } |
2472 | |
2473 | if (!HasAVXType) |
2474 | return true; |
2475 | } |
2476 | |
2477 | return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType); |
2478 | } |
2479 | |
2480 | llvm::Constant * |
2481 | getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override { |
2482 | unsigned Sig = (0xeb << 0) | |
2483 | (0x06 << 8) | |
2484 | ('v' << 16) | |
2485 | ('2' << 24); |
2486 | return llvm::ConstantInt::get(CGM.Int32Ty, Sig); |
2487 | } |
2488 | |
2489 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
2490 | CodeGen::CodeGenModule &CGM) const override { |
2491 | if (GV->isDeclaration()) |
2492 | return; |
2493 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { |
2494 | if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { |
2495 | llvm::Function *Fn = cast<llvm::Function>(GV); |
2496 | Fn->addFnAttr("stackrealign"); |
2497 | } |
2498 | |
2499 | addX86InterruptAttrs(FD, GV, CGM); |
2500 | } |
2501 | } |
2502 | |
2503 | void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc, |
2504 | const FunctionDecl *Caller, |
2505 | const FunctionDecl *Callee, |
2506 | const CallArgList &Args) const override; |
2507 | }; |
2508 | |
2509 | static void initFeatureMaps(const ASTContext &Ctx, |
2510 | llvm::StringMap<bool> &CallerMap, |
2511 | const FunctionDecl *Caller, |
2512 | llvm::StringMap<bool> &CalleeMap, |
2513 | const FunctionDecl *Callee) { |
2514 | if (CalleeMap.empty() && CallerMap.empty()) { |
2515 | |
2516 | |
2517 | |
2518 | Ctx.getFunctionFeatureMap(CallerMap, Caller); |
2519 | Ctx.getFunctionFeatureMap(CalleeMap, Callee); |
2520 | } |
2521 | } |
2522 | |
2523 | static bool checkAVXParamFeature(DiagnosticsEngine &Diag, |
2524 | SourceLocation CallLoc, |
2525 | const llvm::StringMap<bool> &CallerMap, |
2526 | const llvm::StringMap<bool> &CalleeMap, |
2527 | QualType Ty, StringRef Feature, |
2528 | bool IsArgument) { |
2529 | bool CallerHasFeat = CallerMap.lookup(Feature); |
2530 | bool CalleeHasFeat = CalleeMap.lookup(Feature); |
2531 | if (!CallerHasFeat && !CalleeHasFeat) |
2532 | return Diag.Report(CallLoc, diag::warn_avx_calling_convention) |
2533 | << IsArgument << Ty << Feature; |
2534 | |
2535 | |
2536 | if (!CallerHasFeat || !CalleeHasFeat) |
2537 | return Diag.Report(CallLoc, diag::err_avx_calling_convention) |
2538 | << IsArgument << Ty << Feature; |
2539 | |
2540 | |
2541 | |
2542 | return false; |
2543 | } |
2544 | |
2545 | static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx, |
2546 | SourceLocation CallLoc, |
2547 | const llvm::StringMap<bool> &CallerMap, |
2548 | const llvm::StringMap<bool> &CalleeMap, QualType Ty, |
2549 | bool IsArgument) { |
2550 | uint64_t Size = Ctx.getTypeSize(Ty); |
2551 | if (Size > 256) |
2552 | return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, |
2553 | "avx512f", IsArgument); |
2554 | |
2555 | if (Size > 128) |
2556 | return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx", |
2557 | IsArgument); |
2558 | |
2559 | return false; |
2560 | } |
2561 | |
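[Editor's sketch — not part of TargetInfo.cpp] checkAVXParam maps the vector width directly onto the target feature the diagnostic will ask for: wider than 256 bits requires avx512f, wider than 128 bits requires avx, and anything at or below 128 bits never triggers the check. A tiny restatement of that mapping (the helper name is made up for illustration):

    #include <cstdio>

    // Mirrors the size thresholds in checkAVXParam; returns nullptr when no
    // extra feature is required for a vector of the given width.
    static const char *requiredAVXFeature(unsigned SizeInBits) {
      if (SizeInBits > 256) return "avx512f";
      if (SizeInBits > 128) return "avx";
      return nullptr;
    }

    int main() {
      const unsigned Widths[] = {128, 256, 512};
      for (unsigned Bits : Widths) {
        const char *Feature = requiredAVXFeature(Bits);
        std::printf("%u-bit vector -> %s\n", Bits,
                    Feature ? Feature : "no feature check");
      }
    }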
2562 | void X86_64TargetCodeGenInfo::checkFunctionCallABI( |
2563 | CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller, |
2564 | const FunctionDecl *Callee, const CallArgList &Args) const { |
2565 | llvm::StringMap<bool> CallerMap; |
2566 | llvm::StringMap<bool> CalleeMap; |
2567 | unsigned ArgIndex = 0; |
2568 | |
2569 | |
2570 | |
2571 | for (const CallArg &Arg : Args) { |
2572 | |
2573 | |
2574 | |
2575 | |
2576 | |
2577 | |
2578 | |
2579 | if (Arg.getType()->isVectorType() && |
2580 | CGM.getContext().getTypeSize(Arg.getType()) > 128) { |
2581 | initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee); |
2582 | QualType Ty = Arg.getType(); |
2583 | |
2584 | |
2585 | if (ArgIndex < Callee->getNumParams()) |
2586 | Ty = Callee->getParamDecl(ArgIndex)->getType(); |
2587 | |
2588 | if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap, |
2589 | CalleeMap, Ty, true)) |
2590 | return; |
2591 | } |
2592 | ++ArgIndex; |
2593 | } |
2594 | |
2595 | |
2596 | |
2597 | if (Callee->getReturnType()->isVectorType() && |
2598 | CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) { |
2599 | initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee); |
2600 | checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap, |
2601 | CalleeMap, Callee->getReturnType(), |
2602 | false); |
2603 | } |
2604 | } |
2605 | |
2606 | static std::string qualifyWindowsLibrary(llvm::StringRef Lib) { |
2607 | |
2608 | |
2609 | |
2610 | bool Quote = (Lib.find(' ') != StringRef::npos); |
2611 | std::string ArgStr = Quote ? "\"" : ""; |
2612 | ArgStr += Lib; |
2613 | if (!Lib.endswith_insensitive(".lib") && !Lib.endswith_insensitive(".a")) |
2614 | ArgStr += ".lib"; |
2615 | ArgStr += Quote ? "\"" : ""; |
2616 | return ArgStr; |
2617 | } |
2618 | |
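[Editor's sketch — not part of TargetInfo.cpp] qualifyWindowsLibrary quotes names that contain a space and appends ".lib" unless the name already ends in ".lib" or ".a" (case-insensitively). A standard-library-only approximation, with StringRef::endswith_insensitive replaced by a hand-rolled check:

    #include <cctype>
    #include <cstdio>
    #include <string>

    static bool endsWithInsensitive(const std::string &S, const std::string &Suf) {
      if (S.size() < Suf.size()) return false;
      for (size_t i = 0; i < Suf.size(); ++i)
        if (std::tolower((unsigned char)S[S.size() - Suf.size() + i]) !=
            std::tolower((unsigned char)Suf[i]))
          return false;
      return true;
    }

    // Approximation of qualifyWindowsLibrary, for illustration only.
    static std::string qualifyWindowsLibrarySketch(const std::string &Lib) {
      bool Quote = Lib.find(' ') != std::string::npos;
      std::string Arg = Quote ? "\"" : "";
      Arg += Lib;
      if (!endsWithInsensitive(Lib, ".lib") && !endsWithInsensitive(Lib, ".a"))
        Arg += ".lib";
      Arg += Quote ? "\"" : "";
      return Arg;
    }

    int main() {
      std::printf("%s\n", qualifyWindowsLibrarySketch("my lib").c_str());       // "my lib.lib"
      std::printf("%s\n", qualifyWindowsLibrarySketch("kernel32.lib").c_str()); // kernel32.lib
    }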
2619 | class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo { |
2620 | public: |
2621 | WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, |
2622 | bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI, |
2623 | unsigned NumRegisterParameters) |
2624 | : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI, |
2625 | Win32StructABI, NumRegisterParameters, false) {} |
2626 | |
2627 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
2628 | CodeGen::CodeGenModule &CGM) const override; |
2629 | |
2630 | void getDependentLibraryOption(llvm::StringRef Lib, |
2631 | llvm::SmallString<24> &Opt) const override { |
2632 | Opt = "/DEFAULTLIB:"; |
2633 | Opt += qualifyWindowsLibrary(Lib); |
2634 | } |
2635 | |
2636 | void getDetectMismatchOption(llvm::StringRef Name, |
2637 | llvm::StringRef Value, |
2638 | llvm::SmallString<32> &Opt) const override { |
2639 | Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; |
2640 | } |
2641 | }; |
2642 | |
2643 | static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
2644 | CodeGen::CodeGenModule &CGM) { |
2645 | if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) { |
2646 | |
2647 | if (CGM.getCodeGenOpts().StackProbeSize != 4096) |
2648 | Fn->addFnAttr("stack-probe-size", |
2649 | llvm::utostr(CGM.getCodeGenOpts().StackProbeSize)); |
2650 | if (CGM.getCodeGenOpts().NoStackArgProbe) |
2651 | Fn->addFnAttr("no-stack-arg-probe"); |
2652 | } |
2653 | } |
2654 | |
2655 | void WinX86_32TargetCodeGenInfo::setTargetAttributes( |
2656 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { |
2657 | X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); |
2658 | if (GV->isDeclaration()) |
2659 | return; |
2660 | addStackProbeTargetAttributes(D, GV, CGM); |
2661 | } |
2662 | |
2663 | class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo { |
2664 | public: |
2665 | WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, |
2666 | X86AVXABILevel AVXLevel) |
2667 | : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {} |
2668 | |
2669 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
2670 | CodeGen::CodeGenModule &CGM) const override; |
2671 | |
2672 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { |
2673 | return 7; |
2674 | } |
2675 | |
2676 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
2677 | llvm::Value *Address) const override { |
2678 | llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); |
2679 | |
2680 | |
2681 | |
2682 | AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); |
2683 | return false; |
2684 | } |
2685 | |
2686 | void getDependentLibraryOption(llvm::StringRef Lib, |
2687 | llvm::SmallString<24> &Opt) const override { |
2688 | Opt = "/DEFAULTLIB:"; |
2689 | Opt += qualifyWindowsLibrary(Lib); |
2690 | } |
2691 | |
2692 | void getDetectMismatchOption(llvm::StringRef Name, |
2693 | llvm::StringRef Value, |
2694 | llvm::SmallString<32> &Opt) const override { |
2695 | Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; |
2696 | } |
2697 | }; |
2698 | |
2699 | void WinX86_64TargetCodeGenInfo::setTargetAttributes( |
2700 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { |
2701 | TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); |
2702 | if (GV->isDeclaration()) |
2703 | return; |
2704 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { |
2705 | if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { |
2706 | llvm::Function *Fn = cast<llvm::Function>(GV); |
2707 | Fn->addFnAttr("stackrealign"); |
2708 | } |
2709 | |
2710 | addX86InterruptAttrs(FD, GV, CGM); |
2711 | } |
2712 | |
2713 | addStackProbeTargetAttributes(D, GV, CGM); |
2714 | } |
2715 | } |
2716 | |
2717 | void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, |
2718 | Class &Hi) const { |
2719 | |
2720 | |
2721 | |
2722 | |
2723 | |
2724 | |
2725 | |
2726 | |
2727 | |
2728 | |
2729 | |
2730 | |
2731 | |
2732 | |
2733 | |
2734 | |
2735 | |
2736 | |
2737 | |
2738 | |
2739 | |
2740 | if (Hi == Memory) |
2741 | Lo = Memory; |
2742 | if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) |
2743 | Lo = Memory; |
2744 | if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) |
2745 | Lo = Memory; |
2746 | if (Hi == SSEUp && Lo != SSE) |
2747 | Hi = SSE; |
2748 | } |
2749 | |
2750 | X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { |
2751 | |
2752 | |
2753 | |
2754 | |
2755 | |
2756 | |
2757 | |
2758 | |
2759 | |
2760 | |
2761 | |
2762 | |
2763 | |
2764 | |
2765 | |
2766 | |
2767 | |
2768 | |
2769 | |
2770 | |
2771 | |
2772 | |
2773 | |
2774 | assert((Accum != Memory && Accum != ComplexX87) && |
2775 | "Invalid accumulated classification during merge."); |
2776 | if (Accum == Field || Field == NoClass) |
2777 | return Accum; |
2778 | if (Field == Memory) |
2779 | return Memory; |
2780 | if (Accum == NoClass) |
2781 | return Field; |
2782 | if (Accum == Integer || Field == Integer) |
2783 | return Integer; |
2784 | if (Field == X87 || Field == X87Up || Field == ComplexX87 || |
2785 | Accum == X87 || Accum == X87Up) |
2786 | return Memory; |
2787 | return SSE; |
2788 | } |
2789 | |
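[Editor's sketch — not part of TargetInfo.cpp] merge folds the per-field classes of an aggregate into a running class for each eightbyte, and postMerge then applies the SysV cleanup rules once the whole aggregate has been walked. A minimal standalone restatement of both rule sets, with a small demo classifying one eightbyte that holds an int followed by a float (the names and the demo are illustrative only):

    #include <cstdio>

    enum Class { Integer, SSE, SSEUp, X87, X87Up, ComplexX87, NoClass, Memory };

    // Restates X86_64ABIInfo::merge.
    static Class mergeSketch(Class Accum, Class Field) {
      if (Accum == Field || Field == NoClass)   return Accum;
      if (Field == Memory)                      return Memory;
      if (Accum == NoClass)                     return Field;
      if (Accum == Integer || Field == Integer) return Integer;
      if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
          Accum == X87 || Accum == X87Up)       return Memory;
      return SSE;
    }

    // Restates X86_64ABIInfo::postMerge; HonorsRev0_98 stands in for the
    // honorsRevision0_98() target check.
    static void postMergeSketch(unsigned SizeInBits, Class &Lo, Class &Hi,
                                bool HonorsRev0_98 = true) {
      if (Hi == Memory) Lo = Memory;
      if (Hi == X87Up && Lo != X87 && HonorsRev0_98) Lo = Memory;
      if (SizeInBits > 128 && (Lo != SSE || Hi != SSEUp)) Lo = Memory;
      if (Hi == SSEUp && Lo != SSE) Hi = SSE;
    }

    int main() {
      // struct { int i; float f; } -- both fields land in the low eightbyte.
      Class Lo = NoClass, Hi = NoClass;
      Lo = mergeSketch(Lo, Integer);  // int at offset 0
      Lo = mergeSketch(Lo, SSE);      // float at offset 4: Integer wins
      postMergeSketch(64, Lo, Hi);
      std::printf("Lo=%d Hi=%d (0=Integer, 6=NoClass)\n", Lo, Hi);
    }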
2790 | void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, |
2791 | Class &Lo, Class &Hi, bool isNamedArg) const { |
2792 | |
2793 | |
2794 | |
2795 | |
2796 | |
2797 | |
2798 | |
2799 | |
2800 | Lo = Hi = NoClass; |
2801 | |
2802 | Class &Current = OffsetBase < 64 ? Lo : Hi; |
2803 | Current = Memory; |
2804 | |
2805 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { |
2806 | BuiltinType::Kind k = BT->getKind(); |
2807 | |
2808 | if (k == BuiltinType::Void) { |
2809 | Current = NoClass; |
2810 | } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { |
2811 | Lo = Integer; |
2812 | Hi = Integer; |
2813 | } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { |
2814 | Current = Integer; |
2815 | } else if (k == BuiltinType::Float || k == BuiltinType::Double) { |
2816 | Current = SSE; |
2817 | } else if (k == BuiltinType::LongDouble) { |
2818 | const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); |
2819 | if (LDF == &llvm::APFloat::IEEEquad()) { |
2820 | Lo = SSE; |
2821 | Hi = SSEUp; |
2822 | } else if (LDF == &llvm::APFloat::x87DoubleExtended()) { |
2823 | Lo = X87; |
2824 | Hi = X87Up; |
2825 | } else if (LDF == &llvm::APFloat::IEEEdouble()) { |
2826 | Current = SSE; |
2827 | } else |
2828 | llvm_unreachable("unexpected long double representation!"); |
2829 | } |
2830 | |
2831 | |
2832 | return; |
2833 | } |
2834 | |
2835 | if (const EnumType *ET = Ty->getAs<EnumType>()) { |
2836 | |
2837 | classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg); |
2838 | return; |
2839 | } |
2840 | |
2841 | if (Ty->hasPointerRepresentation()) { |
2842 | Current = Integer; |
2843 | return; |
2844 | } |
2845 | |
2846 | if (Ty->isMemberPointerType()) { |
2847 | if (Ty->isMemberFunctionPointerType()) { |
2848 | if (Has64BitPointers) { |
2849 | |
2850 | |
2851 | Lo = Hi = Integer; |
2852 | } else { |
2853 | |
2854 | |
2855 | uint64_t EB_FuncPtr = (OffsetBase) / 64; |
2856 | uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64; |
2857 | if (EB_FuncPtr != EB_ThisAdj) { |
2858 | Lo = Hi = Integer; |
2859 | } else { |
2860 | Current = Integer; |
2861 | } |
2862 | } |
2863 | } else { |
2864 | Current = Integer; |
2865 | } |
2866 | return; |
2867 | } |
2868 | |
2869 | if (const VectorType *VT = Ty->getAs<VectorType>()) { |
2870 | uint64_t Size = getContext().getTypeSize(VT); |
2871 | if (Size == 1 || Size == 8 || Size == 16 || Size == 32) { |
2872 | |
2873 | |
2874 | |
2875 | |
2876 | Current = Integer; |
2877 | |
2878 | |
2879 | |
2880 | uint64_t EB_Lo = (OffsetBase) / 64; |
2881 | uint64_t EB_Hi = (OffsetBase + Size - 1) / 64; |
2882 | if (EB_Lo != EB_Hi) |
2883 | Hi = Lo; |
2884 | } else if (Size == 64) { |
2885 | QualType ElementType = VT->getElementType(); |
2886 | |
2887 | |
2888 | if (ElementType->isSpecificBuiltinType(BuiltinType::Double)) |
2889 | return; |
2890 | |
2891 | |
2892 | |
2893 | |
2894 | if (!classifyIntegerMMXAsSSE() && |
2895 | (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) || |
2896 | ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) || |
2897 | ElementType->isSpecificBuiltinType(BuiltinType::Long) || |
2898 | ElementType->isSpecificBuiltinType(BuiltinType::ULong))) |
2899 | Current = Integer; |
2900 | else |
2901 | Current = SSE; |
2902 | |
2903 | |
2904 | |
2905 | if (OffsetBase && OffsetBase != 64) |
2906 | Hi = Lo; |
2907 | } else if (Size == 128 || |
2908 | (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) { |
2909 | QualType ElementType = VT->getElementType(); |
2910 | |
2911 | |
2912 | if (passInt128VectorsInMem() && Size != 128 && |
2913 | (ElementType->isSpecificBuiltinType(BuiltinType::Int128) || |
2914 | ElementType->isSpecificBuiltinType(BuiltinType::UInt128))) |
2915 | return; |
2916 | |
2917 | |
2918 | |
2919 | |
2920 | |
2921 | |
2922 | |
2923 | |
2924 | |
2925 | |
2926 | |
2927 | |
2928 | |
2929 | |
2930 | |
2931 | Lo = SSE; |
2932 | Hi = SSEUp; |
2933 | } |
2934 | return; |
2935 | } |
2936 | |
2937 | if (const ComplexType *CT = Ty->getAs<ComplexType>()) { |
2938 | QualType ET = getContext().getCanonicalType(CT->getElementType()); |
2939 | |
2940 | uint64_t Size = getContext().getTypeSize(Ty); |
2941 | if (ET->isIntegralOrEnumerationType()) { |
2942 | if (Size <= 64) |
2943 | Current = Integer; |
2944 | else if (Size <= 128) |
2945 | Lo = Hi = Integer; |
2946 | } else if (ET == getContext().FloatTy) { |
2947 | Current = SSE; |
2948 | } else if (ET == getContext().DoubleTy) { |
2949 | Lo = Hi = SSE; |
2950 | } else if (ET == getContext().LongDoubleTy) { |
2951 | const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); |
2952 | if (LDF == &llvm::APFloat::IEEEquad()) |
2953 | Current = Memory; |
2954 | else if (LDF == &llvm::APFloat::x87DoubleExtended()) |
2955 | Current = ComplexX87; |
2956 | else if (LDF == &llvm::APFloat::IEEEdouble()) |
2957 | Lo = Hi = SSE; |
2958 | else |
2959 | llvm_unreachable("unexpected long double representation!"); |
2960 | } |
2961 | |
2962 | |
2963 | |
2964 | uint64_t EB_Real = (OffsetBase) / 64; |
2965 | uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64; |
2966 | if (Hi == NoClass && EB_Real != EB_Imag) |
2967 | Hi = Lo; |
2968 | |
2969 | return; |
2970 | } |
2971 | |
2972 | if (const auto *EITy = Ty->getAs<ExtIntType>()) { |
2973 | if (EITy->getNumBits() <= 64) |
2974 | Current = Integer; |
2975 | else if (EITy->getNumBits() <= 128) |
2976 | Lo = Hi = Integer; |
2977 | |
2978 | return; |
2979 | } |
2980 | |
2981 | if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { |
2982 | |
2983 | |
2984 | uint64_t Size = getContext().getTypeSize(Ty); |
2985 | |
2986 | |
2987 | |
2988 | if (Size > 512) |
2989 | return; |
2990 | |
2991 | |
2992 | |
2993 | |
2994 | |
2995 | if (OffsetBase % getContext().getTypeAlign(AT->getElementType())) |
2996 | return; |
2997 | |
2998 | |
2999 | |
3000 | Current = NoClass; |
3001 | uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); |
3002 | uint64_t ArraySize = AT->getSize().getZExtValue(); |
3003 | |
3004 | |
3005 | |
3006 | |
3007 | |
3008 | if (Size > 128 && |
3009 | (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel))) |
3010 | return; |
3011 | |
3012 | for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { |
3013 | Class FieldLo, FieldHi; |
3014 | classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg); |
3015 | Lo = merge(Lo, FieldLo); |
3016 | Hi = merge(Hi, FieldHi); |
3017 | if (Lo == Memory || Hi == Memory) |
3018 | break; |
3019 | } |
3020 | |
3021 | postMerge(Size, Lo, Hi); |
3022 | assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); |
3023 | return; |
3024 | } |
3025 | |
3026 | if (const RecordType *RT = Ty->getAs<RecordType>()) { |
3027 | uint64_t Size = getContext().getTypeSize(Ty); |
3028 | |
3029 | |
3030 | |
3031 | if (Size > 512) |
3032 | return; |
3033 | |
3034 | |
3035 | |
3036 | |
3037 | if (getRecordArgABI(RT, getCXXABI())) |
3038 | return; |
3039 | |
3040 | const RecordDecl *RD = RT->getDecl(); |
3041 | |
3042 | |
3043 | if (RD->hasFlexibleArrayMember()) |
3044 | return; |
3045 | |
3046 | const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); |
3047 | |
3048 | |
3049 | Current = NoClass; |
3050 | |
3051 | |
3052 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { |
3053 | for (const auto &I : CXXRD->bases()) { |
3054 | assert(!I.isVirtual() && !I.getType()->isDependentType() && |
3055 | "Unexpected base class!"); |
3056 | const auto *Base = |
3057 | cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); |
3058 | |
3059 | |
3060 | |
3061 | |
3062 | |
3063 | |
3064 | Class FieldLo, FieldHi; |
3065 | uint64_t Offset = |
3066 | OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base)); |
3067 | classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg); |
3068 | Lo = merge(Lo, FieldLo); |
3069 | Hi = merge(Hi, FieldHi); |
3070 | if (Lo == Memory || Hi == Memory) { |
3071 | postMerge(Size, Lo, Hi); |
3072 | return; |
3073 | } |
3074 | } |
3075 | } |
3076 | |
3077 | |
3078 | unsigned idx = 0; |
3079 | bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <= |
3080 | LangOptions::ClangABI::Ver11 || |
3081 | getContext().getTargetInfo().getTriple().isPS4(); |
3082 | bool IsUnion = RT->isUnionType() && !UseClang11Compat; |
3083 | |
3084 | for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); |
3085 | i != e; ++i, ++idx) { |
3086 | uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); |
3087 | bool BitField = i->isBitField(); |
3088 | |
3089 | |
3090 | if (BitField && i->isUnnamedBitfield()) |
3091 | continue; |
3092 | |
3093 | |
3094 | |
3095 | |
3096 | |
3097 | |
3098 | |
3099 | |
3100 | |
3101 | |
3102 | if (Size > 128 && |
3103 | ((!IsUnion && Size != getContext().getTypeSize(i->getType())) || |
3104 | Size > getNativeVectorSizeForAVXABI(AVXLevel))) { |
3105 | Lo = Memory; |
3106 | postMerge(Size, Lo, Hi); |
3107 | return; |
3108 | } |
3109 | |
3110 | if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { |
3111 | Lo = Memory; |
3112 | postMerge(Size, Lo, Hi); |
3113 | return; |
3114 | } |
3115 | |
3116 | |
3117 | |
3118 | |
3119 | |
3120 | |
3121 | |
3122 | Class FieldLo, FieldHi; |
3123 | |
3124 | |
3125 | |
3126 | |
3127 | if (BitField) { |
3128 | assert(!i->isUnnamedBitfield()); |
3129 | uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); |
3130 | uint64_t Size = i->getBitWidthValue(getContext()); |
3131 | |
3132 | uint64_t EB_Lo = Offset / 64; |
3133 | uint64_t EB_Hi = (Offset + Size - 1) / 64; |
3134 | |
3135 | if (EB_Lo) { |
3136 | assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); |
3137 | FieldLo = NoClass; |
3138 | FieldHi = Integer; |
3139 | } else { |
3140 | FieldLo = Integer; |
3141 | FieldHi = EB_Hi ? Integer : NoClass; |
3142 | } |
3143 | } else |
3144 | classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg); |
3145 | Lo = merge(Lo, FieldLo); |
3146 | Hi = merge(Hi, FieldHi); |
3147 | if (Lo == Memory || Hi == Memory) |
3148 | break; |
3149 | } |
3150 | |
3151 | postMerge(Size, Lo, Hi); |
3152 | } |
3153 | } |
3154 | |
3155 | ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { |
3156 | |
3157 | |
3158 | if (!isAggregateTypeForABI(Ty)) { |
3159 | |
3160 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
3161 | Ty = EnumTy->getDecl()->getIntegerType(); |
3162 | |
3163 | if (Ty->isExtIntType()) |
3164 | return getNaturalAlignIndirect(Ty); |
3165 | |
3166 | return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) |
3167 | : ABIArgInfo::getDirect()); |
3168 | } |
3169 | |
3170 | return getNaturalAlignIndirect(Ty); |
3171 | } |
3172 | |
3173 | bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const { |
3174 | if (const VectorType *VecTy = Ty->getAs<VectorType>()) { |
3175 | uint64_t Size = getContext().getTypeSize(VecTy); |
3176 | unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel); |
3177 | if (Size <= 64 || Size > LargestVector) |
3178 | return true; |
3179 | QualType EltTy = VecTy->getElementType(); |
3180 | if (passInt128VectorsInMem() && |
3181 | (EltTy->isSpecificBuiltinType(BuiltinType::Int128) || |
3182 | EltTy->isSpecificBuiltinType(BuiltinType::UInt128))) |
3183 | return true; |
3184 | } |
3185 | |
3186 | return false; |
3187 | } |
3188 | |
3189 | ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, |
3190 | unsigned freeIntRegs) const { |
3191 | |
3192 | |
3193 | |
3194 | |
3195 | |
3196 | |
3197 | |
3198 | |
3199 | if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) && |
3200 | !Ty->isExtIntType()) { |
3201 | |
3202 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
3203 | Ty = EnumTy->getDecl()->getIntegerType(); |
3204 | |
3205 | return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) |
3206 | : ABIArgInfo::getDirect()); |
3207 | } |
3208 | |
3209 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) |
3210 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
3211 | |
3212 | |
3213 | |
3214 | unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U); |
3215 | |
3216 | |
3217 | |
3218 | |
3219 | |
3220 | |
3221 | |
3222 | |
3223 | |
3224 | |
3225 | |
3226 | |
3227 | |
3228 | |
3229 | |
3230 | |
3231 | |
3232 | |
3233 | |
3234 | |
3235 | |
3236 | |
3237 | if (freeIntRegs == 0) { |
3238 | uint64_t Size = getContext().getTypeSize(Ty); |
3239 | |
3240 | |
3241 | |
3242 | if (Align == 8 && Size <= 64) |
3243 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), |
3244 | Size)); |
3245 | } |
3246 | |
3247 | return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align)); |
3248 | } |
3249 | |
3250 | |
3251 | |
3252 | llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const { |
3253 | |
3254 | |
3255 | if (const Type *InnerTy = isSingleElementStruct(Ty, getContext())) |
3256 | Ty = QualType(InnerTy, 0); |
3257 | |
3258 | llvm::Type *IRType = CGT.ConvertType(Ty); |
3259 | if (isa<llvm::VectorType>(IRType)) { |
3260 | |
3261 | |
3262 | if (passInt128VectorsInMem() && |
3263 | cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) { |
3264 | |
3265 | uint64_t Size = getContext().getTypeSize(Ty); |
3266 | return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()), |
3267 | Size / 64); |
3268 | } |
3269 | |
3270 | return IRType; |
3271 | } |
3272 | |
3273 | if (IRType->getTypeID() == llvm::Type::FP128TyID) |
3274 | return IRType; |
3275 | |
3276 | |
3277 | uint64_t Size = getContext().getTypeSize(Ty); |
3278 | assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!"); |
3279 | |
3280 | |
3281 | |
3282 | return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()), |
3283 | Size / 64); |
3284 | } |
3285 | |
3286 | |
3287 | |
3288 | |
3289 | |
3290 | |
3291 | |
3292 | |
3293 | static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, |
3294 | unsigned EndBit, ASTContext &Context) { |
3295 | |
3296 | |
3297 | |
3298 | unsigned TySize = (unsigned)Context.getTypeSize(Ty); |
3299 | if (TySize <= StartBit) |
3300 | return true; |
3301 | |
3302 | if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { |
3303 | unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); |
3304 | unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); |
3305 | |
3306 | |
3307 | for (unsigned i = 0; i != NumElts; ++i) { |
3308 | |
3309 | unsigned EltOffset = i*EltSize; |
3310 | if (EltOffset >= EndBit) break; |
3311 | |
3312 | unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0; |
3313 | if (!BitsContainNoUserData(AT->getElementType(), EltStart, |
3314 | EndBit-EltOffset, Context)) |
3315 | return false; |
3316 | } |
3317 | |
3318 | return true; |
3319 | } |
3320 | |
3321 | if (const RecordType *RT = Ty->getAs<RecordType>()) { |
3322 | const RecordDecl *RD = RT->getDecl(); |
3323 | const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); |
3324 | |
3325 | |
3326 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { |
3327 | for (const auto &I : CXXRD->bases()) { |
3328 | assert(!I.isVirtual() && !I.getType()->isDependentType() && |
3329 | "Unexpected base class!"); |
3330 | const auto *Base = |
3331 | cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); |
3332 | |
3333 | |
3334 | unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); |
3335 | if (BaseOffset >= EndBit) continue; |
3336 | |
3337 | unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; |
3338 | if (!BitsContainNoUserData(I.getType(), BaseStart, |
3339 | EndBit-BaseOffset, Context)) |
3340 | return false; |
3341 | } |
3342 | } |
3343 | |
3344 | |
3345 | |
3346 | |
3347 | |
3348 | unsigned idx = 0; |
3349 | for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); |
3350 | i != e; ++i, ++idx) { |
3351 | unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); |
3352 | |
3353 | |
3354 | if (FieldOffset >= EndBit) break; |
3355 | |
3356 | unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; |
3357 | if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, |
3358 | Context)) |
3359 | return false; |
3360 | } |
3361 | |
3362 | |
3363 | |
3364 | return true; |
3365 | } |
3366 | |
3367 | return false; |
3368 | } |
3369 | |
3370 | |
3371 | |
3372 | |
3373 | |
3374 | static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, |
3375 | const llvm::DataLayout &TD) { |
3376 | |
3377 | if (IROffset == 0 && IRType->isFloatTy()) |
3378 | return true; |
3379 | |
3380 | |
3381 | if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { |
3382 | const llvm::StructLayout *SL = TD.getStructLayout(STy); |
3383 | unsigned Elt = SL->getElementContainingOffset(IROffset); |
3384 | IROffset -= SL->getElementOffset(Elt); |
3385 | return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); |
3386 | } |
3387 | |
3388 | |
3389 | if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { |
3390 | llvm::Type *EltTy = ATy->getElementType(); |
3391 | unsigned EltSize = TD.getTypeAllocSize(EltTy); |
3392 | IROffset -= IROffset/EltSize*EltSize; |
3393 | return ContainsFloatAtOffset(EltTy, IROffset, TD); |
3394 | } |
3395 | |
3396 | return false; |
3397 | } |
3398 | |
3399 | |
3400 | |
3401 | |
3402 | llvm::Type *X86_64ABIInfo:: |
3403 | GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, |
3404 | QualType SourceTy, unsigned SourceOffset) const { |
3405 | |
3406 | |
3407 | |
3408 | if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, |
3409 | SourceOffset*8+64, getContext())) |
3410 | return llvm::Type::getFloatTy(getVMContext()); |
3411 | |
3412 | |
3413 | |
3414 | |
3415 | if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) && |
3416 | ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout())) |
3417 | return llvm::FixedVectorType::get(llvm::Type::getFloatTy(getVMContext()), |
3418 | 2); |
3419 | |
3420 | return llvm::Type::getDoubleTy(getVMContext()); |
3421 | } |
3422 | |
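[Editor's sketch — not part of TargetInfo.cpp] For an SSE-classified eightbyte the code above picks the narrowest sensible IR type: a lone float in the low half becomes float, two adjacent floats become <2 x float>, and everything else is passed as double. A restatement of that three-way decision, with the two LLVM-side queries replaced by booleans:

    #include <cstdio>

    // Illustrative decision mirroring GetSSETypeAtOffset: the flags stand in
    // for BitsContainNoUserData(...) and the pair of ContainsFloatAtOffset checks.
    static const char *sseEightbyteType(bool OnlyLowFloatUsed, bool TwoFloats) {
      if (OnlyLowFloatUsed) return "float";        // high 32 bits hold no user data
      if (TwoFloats)        return "<2 x float>";  // e.g. struct { float a, b; }
      return "double";                             // default: pass as double
    }

    int main() {
      std::printf("struct { float a; }    -> %s\n", sseEightbyteType(true, false));
      std::printf("struct { float a, b; } -> %s\n", sseEightbyteType(false, true));
      std::printf("struct { double d; }   -> %s\n", sseEightbyteType(false, false));
    }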
3423 | |
3424 | |
3425 | |
3426 | |
3427 | |
3428 | |
3429 | |
3430 | |
3431 | |
3432 | |
3433 | |
3434 | |
3435 | |
3436 | |
3437 | |
3438 | llvm::Type *X86_64ABIInfo:: |
3439 | GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, |
3440 | QualType SourceTy, unsigned SourceOffset) const { |
3441 | |
3442 | |
3443 | if (IROffset == 0) { |
3444 | |
3445 | if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) || |
3446 | IRType->isIntegerTy(64)) |
3447 | return IRType; |
3448 | |
3449 | |
3450 | |
3451 | |
3452 | |
3453 | |
3454 | |
3455 | if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || |
3456 | IRType->isIntegerTy(32) || |
3457 | (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) { |
3458 | unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 : |
3459 | cast<llvm::IntegerType>(IRType)->getBitWidth(); |
3460 | |
3461 | if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, |
3462 | SourceOffset*8+64, getContext())) |
3463 | return IRType; |
3464 | } |
3465 | } |
3466 | |
3467 | if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { |
3468 | |
3469 | const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy); |
3470 | if (IROffset < SL->getSizeInBytes()) { |
3471 | unsigned FieldIdx = SL->getElementContainingOffset(IROffset); |
3472 | IROffset -= SL->getElementOffset(FieldIdx); |
3473 | |
3474 | return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset, |
3475 | SourceTy, SourceOffset); |
3476 | } |
3477 | } |
3478 | |
3479 | if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { |
3480 | llvm::Type *EltTy = ATy->getElementType(); |
3481 | unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy); |
3482 | unsigned EltOffset = IROffset/EltSize*EltSize; |
3483 | return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy, |
3484 | SourceOffset); |
3485 | } |
3486 | |
3487 | |
3488 | |
3489 | unsigned TySizeInBytes = |
3490 | (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); |
3491 | |
3492 | assert(TySizeInBytes != SourceOffset && "Empty field?"); |
3493 | |
3494 | |
3495 | |
3496 | return llvm::IntegerType::get(getVMContext(), |
3497 | std::min(TySizeInBytes-SourceOffset, 8U)*8); |
3498 | } |
3499 | |
3500 | |
3501 | |
3502 | |
3503 | |
3504 | |
3505 | |
3506 | static llvm::Type * |
3507 | GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, |
3508 | const llvm::DataLayout &TD) { |
3509 | |
3510 | |
3511 | |
3512 | |
3513 | unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo); |
3514 | unsigned HiAlign = TD.getABITypeAlignment(Hi); |
3515 | unsigned HiStart = llvm::alignTo(LoSize, HiAlign); |
3516 | assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!"); |
3517 | |
3518 | |
3519 | |
3520 | |
3521 | |
3522 | if (HiStart != 8) { |
3523 | |
3524 | |
3525 | |
3526 | |
3527 | |
3528 | if (Lo->isFloatTy()) |
3529 | Lo = llvm::Type::getDoubleTy(Lo->getContext()); |
3530 | else { |
3531 | assert((Lo->isIntegerTy() || Lo->isPointerTy()) |
3532 | && "Invalid/unknown lo type"); |
3533 | Lo = llvm::Type::getInt64Ty(Lo->getContext()); |
3534 | } |
3535 | } |
3536 | |
3537 | llvm::StructType *Result = llvm::StructType::get(Lo, Hi); |
3538 | |
3539 | |
3540 | assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 && |
3541 | "Invalid x86-64 argument pair!"); |
3542 | return Result; |
3543 | } |
3544 | |
3545 | ABIArgInfo X86_64ABIInfo:: |
3546 | classifyReturnType(QualType RetTy) const { |
3547 | |
3548 | |
3549 | X86_64ABIInfo::Class Lo, Hi; |
3550 | classify(RetTy, 0, Lo, Hi, true); |
3551 | |
3552 | |
3553 | assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); |
3554 | assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); |
3555 | |
3556 | llvm::Type *ResType = nullptr; |
3557 | switch (Lo) { |
3558 | case NoClass: |
3559 | if (Hi == NoClass) |
3560 | return ABIArgInfo::getIgnore(); |
3561 | |
3562 | |
3563 | assert((Hi == SSE || Hi == Integer || Hi == X87Up) && |
3564 | "Unknown missing lo part"); |
3565 | break; |
3566 | |
3567 | case SSEUp: |
3568 | case X87Up: |
3569 | llvm_unreachable("Invalid classification for lo word."); |
3570 | |
3571 | |
3572 | |
3573 | case Memory: |
3574 | return getIndirectReturnResult(RetTy); |
3575 | |
3576 | |
3577 | |
3578 | case Integer: |
3579 | ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); |
3580 | |
3581 | |
3582 | |
3583 | if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { |
3584 | |
3585 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) |
3586 | RetTy = EnumTy->getDecl()->getIntegerType(); |
3587 | |
3588 | if (RetTy->isIntegralOrEnumerationType() && |
3589 | isPromotableIntegerTypeForABI(RetTy)) |
3590 | return ABIArgInfo::getExtend(RetTy); |
3591 | } |
3592 | break; |
3593 | |
3594 | |
3595 | |
3596 | case SSE: |
3597 | ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); |
3598 | break; |
3599 | |
3600 | |
3601 | |
3602 | case X87: |
3603 | ResType = llvm::Type::getX86_FP80Ty(getVMContext()); |
3604 | break; |
3605 | |
3606 | |
3607 | |
3608 | |
3609 | case ComplexX87: |
3610 | assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); |
3611 | ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()), |
3612 | llvm::Type::getX86_FP80Ty(getVMContext())); |
3613 | break; |
3614 | } |
3615 | |
3616 | llvm::Type *HighPart = nullptr; |
3617 | switch (Hi) { |
3618 | |
3619 | |
3620 | case Memory: |
3621 | case X87: |
3622 | llvm_unreachable("Invalid classification for hi word."); |
3623 | |
3624 | case ComplexX87: |
3625 | case NoClass: |
3626 | break; |
3627 | |
3628 | case Integer: |
3629 | HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); |
3630 | if (Lo == NoClass) |
3631 | return ABIArgInfo::getDirect(HighPart, 8); |
3632 | break; |
3633 | case SSE: |
3634 | HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); |
3635 | if (Lo == NoClass) |
3636 | return ABIArgInfo::getDirect(HighPart, 8); |
3637 | break; |
3638 | |
3639 | |
3640 | |
3641 | |
3642 | |
3643 | |
3644 | case SSEUp: |
3645 | assert(Lo == SSE && "Unexpected SSEUp classification."); |
3646 | ResType = GetByteVectorType(RetTy); |
3647 | break; |
3648 | |
3649 | |
3650 | |
3651 | case X87Up: |
3652 | |
3653 | |
3654 | |
3655 | |
3656 | if (Lo != X87) { |
3657 | HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); |
3658 | if (Lo == NoClass) |
3659 | return ABIArgInfo::getDirect(HighPart, 8); |
3660 | } |
3661 | break; |
3662 | } |
3663 | |
3664 | |
3665 | |
3666 | |
3667 | if (HighPart) |
3668 | ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); |
3669 | |
3670 | return ABIArgInfo::getDirect(ResType); |
3671 | } |
3672 | |
3673 | ABIArgInfo X86_64ABIInfo::classifyArgumentType( |
3674 | QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE, |
3675 | bool isNamedArg) |
3676 | const |
3677 | { |
3678 | Ty = useFirstFieldIfTransparentUnion(Ty); |
3679 | |
3680 | X86_64ABIInfo::Class Lo, Hi; |
3681 | classify(Ty, 0, Lo, Hi, isNamedArg); |
3682 | |
3683 | |
3684 | |
3685 | assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); |
3686 | assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); |
3687 | |
3688 | neededInt = 0; |
3689 | neededSSE = 0; |
3690 | llvm::Type *ResType = nullptr; |
3691 | switch (Lo) { |
3692 | case NoClass: |
3693 | if (Hi == NoClass) |
3694 | return ABIArgInfo::getIgnore(); |
3695 | |
3696 | |
3697 | assert((Hi == SSE || Hi == Integer || Hi == X87Up) && |
3698 | "Unknown missing lo part"); |
3699 | break; |
3700 | |
3701 | |
3702 | |
3703 | case Memory: |
3704 | |
3705 | |
3706 | |
3707 | case X87: |
3708 | case ComplexX87: |
3709 | if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect) |
3710 | ++neededInt; |
3711 | return getIndirectResult(Ty, freeIntRegs); |
3712 | |
3713 | case SSEUp: |
3714 | case X87Up: |
3715 | llvm_unreachable("Invalid classification for lo word."); |
3716 | |
3717 | |
3718 | |
3719 | |
3720 | case Integer: |
3721 | ++neededInt; |
3722 | |
3723 | |
3724 | ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); |
3725 | |
3726 | |
3727 | |
3728 | if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { |
3729 | |
3730 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
3731 | Ty = EnumTy->getDecl()->getIntegerType(); |
3732 | |
3733 | if (Ty->isIntegralOrEnumerationType() && |
3734 | isPromotableIntegerTypeForABI(Ty)) |
3735 | return ABIArgInfo::getExtend(Ty); |
3736 | } |
3737 | |
3738 | break; |
3739 | |
3740 | |
3741 | |
3742 | |
3743 | case SSE: { |
3744 | llvm::Type *IRType = CGT.ConvertType(Ty); |
3745 | ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); |
3746 | ++neededSSE; |
3747 | break; |
3748 | } |
3749 | } |
3750 | |
3751 | llvm::Type *HighPart = nullptr; |
3752 | switch (Hi) { |
3753 | |
3754 | |
3755 | |
3756 | case Memory: |
3757 | case X87: |
3758 | case ComplexX87: |
3759 | llvm_unreachable("Invalid classification for hi word."); |
3760 | |
3761 | case NoClass: break; |
3762 | |
3763 | case Integer: |
3764 | ++neededInt; |
3765 | |
3766 | HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); |
3767 | |
3768 | if (Lo == NoClass) |
3769 | return ABIArgInfo::getDirect(HighPart, 8); |
3770 | break; |
3771 | |
3772 | |
3773 | |
3774 | case X87Up: |
3775 | case SSE: |
3776 | HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); |
3777 | |
3778 | if (Lo == NoClass) |
3779 | return ABIArgInfo::getDirect(HighPart, 8); |
3780 | |
3781 | ++neededSSE; |
3782 | break; |
3783 | |
3784 | |
3785 | |
3786 | |
3787 | case SSEUp: |
3788 | assert(Lo == SSE && "Unexpected SSEUp classification"); |
3789 | ResType = GetByteVectorType(Ty); |
3790 | break; |
3791 | } |
3792 | |
3793 | |
3794 | |
3795 | |
3796 | if (HighPart) |
3797 | ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); |
3798 | |
3799 | return ABIArgInfo::getDirect(ResType); |
3800 | } |
3801 | |
3802 | ABIArgInfo |
3803 | X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt, |
3804 | unsigned &NeededSSE) const { |
3805 | auto RT = Ty->getAs<RecordType>(); |
 | 12 | Assuming the object is not a 'RecordType' |
 | 13 | 'RT' initialized to a null pointer value |
3806 | assert(RT && "classifyRegCallStructType only valid with struct types"); |
3807 | |
3808 | if (RT->getDecl()->hasFlexibleArrayMember()) |
 | 14 | Called C++ object pointer is null |
3809 | return getIndirectReturnResult(Ty); |
3810 | |
3811 | |
3812 | if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) { |
3813 | if (CXXRD->isDynamicClass()) { |
3814 | NeededInt = NeededSSE = 0; |
3815 | return getIndirectReturnResult(Ty); |
3816 | } |
3817 | |
3818 | for (const auto &I : CXXRD->bases()) |
3819 | if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE) |
3820 | .isIndirect()) { |
3821 | NeededInt = NeededSSE = 0; |
3822 | return getIndirectReturnResult(Ty); |
3823 | } |
3824 | } |
3825 | |
3826 | |
3827 | for (const auto *FD : RT->getDecl()->fields()) { |
3828 | if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) { |
3829 | if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE) |
3830 | .isIndirect()) { |
3831 | NeededInt = NeededSSE = 0; |
3832 | return getIndirectReturnResult(Ty); |
3833 | } |
3834 | } else { |
3835 | unsigned LocalNeededInt, LocalNeededSSE; |
3836 | if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt, |
3837 | LocalNeededSSE, true) |
3838 | .isIndirect()) { |
3839 | NeededInt = NeededSSE = 0; |
3840 | return getIndirectReturnResult(Ty); |
3841 | } |
3842 | NeededInt += LocalNeededInt; |
3843 | NeededSSE += LocalNeededSSE; |
3844 | } |
3845 | } |
3846 | |
3847 | return ABIArgInfo::getDirect(); |
3848 | } |
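
The warning this report is driving at is anchored in the function above: Ty->getAs<RecordType>() yields a null pointer whenever Ty is not a record type (steps 12-13), the assert is the only guard and disappears when asserts are compiled out, and the following RT->getDecl() call is what step 14 flags as a call through a null object pointer. The sketch below shows one defensive shape for that path; the early-return fallback and the zeroing of the counters are assumptions chosen purely to illustrate a guard, not the change upstream would necessarily make.

    const auto *RT = Ty->getAs<RecordType>();
    if (!RT) {
      // Assumed fallback: treat a non-record type conservatively as an
      // indirect return and consume no registers (illustrative only).
      NeededInt = NeededSSE = 0;
      return getIndirectReturnResult(Ty);
    }
    if (RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectReturnResult(Ty);

In practice the callers shown further down only reach this code after isRecordType() or isStructureOrClassType() has returned true, so the null path may well be a false positive; the guard merely makes that invariant explicit to the analyzer.
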
3849 | |
3850 | ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty, |
3851 | unsigned &NeededInt, |
3852 | unsigned &NeededSSE) const { |
3853 | |
3854 | NeededInt = 0; |
3855 | NeededSSE = 0; |
3856 | |
3857 | return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE); |
 | 11 | Calling 'X86_64ABIInfo::classifyRegCallStructTypeImpl' |
3858 | } |
3859 | |
3860 | void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { |
3861 | |
3862 | const unsigned CallingConv = FI.getCallingConvention(); |
3863 | |
3864 | |
3865 | |
3866 | if (CallingConv == llvm::CallingConv::Win64) { |
 | 1 | Assuming 'CallingConv' is not equal to Win64 |
3867 | WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel); |
3868 | Win64ABIInfo.computeInfo(FI); |
3869 | return; |
3870 | } |
3871 | |
3872 | bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall; |
 | 3 | Assuming 'CallingConv' is equal to X86_RegCall |
3873 | |
3874 | |
3875 | unsigned FreeIntRegs = IsRegCall ? 11 : 6; |
3876 | unsigned FreeSSERegs = IsRegCall ? 16 : 8; |
3877 | unsigned NeededInt, NeededSSE; |
3878 | |
3879 | if (!::classifyReturnType(getCXXABI(), FI, *this)) { |
 | 6 | Assuming the condition is true |
3880 | if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() && |
3881 | !FI.getReturnType()->getTypePtr()->isUnionType()) { |
 | 8 | Assuming the condition is true |
3882 | FI.getReturnInfo() = |
3883 | classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE); |
 | 10 | Calling 'X86_64ABIInfo::classifyRegCallStructType' |
3884 | if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) { |
3885 | FreeIntRegs -= NeededInt; |
3886 | FreeSSERegs -= NeededSSE; |
3887 | } else { |
3888 | FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType()); |
3889 | } |
3890 | } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() && |
3891 | getContext().getCanonicalType(FI.getReturnType() |
3892 | ->getAs<ComplexType>() |
3893 | ->getElementType()) == |
3894 | getContext().LongDoubleTy) |
3895 | |
3896 | |
3897 | FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType()); |
3898 | else |
3899 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
3900 | } |
3901 | |
3902 | |
3903 | |
3904 | if (FI.getReturnInfo().isIndirect()) |
3905 | --FreeIntRegs; |
3906 | |
3907 | |
3908 | if (FI.isChainCall()) |
3909 | ++FreeIntRegs; |
3910 | |
3911 | unsigned NumRequiredArgs = FI.getNumRequiredArgs(); |
3912 | |
3913 | |
3914 | unsigned ArgNo = 0; |
3915 | for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); |
3916 | it != ie; ++it, ++ArgNo) { |
3917 | bool IsNamedArg = ArgNo < NumRequiredArgs; |
3918 | |
3919 | if (IsRegCall && it->type->isStructureOrClassType()) |
3920 | it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE); |
3921 | else |
3922 | it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt, |
3923 | NeededSSE, IsNamedArg); |
3924 | |
3925 | |
3926 | |
3927 | |
3928 | |
3929 | if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) { |
3930 | FreeIntRegs -= NeededInt; |
3931 | FreeSSERegs -= NeededSSE; |
3932 | } else { |
3933 | it->info = getIndirectResult(it->type, FreeIntRegs); |
3934 | } |
3935 | } |
3936 | } |
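
Read together, the path notes trace a single interprocedural scenario: the calling convention is not Win64 (step 1) but is X86_RegCall (step 3), the C++ ABI does not take over the return value (step 6), the return type is a non-union record (step 8), so computeInfo calls classifyRegCallStructType (step 10), which forwards to classifyRegCallStructTypeImpl (step 11), where steps 12-14 report the null-pointer call. The self-contained example below reproduces the same shape in isolation; Shape, Circle and asCircle are hypothetical names used only to show why an assert by itself does not remove the analyzer's null path in builds where asserts are disabled.

    #include <cassert>
    #include <cstdio>

    struct Shape  { virtual ~Shape() = default; };
    struct Circle : Shape { double radius = 1.0; };

    // Mirrors the getAs<RecordType>() pattern: the cast may yield nullptr.
    const Circle *asCircle(const Shape *S) {
      return dynamic_cast<const Circle *>(S);
    }

    double area(const Shape *S) {
      const Circle *C = asCircle(S);                    // may be null
      assert(C && "area() is only valid for circles");  // no-op with -DNDEBUG
      return 3.14159 * C->radius * C->radius;           // null-dereference path
    }

    int main() {
      Circle c;
      std::printf("%f\n", area(&c));   // well-defined call
      // Shape s; area(&s);            // the path a checker would warn about
      return 0;
    }
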
3937 | |
3938 | static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, |
3939 | Address VAListAddr, QualType Ty) { |
3940 | Address overflow_arg_area_p = |
3941 | CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); |
3942 | llvm::Value *overflow_arg_area = |
3943 | CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); |
3944 | |
3945 | |
3946 | |
3947 | |
3948 | |
3949 | CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty); |
3950 | if (Align > CharUnits::fromQuantity(8)) { |
3951 | overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area, |
3952 | Align); |
3953 | } |
3954 | |
3955 | |
3956 | llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); |
3957 | llvm::Value *Res = |
3958 | CGF.Builder.CreateBitCast(overflow_arg_area, |
3959 | llvm::PointerType::getUnqual(LTy)); |
3960 | |
3961 | |
3962 | |
3963 | |
3964 | |
3965 | |
3966 | uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; |
3967 | llvm::Value *Offset = |
3968 | llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); |
3969 | overflow_arg_area = CGF.Builder.CreateGEP(CGF.Int8Ty, overflow_arg_area, |
3970 | Offset, "overflow_arg_area.next"); |
3971 | CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); |
3972 | |
3973 | |
3974 | return Address(Res, Align); |
3975 | } |
3976 | |
3977 | Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
3978 | QualType Ty) const { |
3979 | |
3980 | |
3981 | |
3982 | |
3983 | |
3984 | |
3985 | |
3986 | unsigned neededInt, neededSSE; |
3987 | |
3988 | Ty = getContext().getCanonicalType(Ty); |
3989 | ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE, |
3990 | false); |
3991 | |
3992 | |
3993 | |
3994 | if (!neededInt && !neededSSE) |
3995 | return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty); |
3996 | |
3997 | |
3998 | |
3999 | |
4000 | |
4001 | |
4002 | |
4003 | |
4004 | |
4005 | |
4006 | |
4007 | |
4008 | llvm::Value *InRegs = nullptr; |
4009 | Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid(); |
4010 | llvm::Value *gp_offset = nullptr, *fp_offset = nullptr; |
4011 | if (neededInt) { |
4012 | gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); |
4013 | gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); |
4014 | InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); |
4015 | InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); |
4016 | } |
4017 | |
4018 | if (neededSSE) { |
4019 | fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); |
4020 | fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); |
4021 | llvm::Value *FitsInFP = |
4022 | llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); |
4023 | FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); |
4024 | InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; |
4025 | } |
4026 | |
4027 | llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); |
4028 | llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); |
4029 | llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); |
4030 | CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); |
4031 | |
4032 | |
4033 | |
4034 | CGF.EmitBlock(InRegBlock); |
4035 | |
4036 | |
4037 | |
4038 | |
4039 | |
4040 | |
4041 | |
4042 | |
4043 | |
4044 | |
4045 | |
4046 | llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); |
4047 | llvm::Value *RegSaveArea = CGF.Builder.CreateLoad( |
4048 | CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area"); |
4049 | |
4050 | Address RegAddr = Address::invalid(); |
4051 | if (neededInt && neededSSE) { |
4052 | |
4053 | assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); |
4054 | llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); |
4055 | Address Tmp = CGF.CreateMemTemp(Ty); |
4056 | Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST); |
4057 | assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); |
4058 | llvm::Type *TyLo = ST->getElementType(0); |
4059 | llvm::Type *TyHi = ST->getElementType(1); |
4060 | assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && |
4061 | "Unexpected ABI info for mixed regs"); |
4062 | llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); |
4063 | llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); |
4064 | llvm::Value *GPAddr = |
4065 | CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset); |
4066 | llvm::Value *FPAddr = |
4067 | CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset); |
4068 | llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr; |
4069 | llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr; |
4070 | |
4071 | |
4072 | |
4073 | llvm::Value *V = CGF.Builder.CreateAlignedLoad( |
4074 | TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo), |
4075 | CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo))); |
4076 | CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); |
4077 | |
4078 | |
4079 | V = CGF.Builder.CreateAlignedLoad( |
4080 | TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi), |
4081 | CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi))); |
4082 | CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); |
4083 | |
4084 | RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy); |
4085 | } else if (neededInt) { |
4086 | RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset), |
4087 | CharUnits::fromQuantity(8)); |
4088 | RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); |
4089 | |
4090 | |
4091 | auto TInfo = getContext().getTypeInfoInChars(Ty); |
4092 | uint64_t TySize = TInfo.Width.getQuantity(); |
4093 | CharUnits TyAlign = TInfo.Align; |
4094 | |
4095 | |
4096 | |
4097 | if (TyAlign.getQuantity() > 8) { |
4098 | Address Tmp = CGF.CreateMemTemp(Ty); |
4099 | CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false); |
4100 | RegAddr = Tmp; |
4101 | } |
4102 | |
4103 | } else if (neededSSE == 1) { |
4104 | RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset), |
4105 | CharUnits::fromQuantity(16)); |
4106 | RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); |
4107 | } else { |
4108 | assert(neededSSE == 2 && "Invalid number of needed registers!"); |
4109 | |
4110 | |
4111 | |
4112 | |
4113 | |
4114 | |
4115 | Address RegAddrLo = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, |
4116 | fp_offset), |
4117 | CharUnits::fromQuantity(16)); |
4118 | Address RegAddrHi = |
4119 | CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo, |
4120 | CharUnits::fromQuantity(16)); |
4121 | llvm::Type *ST = AI.canHaveCoerceToType() |
4122 | ? AI.getCoerceToType() |
4123 | : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy); |
4124 | llvm::Value *V; |
4125 | Address Tmp = CGF.CreateMemTemp(Ty); |
4126 | Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST); |
4127 | V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast( |
4128 | RegAddrLo, ST->getStructElementType(0))); |
4129 | CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); |
4130 | V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast( |
4131 | RegAddrHi, ST->getStructElementType(1))); |
4132 | CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); |
4133 | |
4134 | RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy); |
4135 | } |
4136 | |
4137 | |
4138 | |
4139 | |
4140 | if (neededInt) { |
4141 | llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); |
4142 | CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), |
4143 | gp_offset_p); |
4144 | } |
4145 | if (neededSSE) { |
4146 | llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); |
4147 | CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), |
4148 | fp_offset_p); |
4149 | } |
4150 | CGF.EmitBranch(ContBlock); |
4151 | |
4152 | |
4153 | |
4154 | CGF.EmitBlock(InMemBlock); |
4155 | Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty); |
4156 | |
4157 | |
4158 | |
4159 | CGF.EmitBlock(ContBlock); |
4160 | Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock, |
4161 | "vaarg.addr"); |
4162 | return ResAddr; |
4163 | } |
4164 | |
4165 | Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, |
4166 | QualType Ty) const { |
4167 | |
4168 | |
4169 | uint64_t Width = getContext().getTypeSize(Ty); |
4170 | bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width); |
4171 | |
4172 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, |
4173 | CGF.getContext().getTypeInfoInChars(Ty), |
4174 | CharUnits::fromQuantity(8), |
4175 | false); |
4176 | } |
4177 | |
4178 | ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall( |
4179 |     QualType Ty, unsigned &FreeSSERegs, const ABIArgInfo &current) const { |
4180 | const Type *Base = nullptr; |
4181 | uint64_t NumElts = 0; |
4182 | |
4183 | if (!Ty->isBuiltinType() && !Ty->isVectorType() && |
4184 | isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) { |
4185 | FreeSSERegs -= NumElts; |
4186 | return getDirectX86Hva(); |
4187 | } |
4188 | return current; |
4189 | } |
4190 | |
4191 | ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs, |
4192 | bool IsReturnType, bool IsVectorCall, |
4193 | bool IsRegCall) const { |
4194 | |
4195 | if (Ty->isVoidType()) |
4196 | return ABIArgInfo::getIgnore(); |
4197 | |
4198 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
4199 | Ty = EnumTy->getDecl()->getIntegerType(); |
4200 | |
4201 | TypeInfo Info = getContext().getTypeInfo(Ty); |
4202 | uint64_t Width = Info.Width; |
4203 | CharUnits Align = getContext().toCharUnitsFromBits(Info.Align); |
4204 | |
4205 | const RecordType *RT = Ty->getAs<RecordType>(); |
4206 | if (RT) { |
4207 | if (!IsReturnType) { |
4208 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI())) |
4209 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
4210 | } |
4211 | |
4212 | if (RT->getDecl()->hasFlexibleArrayMember()) |
4213 | return getNaturalAlignIndirect(Ty, false); |
4214 | |
4215 | } |
4216 | |
4217 | const Type *Base = nullptr; |
4218 | uint64_t NumElts = 0; |
4219 | |
4220 | |
4221 | if ((IsVectorCall || IsRegCall) && |
4222 | isHomogeneousAggregate(Ty, Base, NumElts)) { |
4223 | if (IsRegCall) { |
4224 | if (FreeSSERegs >= NumElts) { |
4225 | FreeSSERegs -= NumElts; |
4226 | if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType()) |
4227 | return ABIArgInfo::getDirect(); |
4228 | return ABIArgInfo::getExpand(); |
4229 | } |
4230 | return ABIArgInfo::getIndirect(Align, false); |
4231 | } else if (IsVectorCall) { |
4232 | if (FreeSSERegs >= NumElts && |
4233 | (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) { |
4234 | FreeSSERegs -= NumElts; |
4235 | return ABIArgInfo::getDirect(); |
4236 | } else if (IsReturnType) { |
4237 | return ABIArgInfo::getExpand(); |
4238 | } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) { |
4239 | |
4240 | return ABIArgInfo::getIndirect(Align, false); |
4241 | } |
4242 | } |
4243 | } |
4244 | |
4245 | if (Ty->isMemberPointerType()) { |
4246 | |
4247 | |
4248 | llvm::Type *LLTy = CGT.ConvertType(Ty); |
4249 | if (LLTy->isPointerTy() || LLTy->isIntegerTy()) |
4250 | return ABIArgInfo::getDirect(); |
4251 | } |
4252 | |
4253 | if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) { |
4254 | |
4255 | |
4256 | if (Width > 64 || !llvm::isPowerOf2_64(Width)) |
4257 | return getNaturalAlignIndirect(Ty, false); |
4258 | |
4259 | |
4260 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width)); |
4261 | } |
4262 | |
4263 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { |
4264 | switch (BT->getKind()) { |
4265 | case BuiltinType::Bool: |
4266 | |
4267 | |
4268 | return ABIArgInfo::getExtend(Ty); |
4269 | |
4270 | case BuiltinType::LongDouble: |
4271 | |
4272 | |
4273 | if (IsMingw64) { |
4274 | const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); |
4275 | if (LDF == &llvm::APFloat::x87DoubleExtended()) |
4276 | return ABIArgInfo::getIndirect(Align, false); |
4277 | } |
4278 | break; |
4279 | |
4280 | case BuiltinType::Int128: |
4281 | case BuiltinType::UInt128: |
4282 | |
4283 | |
4284 | |
4285 | if (!IsReturnType) |
4286 | return ABIArgInfo::getIndirect(Align, false); |
4287 | |
4288 | |
4289 | |
4290 | return ABIArgInfo::getDirect(llvm::FixedVectorType::get( |
4291 | llvm::Type::getInt64Ty(getVMContext()), 2)); |
4292 | |
4293 | default: |
4294 | break; |
4295 | } |
4296 | } |
4297 | |
4298 | if (Ty->isExtIntType()) { |
4299 | |
4300 | |
4301 | |
4302 | |
4303 | |
4304 | if (Width <= 64) |
4305 | return ABIArgInfo::getDirect(); |
4306 | return ABIArgInfo::getIndirect(Align, false); |
4307 | } |
4308 | |
4309 | return ABIArgInfo::getDirect(); |
4310 | } |
4311 | |
4312 | void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { |
4313 | const unsigned CC = FI.getCallingConvention(); |
4314 | bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall; |
4315 | bool IsRegCall = CC == llvm::CallingConv::X86_RegCall; |
4316 | |
4317 | |
4318 | |
4319 | if (CC == llvm::CallingConv::X86_64_SysV) { |
4320 | X86_64ABIInfo SysVABIInfo(CGT, AVXLevel); |
4321 | SysVABIInfo.computeInfo(FI); |
4322 | return; |
4323 | } |
4324 | |
4325 | unsigned FreeSSERegs = 0; |
4326 | if (IsVectorCall) { |
4327 | |
4328 | FreeSSERegs = 4; |
4329 | } else if (IsRegCall) { |
4330 | |
4331 | FreeSSERegs = 16; |
4332 | } |
4333 | |
4334 | if (!getCXXABI().classifyReturnType(FI)) |
4335 | FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true, |
4336 | IsVectorCall, IsRegCall); |
4337 | |
4338 | if (IsVectorCall) { |
4339 | |
4340 | FreeSSERegs = 6; |
4341 | } else if (IsRegCall) { |
4342 | |
4343 | FreeSSERegs = 16; |
4344 | } |
4345 | |
4346 | unsigned ArgNum = 0; |
4347 | unsigned ZeroSSERegs = 0; |
4348 | for (auto &I : FI.arguments()) { |
4349 | |
4350 | |
4351 | |
4352 | unsigned *MaybeFreeSSERegs = |
4353 | (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs; |
4354 | I.info = |
4355 | classify(I.type, *MaybeFreeSSERegs, false, IsVectorCall, IsRegCall); |
4356 | ++ArgNum; |
4357 | } |
4358 | |
4359 | if (IsVectorCall) { |
4360 | |
4361 | |
4362 | for (auto &I : FI.arguments()) |
4363 | I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info); |
4364 | } |
4365 | } |
4366 | |
4367 | Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
4368 | QualType Ty) const { |
4369 | |
4370 | |
4371 | uint64_t Width = getContext().getTypeSize(Ty); |
4372 | bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width); |
4373 | |
4374 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, |
4375 | CGF.getContext().getTypeInfoInChars(Ty), |
4376 | CharUnits::fromQuantity(8), |
4377 | false); |
4378 | } |
4379 | |
4380 | static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
4381 | llvm::Value *Address, bool Is64Bit, |
4382 | bool IsAIX) { |
4383 | |
4384 | |
4385 | |
4386 | CodeGen::CGBuilderTy &Builder = CGF.Builder; |
4387 | |
4388 | llvm::IntegerType *i8 = CGF.Int8Ty; |
4389 | llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); |
4390 | llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); |
4391 | llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); |
4392 | |
4393 | |
4394 | AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31); |
4395 | |
4396 | |
4397 | AssignToArrayRange(Builder, Address, Eight8, 32, 63); |
4398 | |
4399 | |
4400 | |
4401 | |
4402 | |
4403 | |
4404 | AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67); |
4405 | |
4406 | |
4407 | |
4408 | |
4409 | AssignToArrayRange(Builder, Address, Four8, 68, 76); |
4410 | |
4411 | |
4412 | AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); |
4413 | |
4414 | |
4415 | |
4416 | AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110); |
4417 | |
4418 | |
4419 | if (IsAIX) |
4420 | return false; |
4421 | |
4422 | |
4423 | |
4424 | |
4425 | AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113); |
4426 | |
4427 | if (!Is64Bit) |
4428 | return false; |
4429 | |
4430 | |
4431 | |
4432 | |
4433 | |
4434 | |
4435 | |
4436 | AssignToArrayRange(Builder, Address, Eight8, 114, 116); |
4437 | |
4438 | return false; |
4439 | } |
4440 | |
4441 | |
4442 | namespace { |
4443 | |
4444 | class AIXABIInfo : public ABIInfo { |
4445 | const bool Is64Bit; |
4446 | const unsigned PtrByteSize; |
4447 | CharUnits getParamTypeAlignment(QualType Ty) const; |
4448 | |
4449 | public: |
4450 | AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit) |
4451 | : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {} |
4452 | |
4453 | bool isPromotableTypeForABI(QualType Ty) const; |
4454 | |
4455 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
4456 | ABIArgInfo classifyArgumentType(QualType Ty) const; |
4457 | |
4458 | void computeInfo(CGFunctionInfo &FI) const override { |
4459 | if (!getCXXABI().classifyReturnType(FI)) |
4460 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
4461 | |
4462 | for (auto &I : FI.arguments()) |
4463 | I.info = classifyArgumentType(I.type); |
4464 | } |
4465 | |
4466 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
4467 | QualType Ty) const override; |
4468 | }; |
4469 | |
4470 | class AIXTargetCodeGenInfo : public TargetCodeGenInfo { |
4471 | const bool Is64Bit; |
4472 | |
4473 | public: |
4474 | AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit) |
4475 | : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)), |
4476 | Is64Bit(Is64Bit) {} |
4477 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { |
4478 | return 1; |
4479 | } |
4480 | |
4481 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
4482 | llvm::Value *Address) const override; |
4483 | }; |
4484 | } |
4485 | |
4486 | |
4487 | |
4488 | bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const { |
4489 | |
4490 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
4491 | Ty = EnumTy->getDecl()->getIntegerType(); |
4492 | |
4493 | |
4494 | if (Ty->isPromotableIntegerType()) |
4495 | return true; |
4496 | |
4497 | if (!Is64Bit) |
4498 | return false; |
4499 | |
4500 | |
4501 | |
4502 | |
4503 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) |
4504 | switch (BT->getKind()) { |
4505 | case BuiltinType::Int: |
4506 | case BuiltinType::UInt: |
4507 | return true; |
4508 | default: |
4509 | break; |
4510 | } |
4511 | |
4512 | return false; |
4513 | } |
4514 | |
4515 | ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const { |
4516 | if (RetTy->isAnyComplexType()) |
4517 | return ABIArgInfo::getDirect(); |
4518 | |
4519 | if (RetTy->isVectorType()) |
4520 | return ABIArgInfo::getDirect(); |
4521 | |
4522 | if (RetTy->isVoidType()) |
4523 | return ABIArgInfo::getIgnore(); |
4524 | |
4525 | if (isAggregateTypeForABI(RetTy)) |
4526 | return getNaturalAlignIndirect(RetTy); |
4527 | |
4528 | return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) |
4529 | : ABIArgInfo::getDirect()); |
4530 | } |
4531 | |
4532 | ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const { |
4533 | Ty = useFirstFieldIfTransparentUnion(Ty); |
4534 | |
4535 | if (Ty->isAnyComplexType()) |
4536 | return ABIArgInfo::getDirect(); |
4537 | |
4538 | if (Ty->isVectorType()) |
4539 | return ABIArgInfo::getDirect(); |
4540 | |
4541 | if (isAggregateTypeForABI(Ty)) { |
4542 | |
4543 | |
4544 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) |
4545 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
4546 | |
4547 | CharUnits CCAlign = getParamTypeAlignment(Ty); |
4548 | CharUnits TyAlign = getContext().getTypeAlignInChars(Ty); |
4549 | |
4550 | return ABIArgInfo::getIndirect(CCAlign, true, |
4551 | TyAlign > CCAlign); |
4552 | } |
4553 | |
4554 | return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) |
4555 | : ABIArgInfo::getDirect()); |
4556 | } |
4557 | |
4558 | CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const { |
4559 | |
4560 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) |
4561 | Ty = CTy->getElementType(); |
4562 | |
4563 | if (Ty->isVectorType()) |
4564 | return CharUnits::fromQuantity(16); |
4565 | |
4566 | |
4567 | if (isRecordWithSIMDVectorType(getContext(), Ty)) |
4568 | return CharUnits::fromQuantity(16); |
4569 | |
4570 | return CharUnits::fromQuantity(PtrByteSize); |
4571 | } |
4572 | |
4573 | Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
4574 | QualType Ty) const { |
4575 | if (Ty->isAnyComplexType()) |
4576 | llvm::report_fatal_error("complex type is not supported on AIX yet"); |
4577 | |
4578 | auto TypeInfo = getContext().getTypeInfoInChars(Ty); |
4579 | TypeInfo.Align = getParamTypeAlignment(Ty); |
4580 | |
4581 | CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize); |
4582 | |
4583 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, false, TypeInfo, |
4584 | SlotSize, true); |
4585 | } |
4586 | |
4587 | bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable( |
4588 | CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const { |
4589 | return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, true); |
4590 | } |
4591 | |
4592 | |
4593 | namespace { |
4594 | |
4595 | class PPC32_SVR4_ABIInfo : public DefaultABIInfo { |
4596 | bool IsSoftFloatABI; |
4597 | bool IsRetSmallStructInRegABI; |
4598 | |
4599 | CharUnits getParamTypeAlignment(QualType Ty) const; |
4600 | |
4601 | public: |
4602 | PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI, |
4603 | bool RetSmallStructInRegABI) |
4604 | : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI), |
4605 | IsRetSmallStructInRegABI(RetSmallStructInRegABI) {} |
4606 | |
4607 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
4608 | |
4609 | void computeInfo(CGFunctionInfo &FI) const override { |
4610 | if (!getCXXABI().classifyReturnType(FI)) |
4611 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
4612 | for (auto &I : FI.arguments()) |
4613 | I.info = classifyArgumentType(I.type); |
4614 | } |
4615 | |
4616 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
4617 | QualType Ty) const override; |
4618 | }; |
4619 | |
4620 | class PPC32TargetCodeGenInfo : public TargetCodeGenInfo { |
4621 | public: |
4622 | PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI, |
4623 | bool RetSmallStructInRegABI) |
4624 | : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>( |
4625 | CGT, SoftFloatABI, RetSmallStructInRegABI)) {} |
4626 | |
4627 | static bool isStructReturnInRegABI(const llvm::Triple &Triple, |
4628 | const CodeGenOptions &Opts); |
4629 | |
4630 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { |
4631 | |
4632 | return 1; |
4633 | } |
4634 | |
4635 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
4636 | llvm::Value *Address) const override; |
4637 | }; |
4638 | } |
4639 | |
4640 | CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const { |
4641 | |
4642 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) |
4643 | Ty = CTy->getElementType(); |
4644 | |
4645 | if (Ty->isVectorType()) |
4646 | return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 |
4647 | : 4); |
4648 | |
4649 | |
4650 | |
4651 | const Type *AlignTy = nullptr; |
4652 | if (const Type *EltType = isSingleElementStruct(Ty, getContext())) { |
4653 | const BuiltinType *BT = EltType->getAs<BuiltinType>(); |
4654 | if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) || |
4655 | (BT && BT->isFloatingPoint())) |
4656 | AlignTy = EltType; |
4657 | } |
4658 | |
4659 | if (AlignTy) |
4660 | return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4); |
4661 | return CharUnits::fromQuantity(4); |
4662 | } |
4663 | |
4664 | ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { |
4665 | uint64_t Size; |
4666 | |
4667 | |
4668 | if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI && |
4669 | (Size = getContext().getTypeSize(RetTy)) <= 64) { |
4670 | |
4671 | |
4672 | |
4673 | |
4674 | |
4675 | |
4676 | |
4677 | |
4678 | |
4679 | |
4680 | |
4681 | if (Size == 0) |
4682 | return ABIArgInfo::getIgnore(); |
4683 | else { |
4684 | llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size); |
4685 | return ABIArgInfo::getDirect(CoerceTy); |
4686 | } |
4687 | } |
4688 | |
4689 | return DefaultABIInfo::classifyReturnType(RetTy); |
4690 | } |
4691 | |
4692 | |
4693 | |
4694 | Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList, |
4695 | QualType Ty) const { |
4696 | if (getTarget().getTriple().isOSDarwin()) { |
4697 | auto TI = getContext().getTypeInfoInChars(Ty); |
4698 | TI.Align = getParamTypeAlignment(Ty); |
4699 | |
4700 | CharUnits SlotSize = CharUnits::fromQuantity(4); |
4701 | return emitVoidPtrVAArg(CGF, VAList, Ty, |
4702 | classifyArgumentType(Ty).isIndirect(), TI, SlotSize, |
4703 | true); |
4704 | } |
4705 | |
4706 | const unsigned OverflowLimit = 8; |
4707 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { |
4708 | |
4709 | (void)CTy; |
4710 | return Address::invalid(); |
4711 | } |
4712 | |
4713 | |
4714 | |
4715 | |
4716 | |
4717 | |
4718 | |
4719 | |
4720 | |
4721 | bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64; |
4722 | bool isInt = !Ty->isFloatingType(); |
4723 | bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64; |
4724 | |
4725 | |
4726 | |
4727 | bool isIndirect = isAggregateTypeForABI(Ty); |
4728 | |
4729 | CGBuilderTy &Builder = CGF.Builder; |
4730 | |
4731 | |
4732 | Address NumRegsAddr = Address::invalid(); |
4733 | if (isInt || IsSoftFloatABI) { |
4734 | NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr"); |
4735 | } else { |
4736 | NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr"); |
4737 | } |
4738 | |
4739 | llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs"); |
4740 | |
4741 | |
4742 | if (isI64 || (isF64 && IsSoftFloatABI)) { |
4743 | NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1)); |
4744 | NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U)); |
4745 | } |
4746 | |
4747 | llvm::Value *CC = |
4748 | Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond"); |
4749 | |
4750 | llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs"); |
4751 | llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow"); |
4752 | llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); |
4753 | |
4754 | Builder.CreateCondBr(CC, UsingRegs, UsingOverflow); |
4755 | |
4756 | llvm::Type *DirectTy = CGF.ConvertType(Ty); |
4757 | if (isIndirect) DirectTy = DirectTy->getPointerTo(0); |
4758 | |
4759 | |
4760 | Address RegAddr = Address::invalid(); |
4761 | { |
4762 | CGF.EmitBlock(UsingRegs); |
4763 | |
4764 | Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4); |
4765 | RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr), |
4766 | CharUnits::fromQuantity(8)); |
4767 | assert(RegAddr.getElementType() == CGF.Int8Ty); |
4768 | |
4769 | |
4770 | if (!(isInt || IsSoftFloatABI)) { |
4771 | RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr, |
4772 | CharUnits::fromQuantity(32)); |
4773 | } |
4774 | |
4775 | |
4776 | |
4777 | CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8); |
4778 | llvm::Value *RegOffset = |
4779 | Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity())); |
4780 | RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty, |
4781 | RegAddr.getPointer(), RegOffset), |
4782 | RegAddr.getAlignment().alignmentOfArrayElement(RegSize)); |
4783 | RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy); |
4784 | |
4785 | |
4786 | NumRegs = |
4787 | Builder.CreateAdd(NumRegs, |
4788 | Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1)); |
4789 | Builder.CreateStore(NumRegs, NumRegsAddr); |
4790 | |
4791 | CGF.EmitBranch(Cont); |
4792 | } |
4793 | |
4794 | |
4795 | Address MemAddr = Address::invalid(); |
4796 | { |
4797 | CGF.EmitBlock(UsingOverflow); |
4798 | |
4799 | Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr); |
4800 | |
4801 | |
4802 | CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4); |
4803 | |
4804 | CharUnits Size; |
4805 | if (!isIndirect) { |
4806 | auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty); |
4807 | Size = TypeInfo.Width.alignTo(OverflowAreaAlign); |
4808 | } else { |
4809 | Size = CGF.getPointerSize(); |
4810 | } |
4811 | |
4812 | Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3); |
4813 | Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"), |
4814 | OverflowAreaAlign); |
4815 | |
4816 | CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty); |
4817 | if (Align > OverflowAreaAlign) { |
4818 | llvm::Value *Ptr = OverflowArea.getPointer(); |
4819 | OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align), |
4820 | Align); |
4821 | } |
4822 | |
4823 | MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy); |
4824 | |
4825 | |
4826 | OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size); |
4827 | Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr); |
4828 | CGF.EmitBranch(Cont); |
4829 | } |
4830 | |
4831 | CGF.EmitBlock(Cont); |
4832 | |
4833 | |
4834 | Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow, |
4835 | "vaarg.addr"); |
4836 | |
4837 | |
4838 | if (isIndirect) { |
4839 | Result = Address(Builder.CreateLoad(Result, "aggr"), |
4840 | getContext().getTypeAlignInChars(Ty)); |
4841 | } |
4842 | |
4843 | return Result; |
4844 | } |
4845 | |
4846 | bool PPC32TargetCodeGenInfo::isStructReturnInRegABI( |
4847 | const llvm::Triple &Triple, const CodeGenOptions &Opts) { |
4848 | assert(Triple.isPPC32()); |
4849 | |
4850 | switch (Opts.getStructReturnConvention()) { |
4851 | case CodeGenOptions::SRCK_Default: |
4852 | break; |
4853 | case CodeGenOptions::SRCK_OnStack: |
4854 | return false; |
4855 | case CodeGenOptions::SRCK_InRegs: |
4856 | return true; |
4857 | } |
4858 | |
4859 | if (Triple.isOSBinFormatELF() && !Triple.isOSLinux()) |
4860 | return true; |
4861 | |
4862 | return false; |
4863 | } |
4864 | |
4865 | bool |
4866 | PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
4867 | llvm::Value *Address) const { |
4868 | return PPC_initDwarfEHRegSizeTable(CGF, Address, false, |
4869 | false); |
4870 | } |
4871 | |
4872 | |
4873 | |
4874 | namespace { |
4875 | |
4876 | class PPC64_SVR4_ABIInfo : public SwiftABIInfo { |
4877 | public: |
4878 | enum ABIKind { |
4879 | ELFv1 = 0, |
4880 | ELFv2 |
4881 | }; |
4882 | |
4883 | private: |
4884 | static const unsigned GPRBits = 64; |
4885 | ABIKind Kind; |
4886 | bool IsSoftFloatABI; |
4887 | |
4888 | public: |
4889 | PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, |
4890 | bool SoftFloatABI) |
4891 | : SwiftABIInfo(CGT), Kind(Kind), IsSoftFloatABI(SoftFloatABI) {} |
4892 | |
4893 | bool isPromotableTypeForABI(QualType Ty) const; |
4894 | CharUnits getParamTypeAlignment(QualType Ty) const; |
4895 | |
4896 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
4897 | ABIArgInfo classifyArgumentType(QualType Ty) const; |
4898 | |
4899 | bool isHomogeneousAggregateBaseType(QualType Ty) const override; |
4900 | bool isHomogeneousAggregateSmallEnough(const Type *Ty, |
4901 | uint64_t Members) const override; |
4902 | |
4903 | |
4904 | |
4905 | |
4906 | |
4907 | |
4908 | |
4909 | void computeInfo(CGFunctionInfo &FI) const override { |
4910 | if (!getCXXABI().classifyReturnType(FI)) |
4911 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
4912 | for (auto &I : FI.arguments()) { |
4913 | |
4914 | |
4915 | |
4916 | const Type *T = isSingleElementStruct(I.type, getContext()); |
4917 | if (T) { |
4918 | const BuiltinType *BT = T->getAs<BuiltinType>(); |
4919 | if ((T->isVectorType() && getContext().getTypeSize(T) == 128) || |
4920 | (BT && BT->isFloatingPoint())) { |
4921 | QualType QT(T, 0); |
4922 | I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); |
4923 | continue; |
4924 | } |
4925 | } |
4926 | I.info = classifyArgumentType(I.type); |
4927 | } |
4928 | } |
4929 | |
4930 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
4931 | QualType Ty) const override; |
4932 | |
4933 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, |
4934 | bool asReturnValue) const override { |
4935 | return occupiesMoreThan(CGT, scalars, 4); |
4936 | } |
4937 | |
4938 | bool isSwiftErrorInRegister() const override { |
4939 | return false; |
4940 | } |
4941 | }; |
4942 | |
4943 | class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { |
4944 | |
4945 | public: |
4946 | PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, |
4947 | PPC64_SVR4_ABIInfo::ABIKind Kind, |
4948 | bool SoftFloatABI) |
4949 | : TargetCodeGenInfo( |
4950 | std::make_unique<PPC64_SVR4_ABIInfo>(CGT, Kind, SoftFloatABI)) {} |
4951 | |
4952 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { |
4953 | |
4954 | return 1; |
4955 | } |
4956 | |
4957 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
4958 | llvm::Value *Address) const override; |
4959 | }; |
4960 | |
4961 | class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { |
4962 | public: |
4963 | PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} |
4964 | |
4965 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { |
4966 | |
4967 | return 1; |
4968 | } |
4969 | |
4970 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
4971 | llvm::Value *Address) const override; |
4972 | }; |
4973 | |
4974 | } |
4975 | |
4976 | |
4977 | |
4978 | bool |
4979 | PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const { |
4980 | |
4981 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
4982 | Ty = EnumTy->getDecl()->getIntegerType(); |
4983 | |
4984 | |
4985 | if (isPromotableIntegerTypeForABI(Ty)) |
4986 | return true; |
4987 | |
4988 | |
4989 | |
4990 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) |
4991 | switch (BT->getKind()) { |
4992 | case BuiltinType::Int: |
4993 | case BuiltinType::UInt: |
4994 | return true; |
4995 | default: |
4996 | break; |
4997 | } |
4998 | |
4999 | if (const auto *EIT = Ty->getAs<ExtIntType>()) |
5000 | if (EIT->getNumBits() < 64) |
5001 | return true; |
5002 | |
5003 | return false; |
5004 | } |
5005 | |
5006 | |
5007 | |
5008 | CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const { |
5009 | |
5010 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) |
5011 | Ty = CTy->getElementType(); |
5012 | |
5013 | |
5014 | |
5015 | if (Ty->isVectorType()) { |
5016 | return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8); |
5017 | } else if (Ty->isRealFloatingType() && |
5018 | &getContext().getFloatTypeSemantics(Ty) == |
5019 | &llvm::APFloat::IEEEquad()) { |
5020 | |
5021 | |
5022 | |
5023 | return CharUnits::fromQuantity(16); |
5024 | } |
5025 | |
5026 | |
5027 | |
5028 | const Type *AlignAsType = nullptr; |
5029 | const Type *EltType = isSingleElementStruct(Ty, getContext()); |
5030 | if (EltType) { |
5031 | const BuiltinType *BT = EltType->getAs<BuiltinType>(); |
5032 | if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) || |
5033 | (BT && BT->isFloatingPoint())) |
5034 | AlignAsType = EltType; |
5035 | } |
5036 | |
5037 | |
5038 | const Type *Base = nullptr; |
5039 | uint64_t Members = 0; |
5040 | if (!AlignAsType && Kind == ELFv2 && |
5041 | isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members)) |
5042 | AlignAsType = Base; |
5043 | |
5044 | |
5045 | if (AlignAsType) { |
5046 | return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8); |
5047 | } |
5048 | |
5049 | |
5050 | |
5051 | if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) { |
5052 | return CharUnits::fromQuantity(16); |
5053 | } |
5054 | |
5055 | return CharUnits::fromQuantity(8); |
5056 | } |
5057 | |
5058 | |
5059 | |
5060 | |
5061 | bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base, |
5062 | uint64_t &Members) const { |
5063 | if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { |
5064 | uint64_t NElements = AT->getSize().getZExtValue(); |
5065 | if (NElements == 0) |
5066 | return false; |
5067 | if (!isHomogeneousAggregate(AT->getElementType(), Base, Members)) |
5068 | return false; |
5069 | Members *= NElements; |
5070 | } else if (const RecordType *RT = Ty->getAs<RecordType>()) { |
5071 | const RecordDecl *RD = RT->getDecl(); |
5072 | if (RD->hasFlexibleArrayMember()) |
5073 | return false; |
5074 | |
5075 | Members = 0; |
5076 | |
5077 | |
5078 | |
5079 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { |
5080 | if (!getCXXABI().isPermittedToBeHomogeneousAggregate(CXXRD)) |
5081 | return false; |
5082 | |
5083 | for (const auto &I : CXXRD->bases()) { |
5084 | |
5085 | if (isEmptyRecord(getContext(), I.getType(), true)) |
5086 | continue; |
5087 | |
5088 | uint64_t FldMembers; |
5089 | if (!isHomogeneousAggregate(I.getType(), Base, FldMembers)) |
5090 | return false; |
5091 | |
5092 | Members += FldMembers; |
5093 | } |
5094 | } |
5095 | |
5096 | for (const auto *FD : RD->fields()) { |
5097 | |
5098 | QualType FT = FD->getType(); |
5099 | while (const ConstantArrayType *AT = |
5100 | getContext().getAsConstantArrayType(FT)) { |
5101 | if (AT->getSize().getZExtValue() == 0) |
5102 | return false; |
5103 | FT = AT->getElementType(); |
5104 | } |
5105 | if (isEmptyRecord(getContext(), FT, true)) |
5106 | continue; |
5107 | |
5108 | |
5109 | if (getContext().getLangOpts().CPlusPlus && |
5110 | FD->isZeroLengthBitField(getContext())) |
5111 | continue; |
5112 | |
5113 | uint64_t FldMembers; |
5114 | if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers)) |
5115 | return false; |
5116 | |
5117 | Members = (RD->isUnion() ? |
5118 | std::max(Members, FldMembers) : Members + FldMembers); |
5119 | } |
5120 | |
5121 | if (!Base) |
5122 | return false; |
5123 | |
5124 | |
5125 | if (getContext().getTypeSize(Base) * Members != |
5126 | getContext().getTypeSize(Ty)) |
5127 | return false; |
5128 | } else { |
5129 | Members = 1; |
5130 | if (const ComplexType *CT = Ty->getAs<ComplexType>()) { |
5131 | Members = 2; |
5132 | Ty = CT->getElementType(); |
5133 | } |
5134 | |
5135 | |
5136 | if (!isHomogeneousAggregateBaseType(Ty)) |
5137 | return false; |
5138 | |
5139 | |
5140 | |
5141 | |
5142 | const Type *TyPtr = Ty.getTypePtr(); |
5143 | if (!Base) { |
5144 | Base = TyPtr; |
5145 | |
5146 | |
5147 | if (const VectorType *VT = Base->getAs<VectorType>()) { |
5148 | QualType EltTy = VT->getElementType(); |
5149 | unsigned NumElements = |
5150 | getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy); |
5151 | Base = getContext() |
5152 | .getVectorType(EltTy, NumElements, VT->getVectorKind()) |
5153 | .getTypePtr(); |
5154 | } |
5155 | } |
5156 | |
5157 | if (Base->isVectorType() != TyPtr->isVectorType() || |
5158 | getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr)) |
5159 | return false; |
5160 | } |
5161 | return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members); |
5162 | } |
5163 | |
5164 | bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { |
5165 | |
5166 | |
5167 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { |
5168 | if (BT->getKind() == BuiltinType::Float || |
5169 | BT->getKind() == BuiltinType::Double || |
5170 | BT->getKind() == BuiltinType::LongDouble || |
5171 | (getContext().getTargetInfo().hasFloat128Type() && |
5172 | (BT->getKind() == BuiltinType::Float128))) { |
5173 | if (IsSoftFloatABI) |
5174 | return false; |
5175 | return true; |
5176 | } |
5177 | } |
5178 | if (const VectorType *VT = Ty->getAs<VectorType>()) { |
5179 | if (getContext().getTypeSize(VT) == 128) |
5180 | return true; |
5181 | } |
5182 | return false; |
5183 | } |
5184 | |
5185 | bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough( |
5186 | const Type *Base, uint64_t Members) const { |
5187 | |
5188 | |
5189 | uint32_t NumRegs = |
5190 | ((getContext().getTargetInfo().hasFloat128Type() && |
5191 | Base->isFloat128Type()) || |
5192 | Base->isVectorType()) ? 1 |
5193 | : (getContext().getTypeSize(Base) + 63) / 64; |
5194 | |
5195 | |
5196 | return Members * NumRegs <= 8; |
5197 | } |
5198 | |
5199 | ABIArgInfo |
5200 | PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const { |
5201 | Ty = useFirstFieldIfTransparentUnion(Ty); |
5202 | |
5203 | if (Ty->isAnyComplexType()) |
5204 | return ABIArgInfo::getDirect(); |
5205 | |
5206 | |
5207 | |
5208 | if (Ty->isVectorType()) { |
5209 | uint64_t Size = getContext().getTypeSize(Ty); |
5210 | if (Size > 128) |
5211 | return getNaturalAlignIndirect(Ty, false); |
5212 | else if (Size < 128) { |
5213 | llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); |
5214 | return ABIArgInfo::getDirect(CoerceTy); |
5215 | } |
5216 | } |
5217 | |
5218 | if (const auto *EIT = Ty->getAs<ExtIntType>()) |
5219 | if (EIT->getNumBits() > 128) |
5220 | return getNaturalAlignIndirect(Ty, true); |
5221 | |
5222 | if (isAggregateTypeForABI(Ty)) { |
5223 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) |
5224 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
5225 | |
5226 | uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity(); |
5227 | uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity(); |
5228 | |
5229 | |
5230 | const Type *Base = nullptr; |
5231 | uint64_t Members = 0; |
5232 | if (Kind == ELFv2 && |
5233 | isHomogeneousAggregate(Ty, Base, Members)) { |
5234 | llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); |
5235 | llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); |
5236 | return ABIArgInfo::getDirect(CoerceTy); |
5237 | } |
5238 | |
5239 | |
5240 | |
5241 | |
5242 | |
5243 | uint64_t Bits = getContext().getTypeSize(Ty); |
5244 | if (Bits > 0 && Bits <= 8 * GPRBits) { |
5245 | llvm::Type *CoerceTy; |
5246 | |
5247 | |
5248 | |
5249 | if (Bits <= GPRBits) |
5250 | CoerceTy = |
5251 | llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8)); |
5252 | |
5253 | |
5254 | else { |
5255 | uint64_t RegBits = ABIAlign * 8; |
5256 | uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits; |
5257 | llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits); |
5258 | CoerceTy = llvm::ArrayType::get(RegTy, NumRegs); |
5259 | } |
5260 | |
5261 | return ABIArgInfo::getDirect(CoerceTy); |
5262 | } |
5263 | |
5264 | |
5265 | return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign), |
5266 | true, |
5267 | TyAlign > ABIAlign); |
5268 | } |
5269 | |
5270 | return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) |
5271 | : ABIArgInfo::getDirect()); |
5272 | } |
5273 | |
5274 | ABIArgInfo |
5275 | PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { |
5276 | if (RetTy->isVoidType()) |
5277 | return ABIArgInfo::getIgnore(); |
5278 | |
5279 | if (RetTy->isAnyComplexType()) |
5280 | return ABIArgInfo::getDirect(); |
5281 | |
5282 | |
5283 | |
5284 | if (RetTy->isVectorType()) { |
5285 | uint64_t Size = getContext().getTypeSize(RetTy); |
5286 | if (Size > 128) |
5287 | return getNaturalAlignIndirect(RetTy); |
5288 | else if (Size < 128) { |
5289 | llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); |
5290 | return ABIArgInfo::getDirect(CoerceTy); |
5291 | } |
5292 | } |
5293 | |
5294 | if (const auto *EIT = RetTy->getAs<ExtIntType>()) |
5295 | if (EIT->getNumBits() > 128) |
5296 | return getNaturalAlignIndirect(RetTy, false); |
5297 | |
5298 | if (isAggregateTypeForABI(RetTy)) { |
5299 | |
5300 | const Type *Base = nullptr; |
5301 | uint64_t Members = 0; |
5302 | if (Kind == ELFv2 && |
5303 | isHomogeneousAggregate(RetTy, Base, Members)) { |
5304 | llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); |
5305 | llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); |
5306 | return ABIArgInfo::getDirect(CoerceTy); |
5307 | } |
5308 | |
5309 | |
5310 | uint64_t Bits = getContext().getTypeSize(RetTy); |
5311 | if (Kind == ELFv2 && Bits <= 2 * GPRBits) { |
5312 | if (Bits == 0) |
5313 | return ABIArgInfo::getIgnore(); |
5314 | |
5315 | llvm::Type *CoerceTy; |
5316 | if (Bits > GPRBits) { |
5317 | CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits); |
5318 | CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy); |
5319 | } else |
5320 | CoerceTy = |
5321 | llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8)); |
5322 | return ABIArgInfo::getDirect(CoerceTy); |
5323 | } |
5324 | |
5325 | |
5326 | return getNaturalAlignIndirect(RetTy); |
5327 | } |
5328 | |
5329 | return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) |
5330 | : ABIArgInfo::getDirect()); |
5331 | } |
5332 | |
5333 | |
5334 | Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
5335 | QualType Ty) const { |
5336 | auto TypeInfo = getContext().getTypeInfoInChars(Ty); |
5337 | TypeInfo.Align = getParamTypeAlignment(Ty); |
5338 | |
5339 | CharUnits SlotSize = CharUnits::fromQuantity(8); |
5340 | |
5341 | |
5342 | |
5343 | |
5344 | |
5345 | |
5346 | |
5347 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { |
5348 | CharUnits EltSize = TypeInfo.Width / 2; |
5349 | if (EltSize < SlotSize) { |
5350 | Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty, |
5351 | SlotSize * 2, SlotSize, |
5352 | SlotSize, true); |
5353 | |
5354 | Address RealAddr = Addr; |
5355 | Address ImagAddr = RealAddr; |
5356 | if (CGF.CGM.getDataLayout().isBigEndian()) { |
5357 | RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, |
5358 | SlotSize - EltSize); |
5359 | ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr, |
5360 | 2 * SlotSize - EltSize); |
5361 | } else { |
5362 | ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize); |
5363 | } |
5364 | |
5365 | llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType()); |
5366 | RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy); |
5367 | ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy); |
5368 | llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal"); |
5369 | llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag"); |
5370 | |
5371 | Address Temp = CGF.CreateMemTemp(Ty, "vacplx"); |
5372 | CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty), |
5373 | true); |
5374 | return Temp; |
5375 | } |
5376 | } |
5377 | |
5378 | |
5379 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, false, |
5380 | TypeInfo, SlotSize, true); |
5381 | } |
5382 | |
5383 | bool |
5384 | PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( |
5385 | CodeGen::CodeGenFunction &CGF, |
5386 | llvm::Value *Address) const { |
5387 | return PPC_initDwarfEHRegSizeTable(CGF, Address, true, |
5388 | false); |
5389 | } |
5390 | |
5391 | bool |
5392 | PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
5393 | llvm::Value *Address) const { |
5394 | return PPC_initDwarfEHRegSizeTable(CGF, Address, true, |
5395 | false); |
5396 | } |
5397 | |
5398 | |
5399 | |
5400 | |
5401 | |
5402 | namespace { |
5403 | |
5404 | class AArch64ABIInfo : public SwiftABIInfo { |
5405 | public: |
5406 | enum ABIKind { |
5407 | AAPCS = 0, |
5408 | DarwinPCS, |
5409 | Win64 |
5410 | }; |
5411 | |
5412 | private: |
5413 | ABIKind Kind; |
5414 | |
5415 | public: |
5416 | AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) |
5417 | : SwiftABIInfo(CGT), Kind(Kind) {} |
5418 | |
5419 | private: |
5420 | ABIKind getABIKind() const { return Kind; } |
5421 | bool isDarwinPCS() const { return Kind == DarwinPCS; } |
5422 | |
5423 | ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const; |
5424 | ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic, |
5425 | unsigned CallingConvention) const; |
5426 | ABIArgInfo coerceIllegalVector(QualType Ty) const; |
5427 | bool isHomogeneousAggregateBaseType(QualType Ty) const override; |
5428 | bool isHomogeneousAggregateSmallEnough(const Type *Ty, |
5429 | uint64_t Members) const override; |
5430 | |
5431 | bool isIllegalVectorType(QualType Ty) const; |
5432 | |
5433 | void computeInfo(CGFunctionInfo &FI) const override { |
5434 | if (!::classifyReturnType(getCXXABI(), FI, *this)) |
5435 | FI.getReturnInfo() = |
5436 | classifyReturnType(FI.getReturnType(), FI.isVariadic()); |
5437 | |
5438 | for (auto &it : FI.arguments()) |
5439 | it.info = classifyArgumentType(it.type, FI.isVariadic(), |
5440 | FI.getCallingConvention()); |
5441 | } |
5442 | |
5443 | Address EmitDarwinVAArg(Address VAListAddr, QualType Ty, |
5444 | CodeGenFunction &CGF) const; |
5445 | |
5446 | Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty, |
5447 | CodeGenFunction &CGF) const; |
5448 | |
5449 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
5450 | QualType Ty) const override { |
5451 | llvm::Type *BaseTy = CGF.ConvertType(Ty); |
5452 | if (isa<llvm::ScalableVectorType>(BaseTy)) |
5453 | llvm::report_fatal_error("Passing SVE types to variadic functions is " |
5454 | "currently not supported"); |
5455 | |
5456 | return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty) |
5457 | : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF) |
5458 | : EmitAAPCSVAArg(VAListAddr, Ty, CGF); |
5459 | } |
5460 | |
5461 | Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, |
5462 | QualType Ty) const override; |
5463 | |
5464 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, |
5465 | bool asReturnValue) const override { |
5466 | return occupiesMoreThan(CGT, scalars, 4); |
5467 | } |
5468 | bool isSwiftErrorInRegister() const override { |
5469 | return true; |
5470 | } |
5471 | |
5472 | bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, |
5473 | unsigned elts) const override; |
5474 | |
5475 | bool allowBFloatArgsAndRet() const override { |
5476 | return getTarget().hasBFloat16Type(); |
5477 | } |
5478 | }; |
5479 | |
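 | // AArch64TargetCodeGenInfo wires the classifier above into CodeGen: it |
 | // provides the objc_retainAutoreleasedReturnValue marker, reports register |
 | // 31 (sp) as the DWARF stack pointer, translates the "target" attribute's |
 | // branch-protection string into the "sign-return-address" / |
 | // "branch-target-enforcement" function attributes, and lets a struct |
 | // wrapping an [8 x i64] array be used as a single asm operand when the |
 | // ls64 feature is available. |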
5480 | class AArch64TargetCodeGenInfo : public TargetCodeGenInfo { |
5481 | public: |
5482 | AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind) |
5483 | : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {} |
5484 | |
5485 | StringRef getARCRetainAutoreleasedReturnValueMarker() const override { |
5486 | return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue"; |
5487 | } |
5488 | |
5489 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { |
5490 | return 31; |
5491 | } |
5492 | |
5493 | bool doesReturnSlotInterfereWithArgs() const override { return false; } |
5494 | |
5495 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
5496 | CodeGen::CodeGenModule &CGM) const override { |
5497 | const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); |
5498 | if (!FD) |
5499 | return; |
5500 | |
5501 | const auto *TA = FD->getAttr<TargetAttr>(); |
5502 | if (TA == nullptr) |
5503 | return; |
5504 | |
5505 | ParsedTargetAttr Attr = TA->parse(); |
5506 | if (Attr.BranchProtection.empty()) |
5507 | return; |
5508 | |
5509 | TargetInfo::BranchProtectionInfo BPI; |
5510 | StringRef Error; |
5511 | (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection, |
5512 | BPI, Error); |
5513 | assert(Error.empty()); |
5514 | |
5515 | auto *Fn = cast<llvm::Function>(GV); |
5516 | static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"}; |
5517 | Fn->addFnAttr("sign-return-address", SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]); |
5518 | |
5519 | if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) { |
5520 | Fn->addFnAttr("sign-return-address-key", |
5521 | BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey |
5522 | ? "a_key" |
5523 | : "b_key"); |
5524 | } |
5525 | |
5526 | Fn->addFnAttr("branch-target-enforcement", |
5527 | BPI.BranchTargetEnforcement ? "true" : "false"); |
5528 | } |
5529 | |
5530 | bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF, |
5531 | llvm::Type *Ty) const override { |
5532 | if (CGF.getTarget().hasFeature("ls64")) { |
5533 | auto *ST = dyn_cast<llvm::StructType>(Ty); |
5534 | if (ST && ST->getNumElements() == 1) { |
5535 | auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0)); |
5536 | if (AT && AT->getNumElements() == 8 && |
5537 | AT->getElementType()->isIntegerTy(64)) |
5538 | return true; |
5539 | } |
5540 | } |
5541 | return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty); |
5542 | } |
5543 | }; |
5544 | |
5545 | class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo { |
5546 | public: |
5547 | WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K) |
5548 | : AArch64TargetCodeGenInfo(CGT, K) {} |
5549 | |
5550 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
5551 | CodeGen::CodeGenModule &CGM) const override; |
5552 | |
5553 | void getDependentLibraryOption(llvm::StringRef Lib, |
5554 | llvm::SmallString<24> &Opt) const override { |
5555 | Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib); |
5556 | } |
5557 | |
5558 | void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value, |
5559 | llvm::SmallString<32> &Opt) const override { |
5560 | Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; |
5561 | } |
5562 | }; |
5563 | |
5564 | void WindowsAArch64TargetCodeGenInfo::setTargetAttributes( |
5565 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { |
5566 | AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); |
5567 | if (GV->isDeclaration()) |
5568 | return; |
5569 | addStackProbeTargetAttributes(D, GV, CGM); |
5570 | } |
5571 | } |
5572 | |
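 | // coerceIllegalVector maps vector types the ABI cannot pass as-is onto types |
 | // the backend understands. As a rough guide (the switch below is the exact |
 | // mapping): fixed-length SVE predicates become <vscale x 16 x i1>, |
 | // fixed-length SVE data vectors become the corresponding scalable vector, |
 | // and other odd vectors are passed as i32, <2 x i32> or <4 x i32> depending |
 | // on size; anything larger goes indirect. For example, assuming |
 | // -msve-vector-bits=512, a fixed-length svint32_t argument would be lowered |
 | // as <vscale x 4 x i32>. |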
5573 | ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const { |
5574 | assert(Ty->isVectorType() && "expected vector type!"); |
5575 | |
5576 | const auto *VT = Ty->castAs<VectorType>(); |
5577 | if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) { |
5578 | assert(VT->getElementType()->isBuiltinType() && "expected builtin type!"); |
5579 | assert(VT->getElementType()->castAs<BuiltinType>()->getKind() == |
5580 | BuiltinType::UChar && |
5581 | "unexpected builtin type for SVE predicate!"); |
5582 | return ABIArgInfo::getDirect(llvm::ScalableVectorType::get( |
5583 | llvm::Type::getInt1Ty(getVMContext()), 16)); |
5584 | } |
5585 | |
5586 | if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) { |
5587 | assert(VT->getElementType()->isBuiltinType() && "expected builtin type!"); |
5588 | |
5589 | const auto *BT = VT->getElementType()->castAs<BuiltinType>(); |
5590 | llvm::ScalableVectorType *ResType = nullptr; |
5591 | switch (BT->getKind()) { |
5592 | default: |
5593 | llvm_unreachable("unexpected builtin type for SVE vector!"); |
5594 | case BuiltinType::SChar: |
5595 | case BuiltinType::UChar: |
5596 | ResType = llvm::ScalableVectorType::get( |
5597 | llvm::Type::getInt8Ty(getVMContext()), 16); |
5598 | break; |
5599 | case BuiltinType::Short: |
5600 | case BuiltinType::UShort: |
5601 | ResType = llvm::ScalableVectorType::get( |
5602 | llvm::Type::getInt16Ty(getVMContext()), 8); |
5603 | break; |
5604 | case BuiltinType::Int: |
5605 | case BuiltinType::UInt: |
5606 | ResType = llvm::ScalableVectorType::get( |
5607 | llvm::Type::getInt32Ty(getVMContext()), 4); |
5608 | break; |
5609 | case BuiltinType::Long: |
5610 | case BuiltinType::ULong: |
5611 | ResType = llvm::ScalableVectorType::get( |
5612 | llvm::Type::getInt64Ty(getVMContext()), 2); |
5613 | break; |
5614 | case BuiltinType::Half: |
5615 | ResType = llvm::ScalableVectorType::get( |
5616 | llvm::Type::getHalfTy(getVMContext()), 8); |
5617 | break; |
5618 | case BuiltinType::Float: |
5619 | ResType = llvm::ScalableVectorType::get( |
5620 | llvm::Type::getFloatTy(getVMContext()), 4); |
5621 | break; |
5622 | case BuiltinType::Double: |
5623 | ResType = llvm::ScalableVectorType::get( |
5624 | llvm::Type::getDoubleTy(getVMContext()), 2); |
5625 | break; |
5626 | case BuiltinType::BFloat16: |
5627 | ResType = llvm::ScalableVectorType::get( |
5628 | llvm::Type::getBFloatTy(getVMContext()), 8); |
5629 | break; |
5630 | } |
5631 | return ABIArgInfo::getDirect(ResType); |
5632 | } |
5633 | |
5634 | uint64_t Size = getContext().getTypeSize(Ty); |
5635 | |
5636 | if (isAndroid() && (Size <= 16)) { |
5637 | llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext()); |
5638 | return ABIArgInfo::getDirect(ResType); |
5639 | } |
5640 | if (Size <= 32) { |
5641 | llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext()); |
5642 | return ABIArgInfo::getDirect(ResType); |
5643 | } |
5644 | if (Size == 64) { |
5645 | auto *ResType = |
5646 | llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2); |
5647 | return ABIArgInfo::getDirect(ResType); |
5648 | } |
5649 | if (Size == 128) { |
5650 | auto *ResType = |
5651 | llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4); |
5652 | return ABIArgInfo::getDirect(ResType); |
5653 | } |
5654 | return getNaturalAlignIndirect(Ty, false); |
5655 | } |
5656 | |
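 | // Argument classification, in outline and in the order the checks appear |
 | // below: illegal vectors are coerced; scalars are extended (DarwinPCS) or |
 | // passed direct; non-trivially-copyable C++ records go indirect; empty |
 | // records are ignored; homogeneous FP/vector aggregates (HFAs) are passed |
 | // as an array of their base type; other aggregates of at most 128 bits are |
 | // coerced to integer arrays; anything larger is passed indirectly. |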
5657 | ABIArgInfo |
5658 | AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadic, |
5659 | unsigned CallingConvention) const { |
5660 | Ty = useFirstFieldIfTransparentUnion(Ty); |
5661 | |
5662 | |
5663 | if (isIllegalVectorType(Ty)) |
5664 | return coerceIllegalVector(Ty); |
5665 | |
5666 | if (!isAggregateTypeForABI(Ty)) { |
5667 | |
5668 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
5669 | Ty = EnumTy->getDecl()->getIntegerType(); |
5670 | |
5671 | if (const auto *EIT = Ty->getAs<ExtIntType>()) |
5672 | if (EIT->getNumBits() > 128) |
5673 | return getNaturalAlignIndirect(Ty); |
5674 | |
5675 | return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS() |
5676 | ? ABIArgInfo::getExtend(Ty) |
5677 | : ABIArgInfo::getDirect()); |
5678 | } |
5679 | |
5680 | |
5681 | |
5682 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { |
5683 | return getNaturalAlignIndirect(Ty, RAA == |
5684 | CGCXXABI::RAA_DirectInMemory); |
5685 | } |
5686 | |
5687 | |
5688 | |
5689 | uint64_t Size = getContext().getTypeSize(Ty); |
5690 | bool IsEmpty = isEmptyRecord(getContext(), Ty, true); |
5691 | if (IsEmpty || Size == 0) { |
5692 | if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS()) |
5693 | return ABIArgInfo::getIgnore(); |
5694 | |
5695 | |
5696 | |
5697 | if (IsEmpty && Size == 0) |
5698 | return ABIArgInfo::getIgnore(); |
5699 | return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); |
5700 | } |
5701 | |
5702 | |
5703 | const Type *Base = nullptr; |
5704 | uint64_t Members = 0; |
5705 | bool IsWin64 = Kind == Win64 || CallingConvention == llvm::CallingConv::Win64; |
5706 | bool IsWinVariadic = IsWin64 && IsVariadic; |
5707 | |
5708 | |
5709 | if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) { |
5710 | if (Kind != AArch64ABIInfo::AAPCS) |
5711 | return ABIArgInfo::getDirect( |
5712 | llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members)); |
5713 | |
5714 | // Under AAPCS, an HFA whose (unadjusted) alignment exceeds that of its base |
5715 | // element and is at least 16 gets a 16-byte slot; otherwise use the default. |
5716 | unsigned Align = |
5717 | getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity(); |
5718 | unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity(); |
5719 | Align = (Align > BaseAlign && Align >= 16) ? 16 : 0; |
5720 | return ABIArgInfo::getDirect( |
5721 | llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0, |
5722 | nullptr, true, Align); |
5723 | } |
5724 | |
5725 | |
5726 | if (Size <= 128) { |
5727 | |
5728 | |
5729 | if (getTarget().isRenderScriptTarget()) { |
5730 | return coerceToIntArray(Ty, getContext(), getVMContext()); |
5731 | } |
5732 | unsigned Alignment; |
5733 | if (Kind == AArch64ABIInfo::AAPCS) { |
5734 | Alignment = getContext().getTypeUnadjustedAlign(Ty); |
5735 | Alignment = Alignment < 128 ? 64 : 128; |
5736 | } else { |
5737 | Alignment = std::max(getContext().getTypeAlign(Ty), |
5738 | (unsigned)getTarget().getPointerWidth(0)); |
5739 | } |
5740 | Size = llvm::alignTo(Size, Alignment); |
5741 | |
5742 | // The aggregate is passed as one integer of the slot width when it fits a |
5743 | // single aligned slot, and as an array of such integers otherwise. |
5744 | llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment); |
5745 | return ABIArgInfo::getDirect( |
5746 | Size == Alignment ? BaseTy |
5747 | : llvm::ArrayType::get(BaseTy, Size / Alignment)); |
5748 | } |
5749 | |
5750 | return getNaturalAlignIndirect(Ty, false); |
5751 | } |
5752 | |
5753 | ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy, |
5754 | bool IsVariadic) const { |
5755 | if (RetTy->isVoidType()) |
5756 | return ABIArgInfo::getIgnore(); |
5757 | |
5758 | if (const auto *VT = RetTy->getAs<VectorType>()) { |
5759 | if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector || |
5760 | VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) |
5761 | return coerceIllegalVector(RetTy); |
5762 | } |
5763 | |
5764 | |
5765 | if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) |
5766 | return getNaturalAlignIndirect(RetTy); |
5767 | |
5768 | if (!isAggregateTypeForABI(RetTy)) { |
5769 | |
5770 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) |
5771 | RetTy = EnumTy->getDecl()->getIntegerType(); |
5772 | |
5773 | if (const auto *EIT = RetTy->getAs<ExtIntType>()) |
5774 | if (EIT->getNumBits() > 128) |
5775 | return getNaturalAlignIndirect(RetTy); |
5776 | |
5777 | return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS() |
5778 | ? ABIArgInfo::getExtend(RetTy) |
5779 | : ABIArgInfo::getDirect()); |
5780 | } |
5781 | |
5782 | uint64_t Size = getContext().getTypeSize(RetTy); |
5783 | if (isEmptyRecord(getContext(), RetTy, true) || Size == 0) |
5784 | return ABIArgInfo::getIgnore(); |
5785 | |
5786 | const Type *Base = nullptr; |
5787 | uint64_t Members = 0; |
5788 | if (isHomogeneousAggregate(RetTy, Base, Members) && |
5789 | !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 && |
5790 | IsVariadic)) |
5791 | // Homogeneous floating-point/short-vector aggregates are returned directly. |
5792 | return ABIArgInfo::getDirect(); |
5793 | |
5794 | // Aggregates of at most 16 bytes are returned directly, coerced to integers. |
5795 | if (Size <= 128) { |
5796 | |
5797 | |
5798 | if (getTarget().isRenderScriptTarget()) { |
5799 | return coerceToIntArray(RetTy, getContext(), getVMContext()); |
5800 | } |
5801 | |
5802 | if (Size <= 64 && getDataLayout().isLittleEndian()) { |
5803 | // On little-endian targets a composite this small is returned in the low |
5804 | // bits of an integer of exactly its size, so no padding copy is needed. |
5805 | // Big-endian targets want the value in the high bits and fall through to |
5806 | // the 64-bit-aligned handling below. |
5807 | |
5808 | |
5809 | return ABIArgInfo::getDirect( |
5810 | llvm::IntegerType::get(getVMContext(), Size)); |
5811 | } |
5812 | |
5813 | unsigned Alignment = getContext().getTypeAlign(RetTy); |
5814 | Size = llvm::alignTo(Size, 64); |
5815 | |
5816 | |
5817 | |
5818 | if (Alignment < 128 && Size == 128) { |
5819 | llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); |
5820 | return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); |
5821 | } |
5822 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); |
5823 | } |
5824 | |
5825 | return getNaturalAlignIndirect(RetTy); |
5826 | } |
5827 | |
5828 | |
5829 | bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const { |
5830 | if (const VectorType *VT = Ty->getAs<VectorType>()) { |
5831 | |
5832 | |
5833 | |
5834 | if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector || |
5835 | VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) |
5836 | return true; |
5837 | |
5838 | |
5839 | unsigned NumElements = VT->getNumElements(); |
5840 | uint64_t Size = getContext().getTypeSize(VT); |
5841 | |
5842 | if (!llvm::isPowerOf2_32(NumElements)) |
5843 | return true; |
5844 | |
5845 | |
5846 | |
5847 | llvm::Triple Triple = getTarget().getTriple(); |
5848 | if (Triple.getArch() == llvm::Triple::aarch64_32 && |
5849 | Triple.isOSBinFormatMachO()) |
5850 | return Size <= 32; |
5851 | |
5852 | return Size != 64 && (Size != 128 || NumElements == 1); |
5853 | } |
5854 | return false; |
5855 | } |
5856 | |
5857 | bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize, |
5858 | llvm::Type *eltTy, |
5859 | unsigned elts) const { |
5860 | if (!llvm::isPowerOf2_32(elts)) |
5861 | return false; |
5862 | if (totalSize.getQuantity() != 8 && |
5863 | (totalSize.getQuantity() != 16 || elts == 1)) |
5864 | return false; |
5865 | return true; |
5866 | } |
5867 | |
5868 | bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { |
5869 | // For AAPCS64 a homogeneous aggregate's base type must be a floating-point |
5870 | // type or a 64- or 128-bit short vector; anything else disqualifies the |
5871 | // aggregate from HFA/HVA treatment. |
5872 | |
5873 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { |
5874 | if (BT->isFloatingPoint()) |
5875 | return true; |
5876 | } else if (const VectorType *VT = Ty->getAs<VectorType>()) { |
5877 | unsigned VecSize = getContext().getTypeSize(VT); |
5878 | if (VecSize == 64 || VecSize == 128) |
5879 | return true; |
5880 | } |
5881 | return false; |
5882 | } |
5883 | |
5884 | bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, |
5885 | uint64_t Members) const { |
5886 | return Members <= 4; |
5887 | } |
5888 | |
5889 | Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty, |
5890 | CodeGenFunction &CGF) const { |
5891 | ABIArgInfo AI = classifyArgumentType(Ty, true, |
5892 | CGF.CurFnInfo->getCallingConvention()); |
5893 | bool IsIndirect = AI.isIndirect(); |
5894 | |
5895 | llvm::Type *BaseTy = CGF.ConvertType(Ty); |
5896 | if (IsIndirect) |
5897 | BaseTy = llvm::PointerType::getUnqual(BaseTy); |
5898 | else if (AI.getCoerceToType()) |
5899 | BaseTy = AI.getCoerceToType(); |
5900 | |
5901 | unsigned NumRegs = 1; |
5902 | if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) { |
5903 | BaseTy = ArrTy->getElementType(); |
5904 | NumRegs = ArrTy->getNumElements(); |
5905 | } |
5906 | bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy(); |
5907 | |
5908 | // The AArch64 va_list layout assumed by the struct GEPs below (AAPCS64 B.4): |
5909 | // |
5910 | // struct { |
5911 | //   void *__stack;   /* 0: next stacked argument             */ |
5912 | //   void *__gr_top;  /* 1: end of the GP register save area  */ |
5913 | //   void *__vr_top;  /* 2: end of the FP/SIMD save area      */ |
5914 | //   int __gr_offs;   /* 3: negative while GP regs remain     */ |
5915 | //   int __vr_offs;   /* 4: negative while FP/SIMD regs remain*/ |
5916 | // }; |
5917 | |
5918 | |
5919 | llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg"); |
5920 | llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); |
5921 | llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack"); |
5922 | llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); |
5923 | |
5924 | CharUnits TySize = getContext().getTypeSizeInChars(Ty); |
5925 | CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty); |
5926 | |
5927 | Address reg_offs_p = Address::invalid(); |
5928 | llvm::Value *reg_offs = nullptr; |
5929 | int reg_top_index; |
5930 | int RegSize = IsIndirect ? 8 : TySize.getQuantity(); |
5931 | if (!IsFPR) { |
5932 | |
5933 | reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p"); |
5934 | reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs"); |
5935 | reg_top_index = 1; |
5936 | RegSize = llvm::alignTo(RegSize, 8); |
5937 | } else { |
5938 | |
5939 | reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p"); |
5940 | reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs"); |
5941 | reg_top_index = 2; |
5942 | RegSize = 16 * NumRegs; |
5943 | } |
5944 | |
5945 | |
5946 | |
5947 | |
5948 | |
5949 | // If reg_offs >= 0 the register save area for this class of argument is |
5950 | // already exhausted and the value lives on the stack; a negative value is |
5951 | // the offset of the next candidate register slot relative to reg_top. |
5952 | |
5953 | llvm::Value *UsingStack = nullptr; |
5954 | UsingStack = CGF.Builder.CreateICmpSGE( |
5955 | reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0)); |
5956 | |
5957 | CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock); |
5958 | |
5959 | |
5960 | |
5961 | CGF.EmitBlock(MaybeRegBlock); |
5962 | |
5963 | |
5964 | |
5965 | |
5966 | if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) { |
5967 | int Align = TyAlign.getQuantity(); |
5968 | |
5969 | reg_offs = CGF.Builder.CreateAdd( |
5970 | reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1), |
5971 | "align_regoffs"); |
5972 | reg_offs = CGF.Builder.CreateAnd( |
5973 | reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align), |
5974 | "aligned_regoffs"); |
5975 | } |
5976 | |
5977 | |
5978 | |
5979 | |
5980 | |
5981 | llvm::Value *NewOffset = nullptr; |
5982 | NewOffset = CGF.Builder.CreateAdd( |
5983 | reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs"); |
5984 | CGF.Builder.CreateStore(NewOffset, reg_offs_p); |
5985 | |
5986 | |
5987 | |
5988 | llvm::Value *InRegs = nullptr; |
5989 | InRegs = CGF.Builder.CreateICmpSLE( |
5990 | NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg"); |
5991 | |
5992 | CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock); |
5993 | |
5994 | |
5995 | |
5996 | |
5997 | |
5998 | |
5999 | |
6000 | CGF.EmitBlock(InRegBlock); |
6001 | |
6002 | llvm::Value *reg_top = nullptr; |
6003 | Address reg_top_p = |
6004 | CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p"); |
6005 | reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top"); |
6006 | Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs), |
6007 | CharUnits::fromQuantity(IsFPR ? 16 : 8)); |
6008 | Address RegAddr = Address::invalid(); |
6009 | llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty); |
6010 | |
6011 | if (IsIndirect) { |
6012 | |
6013 | |
6014 | MemTy = llvm::PointerType::getUnqual(MemTy); |
6015 | } |
6016 | |
6017 | const Type *Base = nullptr; |
6018 | uint64_t NumMembers = 0; |
6019 | bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers); |
6020 | if (IsHFA && NumMembers > 1) { |
6021 | |
6022 | |
6023 | |
6024 | |
6025 | assert(!IsIndirect && "Homogeneous aggregates should be passed directly"); |
6026 | auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0)); |
6027 | llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0)); |
6028 | llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers); |
6029 | Address Tmp = CGF.CreateTempAlloca(HFATy, |
6030 | std::max(TyAlign, BaseTyInfo.Align)); |
6031 | |
6032 | |
6033 | int Offset = 0; |
6034 | if (CGF.CGM.getDataLayout().isBigEndian() && |
6035 | BaseTyInfo.Width.getQuantity() < 16) |
6036 | Offset = 16 - BaseTyInfo.Width.getQuantity(); |
6037 | |
6038 | for (unsigned i = 0; i < NumMembers; ++i) { |
6039 | CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset); |
6040 | Address LoadAddr = |
6041 | CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset); |
6042 | LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy); |
6043 | |
6044 | Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i); |
6045 | |
6046 | llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr); |
6047 | CGF.Builder.CreateStore(Elem, StoreAddr); |
6048 | } |
6049 | |
6050 | RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy); |
6051 | } else { |
6052 | |
6053 | |
6054 | |
6055 | CharUnits SlotSize = BaseAddr.getAlignment(); |
6056 | if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect && |
6057 | (IsHFA || !isAggregateTypeForABI(Ty)) && |
6058 | TySize < SlotSize) { |
6059 | CharUnits Offset = SlotSize - TySize; |
6060 | BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset); |
6061 | } |
6062 | |
6063 | RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy); |
6064 | } |
6065 | |
6066 | CGF.EmitBranch(ContBlock); |
6067 | |
6068 | |
6069 | |
6070 | |
6071 | CGF.EmitBlock(OnStackBlock); |
6072 | |
6073 | Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p"); |
6074 | llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack"); |
6075 | |
6076 | |
6077 | |
6078 | if (!IsIndirect && TyAlign.getQuantity() > 8) { |
6079 | int Align = TyAlign.getQuantity(); |
6080 | |
6081 | OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty); |
6082 | |
6083 | OnStackPtr = CGF.Builder.CreateAdd( |
6084 | OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1), |
6085 | "align_stack"); |
6086 | OnStackPtr = CGF.Builder.CreateAnd( |
6087 | OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align), |
6088 | "align_stack"); |
6089 | |
6090 | OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy); |
6091 | } |
6092 | Address OnStackAddr(OnStackPtr, |
6093 | std::max(CharUnits::fromQuantity(8), TyAlign)); |
6094 | |
6095 | |
6096 | CharUnits StackSlotSize = CharUnits::fromQuantity(8); |
6097 | CharUnits StackSize; |
6098 | if (IsIndirect) |
6099 | StackSize = StackSlotSize; |
6100 | else |
6101 | StackSize = TySize.alignTo(StackSlotSize); |
6102 | |
6103 | llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize); |
6104 | llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP( |
6105 | CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack"); |
6106 | |
6107 | |
6108 | CGF.Builder.CreateStore(NewStack, stack_p); |
6109 | |
6110 | if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) && |
6111 | TySize < StackSlotSize) { |
6112 | CharUnits Offset = StackSlotSize - TySize; |
6113 | OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset); |
6114 | } |
6115 | |
6116 | OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy); |
6117 | |
6118 | CGF.EmitBranch(ContBlock); |
6119 | |
6120 | |
6121 | |
6122 | |
6123 | CGF.EmitBlock(ContBlock); |
6124 | |
6125 | Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, |
6126 | OnStackAddr, OnStackBlock, "vaargs.addr"); |
6127 | |
6128 | if (IsIndirect) |
6129 | return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), |
6130 | TyAlign); |
6131 | |
6132 | return ResAddr; |
6133 | } |
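 | // For a general-register argument the emitted IR is, roughly (names follow |
 | // the basic blocks created above; an illustration, not a literal dump): |
 | //   %gr_offs = load i32, i32* %gr_offs_p |
 | //   %cmp = icmp sge i32 %gr_offs, 0 |
 | //   br i1 %cmp, label %vaarg.on_stack, label %vaarg.maybe_reg |
 | // vaarg.maybe_reg:                         ; bump the offset, re-check it |
 | //   %new_reg_offs = add i32 %gr_offs, <RegSize> |
 | //   store i32 %new_reg_offs, i32* %gr_offs_p |
 | //   %inreg = icmp sle i32 %new_reg_offs, 0 |
 | //   br i1 %inreg, label %vaarg.in_reg, label %vaarg.on_stack |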
6134 | |
6135 | Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty, |
6136 | CodeGenFunction &CGF) const { |
6137 | |
6138 | |
6139 | |
6140 | if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty)) |
6141 | return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect()); |
6142 | |
6143 | uint64_t PointerSize = getTarget().getPointerWidth(0) / 8; |
6144 | CharUnits SlotSize = CharUnits::fromQuantity(PointerSize); |
6145 | |
6146 | |
6147 | if (isEmptyRecord(getContext(), Ty, true)) { |
6148 | Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize); |
6149 | Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); |
6150 | return Addr; |
6151 | } |
6152 | |
6153 | |
6154 | |
6155 | auto TyInfo = getContext().getTypeInfoInChars(Ty); |
6156 | |
6157 | |
6158 | |
6159 | bool IsIndirect = false; |
6160 | if (TyInfo.Width.getQuantity() > 16) { |
6161 | const Type *Base = nullptr; |
6162 | uint64_t Members = 0; |
6163 | IsIndirect = !isHomogeneousAggregate(Ty, Base, Members); |
6164 | } |
6165 | |
6166 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, |
6167 | TyInfo, SlotSize, true); |
6168 | } |
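 | // On Darwin the va_list is a plain pointer into the argument area: scalars |
 | // go through the generic EmitVAArgInstr path, while aggregates larger than |
 | // 16 bytes that are not homogeneous aggregates are passed by reference, so |
 | // the slot holds a pointer to the value rather than the value itself. |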
6169 | |
6170 | Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, |
6171 | QualType Ty) const { |
6172 | bool IsIndirect = false; |
6173 | |
6174 | |
6175 | if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) > 128) |
6176 | IsIndirect = true; |
6177 | |
6178 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, |
6179 | CGF.getContext().getTypeInfoInChars(Ty), |
6180 | CharUnits::fromQuantity(8), |
6181 | false); |
6182 | } |
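 | // Windows on ARM64 varargs: every argument occupies one or more 8-byte |
 | // stack slots, and aggregates larger than 128 bits are passed by pointer. |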
6183 | |
6184 | //===----------------------------------------------------------------------===// |
6185 | // ARM ABI Implementation |
6186 | //===----------------------------------------------------------------------===// |
6187 | |
6188 | namespace { |
6189 | |
6190 | class ARMABIInfo : public SwiftABIInfo { |
6191 | public: |
6192 | enum ABIKind { |
6193 | APCS = 0, |
6194 | AAPCS = 1, |
6195 | AAPCS_VFP = 2, |
6196 | AAPCS16_VFP = 3, |
6197 | }; |
6198 | |
6199 | private: |
6200 | ABIKind Kind; |
6201 | bool IsFloatABISoftFP; |
6202 | |
6203 | public: |
6204 | ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) |
6205 | : SwiftABIInfo(CGT), Kind(_Kind) { |
6206 | setCCs(); |
6207 | IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" || |
6208 | CGT.getCodeGenOpts().FloatABI == ""; |
6209 | } |
6210 | |
6211 | bool isEABI() const { |
6212 | switch (getTarget().getTriple().getEnvironment()) { |
6213 | case llvm::Triple::Android: |
6214 | case llvm::Triple::EABI: |
6215 | case llvm::Triple::EABIHF: |
6216 | case llvm::Triple::GNUEABI: |
6217 | case llvm::Triple::GNUEABIHF: |
6218 | case llvm::Triple::MuslEABI: |
6219 | case llvm::Triple::MuslEABIHF: |
6220 | return true; |
6221 | default: |
6222 | return false; |
6223 | } |
6224 | } |
6225 | |
6226 | bool isEABIHF() const { |
6227 | switch (getTarget().getTriple().getEnvironment()) { |
6228 | case llvm::Triple::EABIHF: |
6229 | case llvm::Triple::GNUEABIHF: |
6230 | case llvm::Triple::MuslEABIHF: |
6231 | return true; |
6232 | default: |
6233 | return false; |
6234 | } |
6235 | } |
6236 | |
6237 | ABIKind getABIKind() const { return Kind; } |
6238 | |
6239 | bool allowBFloatArgsAndRet() const override { |
6240 | return !IsFloatABISoftFP && getTarget().hasBFloat16Type(); |
6241 | } |
6242 | |
6243 | private: |
6244 | ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic, |
6245 | unsigned functionCallConv) const; |
6246 | ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic, |
6247 | unsigned functionCallConv) const; |
6248 | ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base, |
6249 | uint64_t Members) const; |
6250 | ABIArgInfo coerceIllegalVector(QualType Ty) const; |
6251 | bool isIllegalVectorType(QualType Ty) const; |
6252 | bool containsAnyFP16Vectors(QualType Ty) const; |
6253 | |
6254 | bool isHomogeneousAggregateBaseType(QualType Ty) const override; |
6255 | bool isHomogeneousAggregateSmallEnough(const Type *Ty, |
6256 | uint64_t Members) const override; |
6257 | |
6258 | bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const; |
6259 | |
6260 | void computeInfo(CGFunctionInfo &FI) const override; |
6261 | |
6262 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
6263 | QualType Ty) const override; |
6264 | |
6265 | llvm::CallingConv::ID getLLVMDefaultCC() const; |
6266 | llvm::CallingConv::ID getABIDefaultCC() const; |
6267 | void setCCs(); |
6268 | |
6269 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, |
6270 | bool asReturnValue) const override { |
6271 | return occupiesMoreThan(CGT, scalars, 4); |
6272 | } |
6273 | bool isSwiftErrorInRegister() const override { |
6274 | return true; |
6275 | } |
6276 | bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, |
6277 | unsigned elts) const override; |
6278 | }; |
6279 | |
6280 | class ARMTargetCodeGenInfo : public TargetCodeGenInfo { |
6281 | public: |
6282 | ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) |
6283 | : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {} |
6284 | |
6285 | const ARMABIInfo &getABIInfo() const { |
6286 | return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); |
6287 | } |
6288 | |
6289 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { |
6290 | return 13; |
6291 | } |
6292 | |
6293 | StringRef getARCRetainAutoreleasedReturnValueMarker() const override { |
6294 | return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue"; |
6295 | } |
6296 | |
6297 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
6298 | llvm::Value *Address) const override { |
6299 | llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); |
6300 | |
6301 | |
6302 | AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15); |
6303 | return false; |
6304 | } |
6305 | |
6306 | unsigned getSizeOfUnwindException() const override { |
6307 | if (getABIInfo().isEABI()) return 88; |
6308 | return TargetCodeGenInfo::getSizeOfUnwindException(); |
6309 | } |
6310 | |
6311 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
6312 | CodeGen::CodeGenModule &CGM) const override { |
6313 | if (GV->isDeclaration()) |
6314 | return; |
6315 | const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); |
6316 | if (!FD) |
6317 | return; |
6318 | |
6319 | const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>(); |
6320 | if (!Attr) |
6321 | return; |
6322 | |
6323 | const char *Kind; |
6324 | switch (Attr->getInterrupt()) { |
6325 | case ARMInterruptAttr::Generic: Kind = ""; break; |
6326 | case ARMInterruptAttr::IRQ: Kind = "IRQ"; break; |
6327 | case ARMInterruptAttr::FIQ: Kind = "FIQ"; break; |
6328 | case ARMInterruptAttr::SWI: Kind = "SWI"; break; |
6329 | case ARMInterruptAttr::ABORT: Kind = "ABORT"; break; |
6330 | case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break; |
6331 | } |
6332 | |
6333 | llvm::Function *Fn = cast<llvm::Function>(GV); |
6334 | |
6335 | Fn->addFnAttr("interrupt", Kind); |
6336 | |
6337 | ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind(); |
6338 | if (ABI == ARMABIInfo::APCS) |
6339 | return; |
6340 | |
6341 | |
6342 | // AAPCS only guarantees 8-byte stack alignment at public interfaces, not on |
6343 | // interrupt entry, so ask the backend to realign the stack in the prologue. |
6344 | llvm::AttrBuilder B; |
6345 | B.addStackAlignmentAttr(8); |
6346 | Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); |
6347 | } |
6348 | }; |
6349 | |
6350 | class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo { |
6351 | public: |
6352 | WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) |
6353 | : ARMTargetCodeGenInfo(CGT, K) {} |
6354 | |
6355 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
6356 | CodeGen::CodeGenModule &CGM) const override; |
6357 | |
6358 | void getDependentLibraryOption(llvm::StringRef Lib, |
6359 | llvm::SmallString<24> &Opt) const override { |
6360 | Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib); |
6361 | } |
6362 | |
6363 | void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value, |
6364 | llvm::SmallString<32> &Opt) const override { |
6365 | Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; |
6366 | } |
6367 | }; |
6368 | |
6369 | void WindowsARMTargetCodeGenInfo::setTargetAttributes( |
6370 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { |
6371 | ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM); |
6372 | if (GV->isDeclaration()) |
6373 | return; |
6374 | addStackProbeTargetAttributes(D, GV, CGM); |
6375 | } |
6376 | } |
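 | // computeInfo classifies the return value and every argument, then retags |
 | // functions written with the default C calling convention when the ABI in |
 | // use (e.g. AAPCS-VFP) differs from the triple's default convention, so the |
 | // backend sees the calling convention the ABI actually requires. |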
6377 | |
6378 | void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { |
6379 | if (!::classifyReturnType(getCXXABI(), FI, *this)) |
6380 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(), |
6381 | FI.getCallingConvention()); |
6382 | |
6383 | for (auto &I : FI.arguments()) |
6384 | I.info = classifyArgumentType(I.type, FI.isVariadic(), |
6385 | FI.getCallingConvention()); |
6386 | |
6387 | |
6388 | |
6389 | if (FI.getCallingConvention() != llvm::CallingConv::C) |
6390 | return; |
6391 | |
6392 | llvm::CallingConv::ID cc = getRuntimeCC(); |
6393 | if (cc != llvm::CallingConv::C) |
6394 | FI.setEffectiveCallingConvention(cc); |
6395 | } |
6396 | |
6397 | |
6398 | llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const { |
6399 | |
6400 | if (isEABIHF() || getTarget().getTriple().isWatchABI()) |
6401 | return llvm::CallingConv::ARM_AAPCS_VFP; |
6402 | else if (isEABI()) |
6403 | return llvm::CallingConv::ARM_AAPCS; |
6404 | else |
6405 | return llvm::CallingConv::ARM_APCS; |
6406 | } |
6407 | |
6408 | |
6409 | |
6410 | llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const { |
6411 | switch (getABIKind()) { |
6412 | case APCS: return llvm::CallingConv::ARM_APCS; |
6413 | case AAPCS: return llvm::CallingConv::ARM_AAPCS; |
6414 | case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; |
6415 | case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; |
6416 | } |
6417 | llvm_unreachable("bad ABI kind"); |
6418 | } |
6419 | |
6420 | void ARMABIInfo::setCCs() { |
6421 | assert(getRuntimeCC() == llvm::CallingConv::C); |
6422 | |
6423 | |
6424 | |
6425 | llvm::CallingConv::ID abiCC = getABIDefaultCC(); |
6426 | if (abiCC != getLLVMDefaultCC()) |
6427 | RuntimeCC = abiCC; |
6428 | } |
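 | // Vectors the ARM ABI cannot pass directly are rewritten in terms of i32: |
 | // up to 32 bits as a single i32, 64- or 128-bit vectors as <2 x i32> or |
 | // <4 x i32>, and anything else indirectly by address. |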
6429 | |
6430 | ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const { |
6431 | uint64_t Size = getContext().getTypeSize(Ty); |
6432 | if (Size <= 32) { |
6433 | llvm::Type *ResType = |
6434 | llvm::Type::getInt32Ty(getVMContext()); |
6435 | return ABIArgInfo::getDirect(ResType); |
6436 | } |
6437 | if (Size == 64 || Size == 128) { |
6438 | auto *ResType = llvm::FixedVectorType::get( |
6439 | llvm::Type::getInt32Ty(getVMContext()), Size / 32); |
6440 | return ABIArgInfo::getDirect(ResType); |
6441 | } |
6442 | return getNaturalAlignIndirect(Ty, false); |
6443 | } |
6444 | |
6445 | ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty, |
6446 | const Type *Base, |
6447 | uint64_t Members) const { |
6448 | assert(Base && "Base class should be set for homogeneous aggregate"); |
6449 | |
6450 | if (const VectorType *VT = Base->getAs<VectorType>()) { |
6451 | |
6452 | if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) { |
6453 | uint64_t Size = getContext().getTypeSize(VT); |
6454 | auto *NewVecTy = llvm::FixedVectorType::get( |
6455 | llvm::Type::getInt32Ty(getVMContext()), Size / 32); |
6456 | llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members); |
6457 | return ABIArgInfo::getDirect(Ty, 0, nullptr, false); |
6458 | } |
6459 | } |
6460 | unsigned Align = 0; |
6461 | if (getABIKind() == ARMABIInfo::AAPCS || |
6462 | getABIKind() == ARMABIInfo::AAPCS_VFP) { |
6463 | |
6464 | |
6465 | Align = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity(); |
6466 | unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity(); |
6467 | Align = (Align > BaseAlign && Align >= 8) ? 8 : 0; |
6468 | } |
6469 | return ABIArgInfo::getDirect(nullptr, 0, nullptr, false, Align); |
6470 | } |
6471 | |
6472 | ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic, |
6473 | unsigned functionCallConv) const { |
6474 | // Under AAPCS-VFP, floating-point types, 64/128-bit vectors and homogeneous |
6475 | // aggregates of up to four such elements are passed in VFP registers; |
6476 | // variadic calls always marshal to the base (integer) standard, which is |
6477 | // why IsAAPCS_VFP is forced to false for variadic functions. |
6478 | |
6479 | |
6480 | |
6481 | |
6482 | bool IsAAPCS_VFP = |
6483 | !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, false); |
6484 | |
6485 | Ty = useFirstFieldIfTransparentUnion(Ty); |
6486 | |
6487 | |
6488 | if (isIllegalVectorType(Ty)) |
6489 | return coerceIllegalVector(Ty); |
6490 | |
6491 | if (!isAggregateTypeForABI(Ty)) { |
6492 | |
6493 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) { |
6494 | Ty = EnumTy->getDecl()->getIntegerType(); |
6495 | } |
6496 | |
6497 | if (const auto *EIT = Ty->getAs<ExtIntType>()) |
6498 | if (EIT->getNumBits() > 64) |
6499 | return getNaturalAlignIndirect(Ty, true); |
6500 | |
6501 | return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) |
6502 | : ABIArgInfo::getDirect()); |
6503 | } |
6504 | |
6505 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { |
6506 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
6507 | } |
6508 | |
6509 | |
6510 | if (isEmptyRecord(getContext(), Ty, true)) |
6511 | return ABIArgInfo::getIgnore(); |
6512 | |
6513 | if (IsAAPCS_VFP) { |
6514 | |
6515 | |
6516 | const Type *Base = nullptr; |
6517 | uint64_t Members = 0; |
6518 | if (isHomogeneousAggregate(Ty, Base, Members)) |
6519 | return classifyHomogeneousAggregate(Ty, Base, Members); |
6520 | } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) { |
6521 | |
6522 | |
6523 | |
6524 | const Type *Base = nullptr; |
6525 | uint64_t Members = 0; |
6526 | if (isHomogeneousAggregate(Ty, Base, Members)) { |
6527 | assert(Base && Members <= 4 && "unexpected homogeneous aggregate"); |
6528 | llvm::Type *Ty = |
6529 | llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members); |
6530 | return ABIArgInfo::getDirect(Ty, 0, nullptr, false); |
6531 | } |
6532 | } |
6533 | |
6534 | if (getABIKind() == ARMABIInfo::AAPCS16_VFP && |
6535 | getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) { |
6536 | |
6537 | |
6538 | |
6539 | return ABIArgInfo::getIndirect( |
6540 | CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false); |
6541 | } |
6542 | |
6543 | |
6544 | // Aggregates larger than 64 bytes are passed indirectly (byval); the byval |
6545 | // alignment is 4 under APCS and clamped to 4..8 under AAPCS, with a realign |
6546 | // requested when the type's natural alignment is higher than that. |
6547 | uint64_t ABIAlign = 4; |
6548 | uint64_t TyAlign; |
6549 | if (getABIKind() == ARMABIInfo::AAPCS_VFP || |
6550 | getABIKind() == ARMABIInfo::AAPCS) { |
6551 | TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity(); |
6552 | ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); |
6553 | } else { |
6554 | TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity(); |
6555 | } |
6556 | if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) { |
6557 | assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval"); |
6558 | return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign), |
6559 | true, |
6560 | TyAlign > ABIAlign); |
6561 | } |
6562 | |
6563 | |
6564 | |
6565 | if (getTarget().isRenderScriptTarget()) { |
6566 | return coerceToIntArray(Ty, getContext(), getVMContext()); |
6567 | } |
6568 | |
6569 | |
6570 | llvm::Type* ElemTy; |
6571 | unsigned SizeRegs; |
6572 | |
6573 | |
6574 | if (TyAlign <= 4) { |
6575 | ElemTy = llvm::Type::getInt32Ty(getVMContext()); |
6576 | SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; |
6577 | } else { |
6578 | ElemTy = llvm::Type::getInt64Ty(getVMContext()); |
6579 | SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; |
6580 | } |
6581 | |
6582 | return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs)); |
6583 | } |
6584 | |
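 | // APCS "integer like" check (used for return values below): roughly, a type |
 | // of at most 32 bits that is not a float or vector and whose fields, if any, |
 | // all live at offset zero, so the value can come back in r0 instead of via |
 | // an sret pointer. |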
6585 | static bool isIntegerLikeType(QualType Ty, ASTContext &Context, |
6586 | llvm::LLVMContext &VMContext) { |
6587 | |
6588 | |
6589 | |
6590 | |
6591 | uint64_t Size = Context.getTypeSize(Ty); |
6592 | |
6593 | |
6594 | if (Size > 32) |
6595 | return false; |
6596 | |
6597 | |
6598 | if (Ty->isVectorType()) |
6599 | return false; |
6600 | |
6601 | |
6602 | if (Ty->isRealFloatingType()) |
6603 | return false; |
6604 | |
6605 | |
6606 | if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) |
6607 | return true; |
6608 | |
6609 | |
6610 | if (const ComplexType *CT = Ty->getAs<ComplexType>()) |
6611 | return isIntegerLikeType(CT->getElementType(), Context, VMContext); |
6612 | |
6613 | |
6614 | |
6615 | |
6616 | |
6617 | const RecordType *RT = Ty->getAs<RecordType>(); |
6618 | if (!RT) return false; |
6619 | |
6620 | |
6621 | const RecordDecl *RD = RT->getDecl(); |
6622 | if (RD->hasFlexibleArrayMember()) |
6623 | return false; |
6624 | |
6625 | |
6626 | |
6627 | const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); |
6628 | |
6629 | bool HadField = false; |
6630 | unsigned idx = 0; |
6631 | for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); |
6632 | i != e; ++i, ++idx) { |
6633 | const FieldDecl *FD = *i; |
6634 | |
6635 | |
6636 | |
6637 | |
6638 | |
6639 | if (FD->isBitField()) { |
6640 | if (!RD->isUnion()) |
6641 | HadField = true; |
6642 | |
6643 | if (!isIntegerLikeType(FD->getType(), Context, VMContext)) |
6644 | return false; |
6645 | |
6646 | continue; |
6647 | } |
6648 | |
6649 | |
6650 | if (Layout.getFieldOffset(idx) != 0) |
6651 | return false; |
6652 | |
6653 | if (!isIntegerLikeType(FD->getType(), Context, VMContext)) |
6654 | return false; |
6655 | |
6656 | |
6657 | |
6658 | |
6659 | if (!RD->isUnion()) { |
6660 | if (HadField) |
6661 | return false; |
6662 | |
6663 | HadField = true; |
6664 | } |
6665 | } |
6666 | |
6667 | return true; |
6668 | } |
6669 | |
6670 | ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic, |
6671 | unsigned functionCallConv) const { |
6672 | |
6673 | |
6674 | bool IsAAPCS_VFP = |
6675 | !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, true); |
6676 | |
6677 | if (RetTy->isVoidType()) |
6678 | return ABIArgInfo::getIgnore(); |
6679 | |
6680 | if (const VectorType *VT = RetTy->getAs<VectorType>()) { |
6681 | |
6682 | if (getContext().getTypeSize(RetTy) > 128) |
6683 | return getNaturalAlignIndirect(RetTy); |
6684 | |
6685 | |
6686 | if ((!getTarget().hasLegalHalfType() && |
6687 | (VT->getElementType()->isFloat16Type() || |
6688 | VT->getElementType()->isHalfType())) || |
6689 | (IsFloatABISoftFP && |
6690 | VT->getElementType()->isBFloat16Type())) |
6691 | return coerceIllegalVector(RetTy); |
6692 | } |
6693 | |
6694 | if (!isAggregateTypeForABI(RetTy)) { |
6695 | |
6696 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) |
6697 | RetTy = EnumTy->getDecl()->getIntegerType(); |
6698 | |
6699 | if (const auto *EIT = RetTy->getAs<ExtIntType>()) |
6700 | if (EIT->getNumBits() > 64) |
6701 | return getNaturalAlignIndirect(RetTy, false); |
6702 | |
6703 | return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) |
6704 | : ABIArgInfo::getDirect(); |
6705 | } |
6706 | |
6707 | |
6708 | if (getABIKind() == APCS) { |
6709 | if (isEmptyRecord(getContext(), RetTy, false)) |
6710 | return ABIArgInfo::getIgnore(); |
6711 | |
6712 | |
6713 | |
6714 | |
6715 | |
6716 | if (RetTy->isAnyComplexType()) |
6717 | return ABIArgInfo::getDirect(llvm::IntegerType::get( |
6718 | getVMContext(), getContext().getTypeSize(RetTy))); |
6719 | |
6720 | |
6721 | if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { |
6722 | |
6723 | uint64_t Size = getContext().getTypeSize(RetTy); |
6724 | if (Size <= 8) |
6725 | return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); |
6726 | if (Size <= 16) |
6727 | return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); |
6728 | return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); |
6729 | } |
6730 | |
6731 | |
6732 | return getNaturalAlignIndirect(RetTy); |
6733 | } |
6734 | |
6735 | |
6736 | |
6737 | if (isEmptyRecord(getContext(), RetTy, true)) |
6738 | return ABIArgInfo::getIgnore(); |
6739 | |
6740 | |
6741 | if (IsAAPCS_VFP) { |
6742 | const Type *Base = nullptr; |
6743 | uint64_t Members = 0; |
6744 | if (isHomogeneousAggregate(RetTy, Base, Members)) |
6745 | return classifyHomogeneousAggregate(RetTy, Base, Members); |
6746 | } |
6747 | |
6748 | |
6749 | |
6750 | uint64_t Size = getContext().getTypeSize(RetTy); |
6751 | if (Size <= 32) { |
6752 | |
6753 | |
6754 | if (getTarget().isRenderScriptTarget()) { |
6755 | return coerceToIntArray(RetTy, getContext(), getVMContext()); |
6756 | } |
6757 | if (getDataLayout().isBigEndian()) |
6758 | |
6759 | return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); |
6760 | |
6761 | |
6762 | if (Size <= 8) |
6763 | return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); |
6764 | if (Size <= 16) |
6765 | return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); |
6766 | return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); |
6767 | } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) { |
6768 | llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext()); |
6769 | llvm::Type *CoerceTy = |
6770 | llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32); |
6771 | return ABIArgInfo::getDirect(CoerceTy); |
6772 | } |
6773 | |
6774 | return getNaturalAlignIndirect(RetTy); |
6775 | } |
6776 | |
6777 | |
6778 | bool ARMABIInfo::isIllegalVectorType(QualType Ty) const { |
6779 | if (const VectorType *VT = Ty->getAs<VectorType> ()) { |
6780 | |
6781 | |
6782 | |
6783 | |
6784 | |
6785 | |
6786 | if ((!getTarget().hasLegalHalfType() && |
6787 | (VT->getElementType()->isFloat16Type() || |
6788 | VT->getElementType()->isHalfType())) || |
6789 | (IsFloatABISoftFP && |
6790 | VT->getElementType()->isBFloat16Type())) |
6791 | return true; |
6792 | if (isAndroid()) { |
6793 | |
6794 | |
6795 | |
6796 | |
6797 | |
6798 | unsigned NumElements = VT->getNumElements(); |
6799 | |
6800 | if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3) |
6801 | return true; |
6802 | } else { |
6803 | |
6804 | unsigned NumElements = VT->getNumElements(); |
6805 | uint64_t Size = getContext().getTypeSize(VT); |
6806 | |
6807 | if (!llvm::isPowerOf2_32(NumElements)) |
6808 | return true; |
6809 | |
6810 | return Size <= 32; |
6811 | } |
6812 | } |
6813 | return false; |
6814 | } |
6815 | |
6816 | |
6817 | bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const { |
6818 | if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { |
6819 | uint64_t NElements = AT->getSize().getZExtValue(); |
6820 | if (NElements == 0) |
6821 | return false; |
6822 | return containsAnyFP16Vectors(AT->getElementType()); |
6823 | } else if (const RecordType *RT = Ty->getAs<RecordType>()) { |
6824 | const RecordDecl *RD = RT->getDecl(); |
6825 | |
6826 | |
6827 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) |
6828 | if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) { |
6829 | return containsAnyFP16Vectors(B.getType()); |
6830 | })) |
6831 | return true; |
6832 | |
6833 | if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) { |
6834 | return FD && containsAnyFP16Vectors(FD->getType()); |
6835 | })) |
6836 | return true; |
6837 | |
6838 | return false; |
6839 | } else { |
6840 | if (const VectorType *VT = Ty->getAs<VectorType>()) |
6841 | return (VT->getElementType()->isFloat16Type() || |
6842 | VT->getElementType()->isBFloat16Type() || |
6843 | VT->getElementType()->isHalfType()); |
6844 | return false; |
6845 | } |
6846 | } |
6847 | |
6848 | bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize, |
6849 | llvm::Type *eltTy, |
6850 | unsigned numElts) const { |
6851 | if (!llvm::isPowerOf2_32(numElts)) |
6852 | return false; |
6853 | unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy); |
6854 | if (size > 64) |
6855 | return false; |
6856 | if (vectorSize.getQuantity() != 8 && |
6857 | (vectorSize.getQuantity() != 16 || numElts == 1)) |
6858 | return false; |
6859 | return true; |
6860 | } |
6861 | |
6862 | bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { |
6863 | |
6864 | |
6865 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { |
6866 | if (BT->getKind() == BuiltinType::Float || |
6867 | BT->getKind() == BuiltinType::Double || |
6868 | BT->getKind() == BuiltinType::LongDouble) |
6869 | return true; |
6870 | } else if (const VectorType *VT = Ty->getAs<VectorType>()) { |
6871 | unsigned VecSize = getContext().getTypeSize(VT); |
6872 | if (VecSize == 64 || VecSize == 128) |
6873 | return true; |
6874 | } |
6875 | return false; |
6876 | } |
6877 | |
6878 | bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, |
6879 | uint64_t Members) const { |
6880 | return Members <= 4; |
6881 | } |
6882 | |
6883 | bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention, |
6884 | bool acceptHalf) const { |
6885 | |
6886 | if (callConvention != llvm::CallingConv::C) |
6887 | return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP); |
6888 | else |
6889 | return (getABIKind() == AAPCS_VFP) || |
6890 | (acceptHalf && (getABIKind() == AAPCS16_VFP)); |
6891 | } |
6892 | |
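 | // ARM va_arg: the va_list is a plain pointer and every argument starts on a |
 | // 4-byte slot boundary. Arguments bigger than 16 bytes are read by address |
 | // when they are illegal vector types or, under AAPCS16, when they are not |
 | // homogeneous aggregates; otherwise the step alignment is clamped per ABI |
 | // (4..8 bytes for AAPCS, 4..16 for AAPCS16, exactly 4 for APCS). |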
6893 | Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
6894 | QualType Ty) const { |
6895 | CharUnits SlotSize = CharUnits::fromQuantity(4); |
6896 | |
6897 | |
6898 | if (isEmptyRecord(getContext(), Ty, true)) { |
6899 | Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize); |
6900 | Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); |
6901 | return Addr; |
6902 | } |
6903 | |
6904 | CharUnits TySize = getContext().getTypeSizeInChars(Ty); |
6905 | CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty); |
6906 | |
6907 | |
6908 | bool IsIndirect = false; |
6909 | const Type *Base = nullptr; |
6910 | uint64_t Members = 0; |
6911 | if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) { |
6912 | IsIndirect = true; |
6913 | |
6914 | |
6915 | |
6916 | } else if (TySize > CharUnits::fromQuantity(16) && |
6917 | getABIKind() == ARMABIInfo::AAPCS16_VFP && |
6918 | !isHomogeneousAggregate(Ty, Base, Members)) { |
6919 | IsIndirect = true; |
6920 | |
6921 | |
6922 | |
6923 | |
6924 | |
6925 | } else if (getABIKind() == ARMABIInfo::AAPCS_VFP || |
6926 | getABIKind() == ARMABIInfo::AAPCS) { |
6927 | TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4)); |
6928 | TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8)); |
6929 | } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) { |
6930 | |
6931 | TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4)); |
6932 | TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16)); |
6933 | } else { |
6934 | TyAlignForABI = CharUnits::fromQuantity(4); |
6935 | } |
6936 | |
6937 | TypeInfoChars TyInfo(TySize, TyAlignForABI, false); |
6938 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, |
6939 | SlotSize, true); |
6940 | } |
6941 | |
6942 | //===----------------------------------------------------------------------===// |
6943 | // NVPTX ABI Implementation |
6944 | //===----------------------------------------------------------------------===// |
6945 | |
6946 | namespace { |
6947 | |
6948 | class NVPTXTargetCodeGenInfo; |
6949 | |
6950 | class NVPTXABIInfo : public ABIInfo { |
6951 | NVPTXTargetCodeGenInfo &CGInfo; |
6952 | |
6953 | public: |
6954 | NVPTXABIInfo(CodeGenTypes &CGT, NVPTXTargetCodeGenInfo &Info) |
6955 | : ABIInfo(CGT), CGInfo(Info) {} |
6956 | |
6957 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
6958 | ABIArgInfo classifyArgumentType(QualType Ty) const; |
6959 | |
6960 | void computeInfo(CGFunctionInfo &FI) const override; |
6961 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
6962 | QualType Ty) const override; |
6963 | bool isUnsupportedType(QualType T) const; |
6964 | ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, unsigned MaxSize) const; |
6965 | }; |
6966 | |
6967 | class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo { |
6968 | public: |
6969 | NVPTXTargetCodeGenInfo(CodeGenTypes &CGT) |
6970 | : TargetCodeGenInfo(std::make_unique<NVPTXABIInfo>(CGT, *this)) {} |
6971 | |
6972 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
6973 | CodeGen::CodeGenModule &M) const override; |
6974 | bool shouldEmitStaticExternCAliases() const override; |
6975 | |
6976 | llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const override { |
6977 | |
6978 | |
6979 | return llvm::Type::getInt64Ty(getABIInfo().getVMContext()); |
6980 | } |
6981 | |
6982 | llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const override { |
6983 | |
6984 | |
6985 | return llvm::Type::getInt64Ty(getABIInfo().getVMContext()); |
6986 | } |
6987 | |
6988 | bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF, LValue Dst, |
6989 | LValue Src) const override { |
6990 | emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src); |
6991 | return true; |
6992 | } |
6993 | |
6994 | bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF, LValue Dst, |
6995 | LValue Src) const override { |
6996 | emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src); |
6997 | return true; |
6998 | } |
6999 | |
7000 | private: |
7001 | |
7002 | |
7003 | static void addNVVMMetadata(llvm::GlobalValue *GV, StringRef Name, |
7004 | int Operand); |
7005 | |
7006 | static void emitBuiltinSurfTexDeviceCopy(CodeGenFunction &CGF, LValue Dst, |
7007 | LValue Src) { |
7008 | llvm::Value *Handle = nullptr; |
7009 | llvm::Constant *C = |
7010 | llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).getPointer()); |
7011 | |
7012 | if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C)) |
7013 | C = llvm::cast<llvm::Constant>(ASC->getPointerOperand()); |
7014 | if (auto *GV = llvm::dyn_cast_or_null<llvm::GlobalVariable>(C)) { |
7015 | |
7016 | |
7017 | Handle = CGF.EmitRuntimeCall( |
7018 | CGF.CGM.getIntrinsic(llvm::Intrinsic::nvvm_texsurf_handle_internal, |
7019 | {GV->getType()}), |
7020 | {GV}, "texsurf_handle"); |
7021 | } else |
7022 | Handle = CGF.EmitLoadOfScalar(Src, SourceLocation()); |
7023 | CGF.EmitStoreOfScalar(Handle, Dst); |
7024 | } |
7025 | }; |
7026 | |
7027 | |
7028 | bool NVPTXABIInfo::isUnsupportedType(QualType T) const { |
7029 | ASTContext &Context = getContext(); |
7030 | if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type()) |
7031 | return true; |
7032 | if (!Context.getTargetInfo().hasFloat128Type() && |
7033 | (T->isFloat128Type() || |
7034 | (T->isRealFloatingType() && Context.getTypeSize(T) == 128))) |
7035 | return true; |
7036 | if (const auto *EIT = T->getAs<ExtIntType>()) |
7037 | return EIT->getNumBits() > |
7038 | (Context.getTargetInfo().hasInt128Type() ? 128U : 64U); |
7039 | if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() && |
7040 | Context.getTypeSize(T) > 64U) |
7041 | return true; |
7042 | if (const auto *AT = T->getAsArrayTypeUnsafe()) |
7043 | return isUnsupportedType(AT->getElementType()); |
7044 | const auto *RT = T->getAs<RecordType>(); |
7045 | if (!RT) |
7046 | return false; |
7047 | const RecordDecl *RD = RT->getDecl(); |
7048 | |
7049 | |
7050 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) |
7051 | for (const CXXBaseSpecifier &I : CXXRD->bases()) |
7052 | if (isUnsupportedType(I.getType())) |
7053 | return true; |
7054 | |
7055 | for (const FieldDecl *I : RD->fields()) |
7056 | if (isUnsupportedType(I->getType())) |
7057 | return true; |
7058 | return false; |
7059 | } |
7060 | |
7061 | |
7062 | ABIArgInfo NVPTXABIInfo::coerceToIntArrayWithLimit(QualType Ty, |
7063 | unsigned MaxSize) const { |
7064 | |
7065 | const uint64_t Size = getContext().getTypeSize(Ty); |
7066 | const uint64_t Alignment = getContext().getTypeAlign(Ty); |
7067 | const unsigned Div = std::min<unsigned>(MaxSize, Alignment); |
7068 | llvm::Type *IntType = llvm::Type::getIntNTy(getVMContext(), Div); |
7069 | const uint64_t NumElements = (Size + Div - 1) / Div; |
7070 | return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements)); |
7071 | } |
7072 | |
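 | // NVPTX return values: types the device cannot represent (see |
 | // isUnsupportedType above) are coerced to [N x iM] integer arrays when |
 | // building OpenMP device code, aggregates are returned directly by value, |
 | // and scalars follow the usual promote-or-pass-direct rule. |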
7073 | ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const { |
7074 | if (RetTy->isVoidType()) |
7075 | return ABIArgInfo::getIgnore(); |
7076 | |
7077 | if (getContext().getLangOpts().OpenMP && |
7078 | getContext().getLangOpts().OpenMPIsDevice && isUnsupportedType(RetTy)) |
7079 | return coerceToIntArrayWithLimit(RetTy, 64); |
7080 | |
7081 | |
7082 | if (!RetTy->isScalarType()) |
7083 | return ABIArgInfo::getDirect(); |
7084 | |
7085 | |
7086 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) |
7087 | RetTy = EnumTy->getDecl()->getIntegerType(); |
7088 | |
7089 | return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) |
7090 | : ABIArgInfo::getDirect()); |
7091 | } |
7092 | |
7093 | ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const { |
7094 | |
7095 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
7096 | Ty = EnumTy->getDecl()->getIntegerType(); |
7097 | |
7098 | |
7099 | if (isAggregateTypeForABI(Ty)) { |
7100 | |
7101 | |
7102 | if (getContext().getLangOpts().CUDAIsDevice) { |
7103 | if (Ty->isCUDADeviceBuiltinSurfaceType()) |
7104 | return ABIArgInfo::getDirect( |
7105 | CGInfo.getCUDADeviceBuiltinSurfaceDeviceType()); |
7106 | if (Ty->isCUDADeviceBuiltinTextureType()) |
7107 | return ABIArgInfo::getDirect( |
7108 | CGInfo.getCUDADeviceBuiltinTextureDeviceType()); |
7109 | } |
7110 | return getNaturalAlignIndirect(Ty, true); |
7111 | } |
7112 | |
7113 | if (const auto *EIT = Ty->getAs<ExtIntType>()) { |
7114 | if ((EIT->getNumBits() > 128) || |
7115 | (!getContext().getTargetInfo().hasInt128Type() && |
7116 | EIT->getNumBits() > 64)) |
7117 | return getNaturalAlignIndirect(Ty, true); |
7118 | } |
7119 | |
7120 | return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) |
7121 | : ABIArgInfo::getDirect()); |
7122 | } |
7123 | |
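| // Classify the return and argument types, then switch the effective calling |
| // convention to the PTX runtime convention unless the user explicitly asked |
| // for a non-C convention. |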
7124 | void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const { |
7125 | if (!getCXXABI().classifyReturnType(FI)) |
7126 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
7127 | for (auto &I : FI.arguments()) |
7128 | I.info = classifyArgumentType(I.type); |
7129 | |
7130 | |
7131 | if (FI.getCallingConvention() != llvm::CallingConv::C) |
7132 | return; |
7133 | |
7134 | FI.setEffectiveCallingConvention(getRuntimeCC()); |
7135 | } |
7136 | |
7137 | Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
7138 | QualType Ty) const { |
7139 | llvm_unreachable("NVPTX does not support varargs"); |
7140 | } |
7141 | |
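| // Emit NVVM annotations: mark CUDA surface/texture variables, tag OpenCL and |
| // CUDA kernels as "kernel", and record __launch_bounds__ limits as maxntidx |
| // and minctasm metadata. |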
7142 | void NVPTXTargetCodeGenInfo::setTargetAttributes( |
7143 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { |
7144 | if (GV->isDeclaration()) |
7145 | return; |
7146 | const VarDecl *VD = dyn_cast_or_null<VarDecl>(D); |
7147 | if (VD) { |
7148 | if (M.getLangOpts().CUDA) { |
7149 | if (VD->getType()->isCUDADeviceBuiltinSurfaceType()) |
7150 | addNVVMMetadata(GV, "surface", 1); |
7151 | else if (VD->getType()->isCUDADeviceBuiltinTextureType()) |
7152 | addNVVMMetadata(GV, "texture", 1); |
7153 | return; |
7154 | } |
7155 | } |
7156 | |
7157 | const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); |
7158 | if (!FD) return; |
7159 | |
7160 | llvm::Function *F = cast<llvm::Function>(GV); |
7161 | |
7162 | |
7163 | if (M.getLangOpts().OpenCL) { |
7164 | |
7165 | |
7166 | if (FD->hasAttr<OpenCLKernelAttr>()) { |
7167 | |
7168 | |
7169 | addNVVMMetadata(F, "kernel", 1); |
7170 | |
7171 | F->addFnAttr(llvm::Attribute::NoInline); |
7172 | } |
7173 | } |
7174 | |
7175 | |
7176 | if (M.getLangOpts().CUDA) { |
7177 | |
7178 | |
7179 | |
7180 | if (FD->hasAttr<CUDAGlobalAttr>()) { |
7181 | |
7182 | addNVVMMetadata(F, "kernel", 1); |
7183 | } |
7184 | if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) { |
7185 | |
7186 | llvm::APSInt MaxThreads(32); |
7187 | MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext()); |
7188 | if (MaxThreads > 0) |
7189 | addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue()); |
7190 | |
7191 | |
7192 | |
7193 | |
7194 | if (Attr->getMinBlocks()) { |
7195 | llvm::APSInt MinBlocks(32); |
7196 | MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext()); |
7197 | if (MinBlocks > 0) |
7198 | |
7199 | addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue()); |
7200 | } |
7201 | } |
7202 | } |
7203 | } |
7204 | |
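| // Append a (global, name, operand) triple to the module's nvvm.annotations |
| // named metadata, which the NVPTX backend consumes. |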
7205 | void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV, |
7206 | StringRef Name, int Operand) { |
7207 | llvm::Module *M = GV->getParent(); |
7208 | llvm::LLVMContext &Ctx = M->getContext(); |
7209 | |
7210 | |
7211 | llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations"); |
7212 | |
7213 | llvm::Metadata *MDVals[] = { |
7214 | llvm::ConstantAsMetadata::get(GV), llvm::MDString::get(Ctx, Name), |
7215 | llvm::ConstantAsMetadata::get( |
7216 | llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))}; |
7217 | |
7218 | MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); |
7219 | } |
7220 | |
7221 | bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const { |
7222 | return false; |
7223 | } |
7224 | } |
7225 | |
7226 | //===----------------------------------------------------------------------===// |
7227 | // SystemZ ABI Implementation |
7228 | //===----------------------------------------------------------------------===// |
7229 | |
7230 | namespace { |
7231 | |
7232 | class SystemZABIInfo : public SwiftABIInfo { |
7233 | bool HasVector; |
7234 | bool IsSoftFloatABI; |
7235 | |
7236 | public: |
7237 | SystemZABIInfo(CodeGenTypes &CGT, bool HV, bool SF) |
7238 | : SwiftABIInfo(CGT), HasVector(HV), IsSoftFloatABI(SF) {} |
7239 | |
7240 | bool isPromotableIntegerTypeForABI(QualType Ty) const; |
7241 | bool isCompoundType(QualType Ty) const; |
7242 | bool isVectorArgumentType(QualType Ty) const; |
7243 | bool isFPArgumentType(QualType Ty) const; |
7244 | QualType GetSingleElementType(QualType Ty) const; |
7245 | |
7246 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
7247 | ABIArgInfo classifyArgumentType(QualType ArgTy) const; |
7248 | |
7249 | void computeInfo(CGFunctionInfo &FI) const override { |
7250 | if (!getCXXABI().classifyReturnType(FI)) |
7251 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
7252 | for (auto &I : FI.arguments()) |
7253 | I.info = classifyArgumentType(I.type); |
7254 | } |
7255 | |
7256 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
7257 | QualType Ty) const override; |
7258 | |
7259 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, |
7260 | bool asReturnValue) const override { |
7261 | return occupiesMoreThan(CGT, scalars, 4); |
7262 | } |
7263 | bool isSwiftErrorInRegister() const override { |
7264 | return false; |
7265 | } |
7266 | }; |
7267 | |
7268 | class SystemZTargetCodeGenInfo : public TargetCodeGenInfo { |
7269 | public: |
7270 | SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector, bool SoftFloatABI) |
7271 | : TargetCodeGenInfo( |
7272 | std::make_unique<SystemZABIInfo>(CGT, HasVector, SoftFloatABI)) {} |
7273 | |
7274 | llvm::Value *testFPKind(llvm::Value *V, unsigned BuiltinID, |
7275 | CGBuilderTy &Builder, |
7276 | CodeGenModule &CGM) const override { |
7277 | assert(V->getType()->isFloatingPointTy() && "V should have an FP type."); |
7278 | |
7279 | if (!Builder.getIsFPConstrained()) |
7280 | return nullptr; |
7281 | |
7282 | llvm::Type *Ty = V->getType(); |
7283 | if (Ty->isFloatTy() || Ty->isDoubleTy() || Ty->isFP128Ty()) { |
7284 | llvm::Module &M = CGM.getModule(); |
7285 | auto &Ctx = M.getContext(); |
7286 | llvm::Function *TDCFunc = |
7287 | llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::s390_tdc, Ty); |
7288 | unsigned TDCBits = 0; |
7289 | switch (BuiltinID) { |
7290 | case Builtin::BI__builtin_isnan: |
7291 | TDCBits = 0xf; |
7292 | break; |
7293 | case Builtin::BIfinite: |
7294 | case Builtin::BI__finite: |
7295 | case Builtin::BIfinitef: |
7296 | case Builtin::BI__finitef: |
7297 | case Builtin::BIfinitel: |
7298 | case Builtin::BI__finitel: |
7299 | case Builtin::BI__builtin_isfinite: |
7300 | TDCBits = 0xfc0; |
7301 | break; |
7302 | case Builtin::BI__builtin_isinf: |
7303 | TDCBits = 0x30; |
7304 | break; |
7305 | default: |
7306 | break; |
7307 | } |
7308 | if (TDCBits) |
7309 | return Builder.CreateCall( |
7310 | TDCFunc, |
7311 | {V, llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), TDCBits)}); |
7312 | } |
7313 | return nullptr; |
7314 | } |
7315 | }; |
7316 | } |
7317 | |
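| // Beyond the standard C promotions, SystemZ treats 32-bit int/unsigned and |
| // _ExtInt types narrower than 64 bits as promotable, so they are extended to |
| // a full 64-bit register. |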
7318 | bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const { |
7319 | |
7320 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
7321 | Ty = EnumTy->getDecl()->getIntegerType(); |
7322 | |
7323 | |
7324 | if (ABIInfo::isPromotableIntegerTypeForABI(Ty)) |
7325 | return true; |
7326 | |
7327 | if (const auto *EIT = Ty->getAs<ExtIntType>()) |
7328 | if (EIT->getNumBits() < 64) |
7329 | return true; |
7330 | |
7331 | |
7332 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) |
7333 | switch (BT->getKind()) { |
7334 | case BuiltinType::Int: |
7335 | case BuiltinType::UInt: |
7336 | return true; |
7337 | default: |
7338 | return false; |
7339 | } |
7340 | return false; |
7341 | } |
7342 | |
7343 | bool SystemZABIInfo::isCompoundType(QualType Ty) const { |
7344 | return (Ty->isAnyComplexType() || |
7345 | Ty->isVectorType() || |
7346 | isAggregateTypeForABI(Ty)); |
7347 | } |
7348 | |
7349 | bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const { |
7350 | return (HasVector && |
7351 | Ty->isVectorType() && |
7352 | getContext().getTypeSize(Ty) <= 128); |
7353 | } |
7354 | |
7355 | bool SystemZABIInfo::isFPArgumentType(QualType Ty) const { |
7356 | if (IsSoftFloatABI) |
7357 | return false; |
7358 | |
7359 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) |
7360 | switch (BT->getKind()) { |
7361 | case BuiltinType::Float: |
7362 | case BuiltinType::Double: |
7363 | return true; |
7364 | default: |
7365 | return false; |
7366 | } |
7367 | |
7368 | return false; |
7369 | } |
7370 | |
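| // If Ty is a struct or class that (recursively) wraps exactly one non-empty |
| // base or field, return that element's type; otherwise return Ty itself. |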
7371 | QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const { |
7372 | const RecordType *RT = Ty->getAs<RecordType>(); |
7373 | |
7374 | if (RT && RT->isStructureOrClassType()) { |
7375 | const RecordDecl *RD = RT->getDecl(); |
7376 | QualType Found; |
7377 | |
7378 | |
7379 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) |
7380 | for (const auto &I : CXXRD->bases()) { |
7381 | QualType Base = I.getType(); |
7382 | |
7383 | |
7384 | if (isEmptyRecord(getContext(), Base, true)) |
7385 | continue; |
7386 | |
7387 | if (!Found.isNull()) |
7388 | return Ty; |
7389 | Found = GetSingleElementType(Base); |
7390 | } |
7391 | |
7392 | |
7393 | for (const auto *FD : RD->fields()) { |
7394 | |
7395 | |
7396 | |
7397 | if (getContext().getLangOpts().CPlusPlus && |
7398 | FD->isZeroLengthBitField(getContext())) |
7399 | continue; |
7400 | |
7401 | if (FD->hasAttr<NoUniqueAddressAttr>() && |
7402 | isEmptyRecord(getContext(), FD->getType(), true)) |
7403 | continue; |
7404 | |
7405 | |
7406 | |
7407 | if (!Found.isNull()) |
7408 | return Ty; |
7409 | Found = GetSingleElementType(FD->getType()); |
7410 | } |
7411 | |
7412 | |
7413 | |
7414 | if (!Found.isNull()) |
7415 | return Found; |
7416 | } |
7417 | |
7418 | return Ty; |
7419 | } |
7420 | |
7421 | Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
7422 | QualType Ty) const { |
7423 | // The va_list structure is expected to look like: |
7424 | //   struct { |
7425 | //     i64 __gpr;                   // index 0: GPR count |
7426 | //     i64 __fpr;                   // index 1: FPR count |
7427 | //     i8 *__overflow_arg_area;     // index 2: overflow (stack) area |
7428 | //     i8 *__reg_save_area;         // index 3: register save area |
7429 | //   }; |
7430 | // so the struct GEP indices below address these four fields. |
7431 | |
7432 | |
7433 | |
7434 | Ty = getContext().getCanonicalType(Ty); |
7435 | auto TyInfo = getContext().getTypeInfoInChars(Ty); |
7436 | llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty); |
7437 | llvm::Type *DirectTy = ArgTy; |
7438 | ABIArgInfo AI = classifyArgumentType(Ty); |
7439 | bool IsIndirect = AI.isIndirect(); |
7440 | bool InFPRs = false; |
7441 | bool IsVector = false; |
7442 | CharUnits UnpaddedSize; |
7443 | CharUnits DirectAlign; |
7444 | if (IsIndirect) { |
7445 | DirectTy = llvm::PointerType::getUnqual(DirectTy); |
7446 | UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8); |
7447 | } else { |
7448 | if (AI.getCoerceToType()) |
7449 | ArgTy = AI.getCoerceToType(); |
7450 | InFPRs = (!IsSoftFloatABI && (ArgTy->isFloatTy() || ArgTy->isDoubleTy())); |
7451 | IsVector = ArgTy->isVectorTy(); |
7452 | UnpaddedSize = TyInfo.Width; |
7453 | DirectAlign = TyInfo.Align; |
7454 | } |
7455 | CharUnits PaddedSize = CharUnits::fromQuantity(8); |
7456 | if (IsVector && UnpaddedSize > PaddedSize) |
7457 | PaddedSize = CharUnits::fromQuantity(16); |
7458 | assert((UnpaddedSize <= PaddedSize) && "Invalid argument size."); |
7459 | |
7460 | CharUnits Padding = (PaddedSize - UnpaddedSize); |
7461 | |
7462 | llvm::Type *IndexTy = CGF.Int64Ty; |
7463 | llvm::Value *PaddedSizeV = |
7464 | llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity()); |
7465 | |
7466 | if (IsVector) { |
7467 | |
7468 | |
7469 | |
7470 | Address OverflowArgAreaPtr = |
7471 | CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr"); |
7472 | Address OverflowArgArea = |
7473 | Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"), |
7474 | TyInfo.Align); |
7475 | Address MemAddr = |
7476 | CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr"); |
7477 | |
7478 | |
7479 | llvm::Value *NewOverflowArgArea = |
7480 | CGF.Builder.CreateGEP(OverflowArgArea.getElementType(), |
7481 | OverflowArgArea.getPointer(), PaddedSizeV, |
7482 | "overflow_arg_area"); |
7483 | CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); |
7484 | |
7485 | return MemAddr; |
7486 | } |
7487 | |
7488 | assert(PaddedSize.getQuantity() == 8); |
7489 | |
7490 | unsigned MaxRegs, RegCountField, RegSaveIndex; |
7491 | CharUnits RegPadding; |
7492 | if (InFPRs) { |
7493 | MaxRegs = 4; |
7494 | RegCountField = 1; |
7495 | RegSaveIndex = 16; |
7496 | RegPadding = CharUnits(); |
7497 | } else { |
7498 | MaxRegs = 5; |
7499 | RegCountField = 0; |
7500 | RegSaveIndex = 2; |
7501 | RegPadding = Padding; |
7502 | } |
7503 | |
7504 | Address RegCountPtr = |
7505 | CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr"); |
7506 | llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count"); |
7507 | llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs); |
7508 | llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV, |
7509 | "fits_in_regs"); |
7510 | |
7511 | llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); |
7512 | llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); |
7513 | llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); |
7514 | CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); |
7515 | |
7516 | |
7517 | CGF.EmitBlock(InRegBlock); |
7518 | |
7519 | |
7520 | llvm::Value *ScaledRegCount = |
7521 | CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count"); |
7522 | llvm::Value *RegBase = |
7523 | llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity() |
7524 | + RegPadding.getQuantity()); |
7525 | llvm::Value *RegOffset = |
7526 | CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset"); |
7527 | Address RegSaveAreaPtr = |
7528 | CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr"); |
7529 | llvm::Value *RegSaveArea = |
7530 | CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area"); |
7531 | Address RawRegAddr(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, RegOffset, |
7532 | "raw_reg_addr"), |
7533 | PaddedSize); |
7534 | Address RegAddr = |
7535 | CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr"); |
7536 | |
7537 | |
7538 | llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1); |
7539 | llvm::Value *NewRegCount = |
7540 | CGF.Builder.CreateAdd(RegCount, One, "reg_count"); |
7541 | CGF.Builder.CreateStore(NewRegCount, RegCountPtr); |
7542 | CGF.EmitBranch(ContBlock); |
7543 | |
7544 | |
7545 | CGF.EmitBlock(InMemBlock); |
7546 | |
7547 | |
7548 | Address OverflowArgAreaPtr = |
7549 | CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr"); |
7550 | Address OverflowArgArea = |
7551 | Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"), |
7552 | PaddedSize); |
7553 | Address RawMemAddr = |
7554 | CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr"); |
7555 | Address MemAddr = |
7556 | CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr"); |
7557 | |
7558 | |
7559 | llvm::Value *NewOverflowArgArea = |
7560 | CGF.Builder.CreateGEP(OverflowArgArea.getElementType(), |
7561 | OverflowArgArea.getPointer(), PaddedSizeV, |
7562 | "overflow_arg_area"); |
7563 | CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); |
7564 | CGF.EmitBranch(ContBlock); |
7565 | |
7566 | |
7567 | CGF.EmitBlock(ContBlock); |
7568 | Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, |
7569 | MemAddr, InMemBlock, "va_arg.addr"); |
7570 | |
7571 | if (IsIndirect) |
7572 | ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"), |
7573 | TyInfo.Align); |
7574 | |
7575 | return ResAddr; |
7576 | } |
7577 | |
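| // Return values: vectors up to 16 bytes are returned in a vector register; |
| // any other compound type, or an integer wider than 64 bits, is returned |
| // indirectly; small integers are extended. |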
7578 | ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const { |
7579 | if (RetTy->isVoidType()) |
7580 | return ABIArgInfo::getIgnore(); |
7581 | if (isVectorArgumentType(RetTy)) |
7582 | return ABIArgInfo::getDirect(); |
7583 | if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64) |
7584 | return getNaturalAlignIndirect(RetTy); |
7585 | return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) |
7586 | : ABIArgInfo::getDirect()); |
7587 | } |
7588 | |
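| // Arguments: non-trivially-copyable C++ classes are passed indirectly, small |
| // integers are extended, single-element FP or vector wrappers are passed as |
| // their element, and other aggregates are passed directly only when their |
| // size is exactly 1, 2, 4 or 8 bytes. |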
7589 | ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const { |
7590 | |
7591 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) |
7592 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
7593 | |
7594 | |
7595 | if (isPromotableIntegerTypeForABI(Ty)) |
7596 | return ABIArgInfo::getExtend(Ty); |
7597 | |
7598 | |
7599 | |
7600 | |
7601 | uint64_t Size = getContext().getTypeSize(Ty); |
7602 | QualType SingleElementTy = GetSingleElementType(Ty); |
7603 | if (isVectorArgumentType(SingleElementTy) && |
7604 | getContext().getTypeSize(SingleElementTy) == Size) |
7605 | return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy)); |
7606 | |
7607 | |
7608 | if (Size != 8 && Size != 16 && Size != 32 && Size != 64) |
7609 | return getNaturalAlignIndirect(Ty, false); |
7610 | |
7611 | |
7612 | if (const RecordType *RT = Ty->getAs<RecordType>()) { |
7613 | |
7614 | |
7615 | const RecordDecl *RD = RT->getDecl(); |
7616 | if (RD->hasFlexibleArrayMember()) |
7617 | return getNaturalAlignIndirect(Ty, false); |
7618 | |
7619 | |
7620 | llvm::Type *PassTy; |
7621 | if (isFPArgumentType(SingleElementTy)) { |
7622 | assert(Size == 32 || Size == 64); |
7623 | if (Size == 32) |
7624 | PassTy = llvm::Type::getFloatTy(getVMContext()); |
7625 | else |
7626 | PassTy = llvm::Type::getDoubleTy(getVMContext()); |
7627 | } else |
7628 | PassTy = llvm::IntegerType::get(getVMContext(), Size); |
7629 | return ABIArgInfo::getDirect(PassTy); |
7630 | } |
7631 | |
7632 | |
7633 | if (isCompoundType(Ty)) |
7634 | return getNaturalAlignIndirect(Ty, false); |
7635 | |
7636 | return ABIArgInfo::getDirect(nullptr); |
7637 | } |
7638 | |
7639 | //===----------------------------------------------------------------------===// |
7640 | // MSP430 ABI Implementation |
7641 | //===----------------------------------------------------------------------===// |
7642 | |
7643 | namespace { |
7644 | |
7645 | class MSP430ABIInfo : public DefaultABIInfo { |
7646 | static ABIArgInfo complexArgInfo() { |
7647 | ABIArgInfo Info = ABIArgInfo::getDirect(); |
7648 | Info.setCanBeFlattened(false); |
7649 | return Info; |
7650 | } |
7651 | |
7652 | public: |
7653 | MSP430ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} |
7654 | |
7655 | ABIArgInfo classifyReturnType(QualType RetTy) const { |
7656 | if (RetTy->isAnyComplexType()) |
7657 | return complexArgInfo(); |
7658 | |
7659 | return DefaultABIInfo::classifyReturnType(RetTy); |
7660 | } |
7661 | |
7662 | ABIArgInfo classifyArgumentType(QualType RetTy) const { |
7663 | if (RetTy->isAnyComplexType()) |
7664 | return complexArgInfo(); |
7665 | |
7666 | return DefaultABIInfo::classifyArgumentType(RetTy); |
7667 | } |
7668 | |
7669 | |
7670 | |
7671 | void computeInfo(CGFunctionInfo &FI) const override { |
7672 | if (!getCXXABI().classifyReturnType(FI)) |
7673 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
7674 | for (auto &I : FI.arguments()) |
7675 | I.info = classifyArgumentType(I.type); |
7676 | } |
7677 | |
7678 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
7679 | QualType Ty) const override { |
7680 | return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty)); |
7681 | } |
7682 | }; |
7683 | |
7684 | class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { |
7685 | public: |
7686 | MSP430TargetCodeGenInfo(CodeGenTypes &CGT) |
7687 | : TargetCodeGenInfo(std::make_unique<MSP430ABIInfo>(CGT)) {} |
7688 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
7689 | CodeGen::CodeGenModule &M) const override; |
7690 | }; |
7691 | |
7692 | } |
7693 | |
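| // Lower the MSP430 interrupt(N) attribute: give the handler the MSP430_INTR |
| // calling convention, forbid inlining, and record N in an "interrupt" |
| // function attribute for the backend. |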
7694 | void MSP430TargetCodeGenInfo::setTargetAttributes( |
7695 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { |
7696 | if (GV->isDeclaration()) |
7697 | return; |
7698 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { |
7699 | const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>(); |
7700 | if (!InterruptAttr) |
7701 | return; |
7702 | |
7703 | |
7704 | llvm::Function *F = cast<llvm::Function>(GV); |
7705 | |
7706 | |
7707 | F->setCallingConv(llvm::CallingConv::MSP430_INTR); |
7708 | |
7709 | |
7710 | F->addFnAttr(llvm::Attribute::NoInline); |
7711 | F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber())); |
7712 | } |
7713 | } |
7714 | |
7715 | |
7716 | //===----------------------------------------------------------------------===// |
7717 | // MIPS ABI Implementation |
7718 | //===----------------------------------------------------------------------===// |
7719 | |
7720 | namespace { |
7721 | class MipsABIInfo : public ABIInfo { |
7722 | bool IsO32; |
7723 | unsigned MinABIStackAlignInBytes, StackAlignInBytes; |
7724 | void CoerceToIntArgs(uint64_t TySize, |
7725 | SmallVectorImpl<llvm::Type *> &ArgList) const; |
7726 | llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const; |
7727 | llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; |
7728 | llvm::Type* getPaddingType(uint64_t OrigOffset, uint64_t Offset) const; |
7729 | public: |
7730 | MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : |
7731 | ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8), |
7732 | StackAlignInBytes(IsO32 ? 8 : 16) {} |
7733 | |
7734 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
7735 | ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const; |
7736 | void computeInfo(CGFunctionInfo &FI) const override; |
7737 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
7738 | QualType Ty) const override; |
7739 | ABIArgInfo extendType(QualType Ty) const; |
7740 | }; |
7741 | |
7742 | class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { |
7743 | unsigned SizeOfUnwindException; |
7744 | public: |
7745 | MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) |
7746 | : TargetCodeGenInfo(std::make_unique<MipsABIInfo>(CGT, IsO32)), |
7747 | SizeOfUnwindException(IsO32 ? 24 : 32) {} |
7748 | |
7749 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { |
7750 | return 29; |
7751 | } |
7752 | |
7753 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
7754 | CodeGen::CodeGenModule &CGM) const override { |
7755 | const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); |
7756 | if (!FD) return; |
7757 | llvm::Function *Fn = cast<llvm::Function>(GV); |
7758 | |
7759 | if (FD->hasAttr<MipsLongCallAttr>()) |
7760 | Fn->addFnAttr("long-call"); |
7761 | else if (FD->hasAttr<MipsShortCallAttr>()) |
7762 | Fn->addFnAttr("short-call"); |
7763 | |
7764 | |
7765 | if (GV->isDeclaration()) |
7766 | return; |
7767 | |
7768 | if (FD->hasAttr<Mips16Attr>()) { |
7769 | Fn->addFnAttr("mips16"); |
7770 | } |
7771 | else if (FD->hasAttr<NoMips16Attr>()) { |
7772 | Fn->addFnAttr("nomips16"); |
7773 | } |
7774 | |
7775 | if (FD->hasAttr<MicroMipsAttr>()) |
7776 | Fn->addFnAttr("micromips"); |
7777 | else if (FD->hasAttr<NoMicroMipsAttr>()) |
7778 | Fn->addFnAttr("nomicromips"); |
7779 | |
7780 | const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>(); |
7781 | if (!Attr) |
7782 | return; |
7783 | |
7784 | const char *Kind; |
7785 | switch (Attr->getInterrupt()) { |
7786 | case MipsInterruptAttr::eic: Kind = "eic"; break; |
7787 | case MipsInterruptAttr::sw0: Kind = "sw0"; break; |
7788 | case MipsInterruptAttr::sw1: Kind = "sw1"; break; |
7789 | case MipsInterruptAttr::hw0: Kind = "hw0"; break; |
7790 | case MipsInterruptAttr::hw1: Kind = "hw1"; break; |
7791 | case MipsInterruptAttr::hw2: Kind = "hw2"; break; |
7792 | case MipsInterruptAttr::hw3: Kind = "hw3"; break; |
7793 | case MipsInterruptAttr::hw4: Kind = "hw4"; break; |
7794 | case MipsInterruptAttr::hw5: Kind = "hw5"; break; |
7795 | } |
7796 | |
7797 | Fn->addFnAttr("interrupt", Kind); |
7798 | |
7799 | } |
7800 | |
7801 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
7802 | llvm::Value *Address) const override; |
7803 | |
7804 | unsigned getSizeOfUnwindException() const override { |
7805 | return SizeOfUnwindException; |
7806 | } |
7807 | }; |
7808 | } |
7809 | |
7810 | void MipsABIInfo::CoerceToIntArgs( |
7811 | uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const { |
7812 | llvm::IntegerType *IntTy = |
7813 | llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); |
7814 | |
7815 | |
7816 | for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N) |
7817 | ArgList.push_back(IntTy); |
7818 | |
7819 | |
7820 | unsigned R = TySize % (MinABIStackAlignInBytes * 8); |
7821 | |
7822 | if (R) |
7823 | ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); |
7824 | } |
7825 | |
7826 | |
7827 | |
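| // Build the coercion type used to pass an aggregate in registers. On O32 |
| // everything becomes i32 chunks; on N32/N64, doubles that sit on a 64-bit |
| // boundary keep an f64 slot and the rest is coerced to i64 chunks. |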
7828 | llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const { |
7829 | SmallVector<llvm::Type*, 8> ArgList, IntArgList; |
7830 | |
7831 | if (IsO32) { |
7832 | CoerceToIntArgs(TySize, ArgList); |
7833 | return llvm::StructType::get(getVMContext(), ArgList); |
7834 | } |
7835 | |
7836 | if (Ty->isComplexType()) |
7837 | return CGT.ConvertType(Ty); |
7838 | |
7839 | const RecordType *RT = Ty->getAs<RecordType>(); |
7840 | |
7841 | |
7842 | if (!RT || !RT->isStructureOrClassType()) { |
7843 | CoerceToIntArgs(TySize, ArgList); |
7844 | return llvm::StructType::get(getVMContext(), ArgList); |
7845 | } |
7846 | |
7847 | const RecordDecl *RD = RT->getDecl(); |
7848 | const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); |
7849 | assert(!(TySize % 8) && "Size of structure must be multiple of 8."); |
7850 | |
7851 | uint64_t LastOffset = 0; |
7852 | unsigned idx = 0; |
7853 | llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); |
7854 | |
7855 | |
7856 | |
7857 | for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); |
7858 | i != e; ++i, ++idx) { |
7859 | const QualType Ty = i->getType(); |
7860 | const BuiltinType *BT = Ty->getAs<BuiltinType>(); |
7861 | |
7862 | if (!BT || BT->getKind() != BuiltinType::Double) |
7863 | continue; |
7864 | |
7865 | uint64_t Offset = Layout.getFieldOffset(idx); |
7866 | if (Offset % 64) |
7867 | continue; |
7868 | |
7869 | |
7870 | for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) |
7871 | ArgList.push_back(I64); |
7872 | |
7873 | |
7874 | ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); |
7875 | LastOffset = Offset + 64; |
7876 | } |
7877 | |
7878 | CoerceToIntArgs(TySize - LastOffset, IntArgList); |
7879 | ArgList.append(IntArgList.begin(), IntArgList.end()); |
7880 | |
7881 | return llvm::StructType::get(getVMContext(), ArgList); |
7882 | } |
7883 | |
7884 | llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset, |
7885 | uint64_t Offset) const { |
7886 | if (OrigOffset + MinABIStackAlignInBytes > Offset) |
7887 | return nullptr; |
7888 | |
7889 | return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8); |
7890 | } |
7891 | |
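| // Classify a single argument and advance Offset, the running byte offset |
| // into the argument area, which is later used to compute padding for |
| // arguments with stricter alignment. |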
7892 | ABIArgInfo |
7893 | MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { |
7894 | Ty = useFirstFieldIfTransparentUnion(Ty); |
7895 | |
7896 | uint64_t OrigOffset = Offset; |
7897 | uint64_t TySize = getContext().getTypeSize(Ty); |
7898 | uint64_t Align = getContext().getTypeAlign(Ty) / 8; |
7899 | |
7900 | Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), |
7901 | (uint64_t)StackAlignInBytes); |
7902 | unsigned CurrOffset = llvm::alignTo(Offset, Align); |
7903 | Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8; |
7904 | |
7905 | if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { |
7906 | |
7907 | if (TySize == 0) |
7908 | return ABIArgInfo::getIgnore(); |
7909 | |
7910 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { |
7911 | Offset = OrigOffset + MinABIStackAlignInBytes; |
7912 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
7913 | } |
7914 | |
7915 | |
7916 | |
7917 | |
7918 | ABIArgInfo ArgInfo = |
7919 | ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, |
7920 | getPaddingType(OrigOffset, CurrOffset)); |
7921 | ArgInfo.setInReg(true); |
7922 | return ArgInfo; |
7923 | } |
7924 | |
7925 | |
7926 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
7927 | Ty = EnumTy->getDecl()->getIntegerType(); |
7928 | |
7929 | |
7930 | if (const auto *EIT = Ty->getAs<ExtIntType>()) |
7931 | if (EIT->getNumBits() > 128 || |
7932 | (EIT->getNumBits() > 64 && |
7933 | !getContext().getTargetInfo().hasInt128Type())) |
7934 | return getNaturalAlignIndirect(Ty); |
7935 | |
7936 | |
7937 | if (Ty->isIntegralOrEnumerationType()) |
7938 | return extendType(Ty); |
7939 | |
7940 | return ABIArgInfo::getDirect( |
7941 | nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset)); |
7942 | } |
7943 | |
7944 | llvm::Type* |
7945 | MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { |
7946 | const RecordType *RT = RetTy->getAs<RecordType>(); |
7947 | SmallVector<llvm::Type*, 8> RTList; |
7948 | |
7949 | if (RT && RT->isStructureOrClassType()) { |
7950 | const RecordDecl *RD = RT->getDecl(); |
7951 | const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); |
7952 | unsigned FieldCnt = Layout.getFieldCount(); |
7953 | |
7954 | |
7955 | |
7956 | |
7957 | |
7958 | |
7959 | |
7960 | |
7961 | |
7962 | |
7963 | if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { |
7964 | RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); |
7965 | for (; b != e; ++b) { |
7966 | const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); |
7967 | |
7968 | if (!BT || !BT->isFloatingPoint()) |
7969 | break; |
7970 | |
7971 | RTList.push_back(CGT.ConvertType(b->getType())); |
7972 | } |
7973 | |
7974 | if (b == e) |
7975 | return llvm::StructType::get(getVMContext(), RTList, |
7976 | RD->hasAttr<PackedAttr>()); |
7977 | |
7978 | RTList.clear(); |
7979 | } |
7980 | } |
7981 | |
7982 | CoerceToIntArgs(Size, RTList); |
7983 | return llvm::StructType::get(getVMContext(), RTList); |
7984 | } |
7985 | |
7986 | ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { |
7987 | uint64_t Size = getContext().getTypeSize(RetTy); |
7988 | |
7989 | if (RetTy->isVoidType()) |
7990 | return ABIArgInfo::getIgnore(); |
7991 | |
7992 | |
7993 | |
7994 | if (!IsO32 && Size == 0) |
7995 | return ABIArgInfo::getIgnore(); |
7996 | |
7997 | if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { |
7998 | if (Size <= 128) { |
7999 | if (RetTy->isAnyComplexType()) |
8000 | return ABIArgInfo::getDirect(); |
8001 | |
8002 | |
8003 | |
8004 | if (!IsO32 || |
8005 | (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) { |
8006 | ABIArgInfo ArgInfo = |
8007 | ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); |
8008 | ArgInfo.setInReg(true); |
8009 | return ArgInfo; |
8010 | } |
8011 | } |
8012 | |
8013 | return getNaturalAlignIndirect(RetTy); |
8014 | } |
8015 | |
8016 | |
8017 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) |
8018 | RetTy = EnumTy->getDecl()->getIntegerType(); |
8019 | |
8020 | |
8021 | if (const auto *EIT = RetTy->getAs<ExtIntType>()) |
8022 | if (EIT->getNumBits() > 128 || |
8023 | (EIT->getNumBits() > 64 && |
8024 | !getContext().getTargetInfo().hasInt128Type())) |
8025 | return getNaturalAlignIndirect(RetTy); |
8026 | |
8027 | if (isPromotableIntegerTypeForABI(RetTy)) |
8028 | return ABIArgInfo::getExtend(RetTy); |
8029 | |
8030 | if ((RetTy->isUnsignedIntegerOrEnumerationType() || |
8031 | RetTy->isSignedIntegerOrEnumerationType()) && Size == 32 && !IsO32) |
8032 | return ABIArgInfo::getSignExtend(RetTy); |
8033 | |
8034 | return ABIArgInfo::getDirect(); |
8035 | } |
8036 | |
8037 | void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { |
8038 | ABIArgInfo &RetInfo = FI.getReturnInfo(); |
8039 | if (!getCXXABI().classifyReturnType(FI)) |
8040 | RetInfo = classifyReturnType(FI.getReturnType()); |
8041 | |
8042 | |
8043 | uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0; |
8044 | |
8045 | for (auto &I : FI.arguments()) |
8046 | I.info = classifyArgumentType(I.type, Offset); |
8047 | } |
8048 | |
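| // va_arg: integers and pointers narrower than a va_list slot are read as a |
| // full 32- or 64-bit slot and then truncated (or converted back to a |
| // pointer) to recover the original type. |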
8049 | Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
8050 | QualType OrigTy) const { |
8051 | QualType Ty = OrigTy; |
8052 | |
8053 | |
8054 | |
8055 | unsigned SlotSizeInBits = IsO32 ? 32 : 64; |
8056 | unsigned PtrWidth = getTarget().getPointerWidth(0); |
8057 | bool DidPromote = false; |
8058 | if ((Ty->isIntegerType() && |
8059 | getContext().getIntWidth(Ty) < SlotSizeInBits) || |
8060 | (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) { |
8061 | DidPromote = true; |
8062 | Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits, |
8063 | Ty->isSignedIntegerType()); |
8064 | } |
8065 | |
8066 | auto TyInfo = getContext().getTypeInfoInChars(Ty); |
8067 | |
8068 | |
8069 | |
8070 | TyInfo.Align = |
8071 | std::min(TyInfo.Align, CharUnits::fromQuantity(StackAlignInBytes)); |
8072 | |
8073 | |
8074 | CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes); |
8075 | |
8076 | Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, false, |
8077 | TyInfo, ArgSlotSize, true); |
8078 | |
8079 | |
8080 | |
8081 | |
8082 | if (DidPromote) { |
8083 | Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp"); |
8084 | llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr); |
8085 | |
8086 | |
8087 | llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType() |
8088 | : CGF.IntPtrTy); |
8089 | llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy); |
8090 | if (OrigTy->isPointerType()) |
8091 | V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType()); |
8092 | |
8093 | CGF.Builder.CreateStore(V, Temp); |
8094 | Addr = Temp; |
8095 | } |
8096 | |
8097 | return Addr; |
8098 | } |
8099 | |
8100 | ABIArgInfo MipsABIInfo::extendType(QualType Ty) const { |
8101 | int TySize = getContext().getTypeSize(Ty); |
8102 | |
8103 | |
8104 | if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32) |
8105 | return ABIArgInfo::getSignExtend(Ty); |
8106 | |
8107 | return ABIArgInfo::getExtend(Ty); |
8108 | } |
8109 | |
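| // Record DWARF registers 0-65 and 80-181 as 4 bytes wide in the exception |
| // handling register-size table. |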
8110 | bool |
8111 | MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
8112 | llvm::Value *Address) const { |
8113 | |
8114 | |
8115 | |
8116 | |
8117 | |
8118 | llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); |
8119 | |
8120 | |
8121 | |
8122 | |
8123 | |
8124 | AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65); |
8125 | |
8126 | |
8127 | |
8128 | |
8129 | |
8130 | |
8131 | |
8132 | |
8133 | |
8134 | AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181); |
8135 | return false; |
8136 | } |
8137 | |
8138 | //===----------------------------------------------------------------------===// |
8139 | // M68k ABI Implementation |
8140 | //===----------------------------------------------------------------------===// |
8141 | |
8142 | namespace { |
8143 | |
8144 | class M68kTargetCodeGenInfo : public TargetCodeGenInfo { |
8145 | public: |
8146 | M68kTargetCodeGenInfo(CodeGenTypes &CGT) |
8147 | : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {} |
8148 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
8149 | CodeGen::CodeGenModule &M) const override; |
8150 | }; |
8151 | |
8152 | } |
8153 | |
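| // Lower the M68k interrupt attribute: use the M68k_INTR calling convention, |
| // disable inlining, and publish the handler under an "__isr_<number/2>" |
| // alias. |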
8154 | void M68kTargetCodeGenInfo::setTargetAttributes( |
8155 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { |
8156 | if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) { |
8157 | if (const auto *attr = FD->getAttr<M68kInterruptAttr>()) { |
8158 | |
8159 | llvm::Function *F = cast<llvm::Function>(GV); |
8160 | |
8161 | |
8162 | F->setCallingConv(llvm::CallingConv::M68k_INTR); |
8163 | |
8164 | |
8165 | F->addFnAttr(llvm::Attribute::NoInline); |
8166 | |
8167 | |
8168 | unsigned Num = attr->getNumber() / 2; |
8169 | llvm::GlobalAlias::create(llvm::Function::ExternalLinkage, |
8170 | "__isr_" + Twine(Num), F); |
8171 | } |
8172 | } |
8173 | } |
8174 | |
8175 | |
8176 | //===----------------------------------------------------------------------===// |
8177 | // AVR ABI Implementation |
8178 | //===----------------------------------------------------------------------===// |
8179 | |
8180 | |
8181 | namespace { |
8182 | class AVRABIInfo : public DefaultABIInfo { |
8183 | public: |
8184 | AVRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} |
8185 | |
8186 | ABIArgInfo classifyReturnType(QualType Ty) const { |
8187 | |
8188 | |
8189 | if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) <= 64) |
8190 | return ABIArgInfo::getDirect(); |
8191 | else |
8192 | return DefaultABIInfo::classifyReturnType(Ty); |
8193 | } |
8194 | |
8195 | |
8196 | |
8197 | void computeInfo(CGFunctionInfo &FI) const override { |
8198 | if (!getCXXABI().classifyReturnType(FI)) |
8199 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
8200 | for (auto &I : FI.arguments()) |
8201 | I.info = classifyArgumentType(I.type); |
8202 | } |
8203 | }; |
8204 | |
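| // Diagnose non-const globals placed in the __flash address space and lower |
| // the AVR interrupt/signal attributes onto the generated functions. |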
8205 | class AVRTargetCodeGenInfo : public TargetCodeGenInfo { |
8206 | public: |
8207 | AVRTargetCodeGenInfo(CodeGenTypes &CGT) |
8208 | : TargetCodeGenInfo(std::make_unique<AVRABIInfo>(CGT)) {} |
8209 | |
8210 | LangAS getGlobalVarAddressSpace(CodeGenModule &CGM, |
8211 | const VarDecl *D) const override { |
8212 | |
8213 | |
8214 | LangAS AS = D->getType().getAddressSpace(); |
8215 | if (isTargetAddressSpace(AS) && toTargetAddressSpace(AS) == 1 && |
8216 | !D->getType().isConstQualified()) |
8217 | CGM.getDiags().Report(D->getLocation(), |
8218 | diag::err_verify_nonconst_addrspace) |
8219 | << "__flash"; |
8220 | return TargetCodeGenInfo::getGlobalVarAddressSpace(CGM, D); |
8221 | } |
8222 | |
8223 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
8224 | CodeGen::CodeGenModule &CGM) const override { |
8225 | if (GV->isDeclaration()) |
8226 | return; |
8227 | const auto *FD = dyn_cast_or_null<FunctionDecl>(D); |
8228 | if (!FD) return; |
8229 | auto *Fn = cast<llvm::Function>(GV); |
8230 | |
8231 | if (FD->getAttr<AVRInterruptAttr>()) |
8232 | Fn->addFnAttr("interrupt"); |
8233 | |
8234 | if (FD->getAttr<AVRSignalAttr>()) |
8235 | Fn->addFnAttr("signal"); |
8236 | } |
8237 | }; |
8238 | } |
8239 | |
8240 | |
8241 | //===----------------------------------------------------------------------===// |
8242 | // TCE ABI Implementation |
8243 | //===----------------------------------------------------------------------===// |
8244 | |
8245 | |
8246 | namespace { |
8247 | |
8248 | class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo { |
8249 | public: |
8250 | TCETargetCodeGenInfo(CodeGenTypes &CGT) |
8251 | : DefaultTargetCodeGenInfo(CGT) {} |
8252 | |
8253 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
8254 | CodeGen::CodeGenModule &M) const override; |
8255 | }; |
8256 | |
8257 | void TCETargetCodeGenInfo::setTargetAttributes( |
8258 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { |
8259 | if (GV->isDeclaration()) |
8260 | return; |
8261 | const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); |
8262 | if (!FD) return; |
8263 | |
8264 | llvm::Function *F = cast<llvm::Function>(GV); |
8265 | |
8266 | if (M.getLangOpts().OpenCL) { |
8267 | if (FD->hasAttr<OpenCLKernelAttr>()) { |
8268 | |
8269 | F->addFnAttr(llvm::Attribute::NoInline); |
8270 | const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>(); |
8271 | if (Attr) { |
8272 | |
8273 | llvm::LLVMContext &Context = F->getContext(); |
8274 | llvm::NamedMDNode *OpenCLMetadata = |
8275 | M.getModule().getOrInsertNamedMetadata( |
8276 | "opencl.kernel_wg_size_info"); |
8277 | |
8278 | SmallVector<llvm::Metadata *, 5> Operands; |
8279 | Operands.push_back(llvm::ConstantAsMetadata::get(F)); |
8280 | |
8281 | Operands.push_back( |
8282 | llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( |
8283 | M.Int32Ty, llvm::APInt(32, Attr->getXDim())))); |
8284 | Operands.push_back( |
8285 | llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( |
8286 | M.Int32Ty, llvm::APInt(32, Attr->getYDim())))); |
8287 | Operands.push_back( |
8288 | llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( |
8289 | M.Int32Ty, llvm::APInt(32, Attr->getZDim())))); |
8290 | |
8291 | |
8292 | |
8293 | |
8294 | Operands.push_back( |
8295 | llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context))); |
8296 | OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands)); |
8297 | } |
8298 | } |
8299 | } |
8300 | } |
8301 | |
8302 | } |
8303 | |
8304 | //===----------------------------------------------------------------------===// |
8305 | // Hexagon ABI Implementation |
8306 | //===----------------------------------------------------------------------===// |
8307 | |
8308 | namespace { |
8309 | |
8310 | class HexagonABIInfo : public DefaultABIInfo { |
8311 | public: |
8312 | HexagonABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} |
8313 | |
8314 | private: |
8315 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
8316 | ABIArgInfo classifyArgumentType(QualType RetTy) const; |
8317 | ABIArgInfo classifyArgumentType(QualType RetTy, unsigned *RegsLeft) const; |
8318 | |
8319 | void computeInfo(CGFunctionInfo &FI) const override; |
8320 | |
8321 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
8322 | QualType Ty) const override; |
8323 | Address EmitVAArgFromMemory(CodeGenFunction &CFG, Address VAListAddr, |
8324 | QualType Ty) const; |
8325 | Address EmitVAArgForHexagon(CodeGenFunction &CFG, Address VAListAddr, |
8326 | QualType Ty) const; |
8327 | Address EmitVAArgForHexagonLinux(CodeGenFunction &CFG, Address VAListAddr, |
8328 | QualType Ty) const; |
8329 | }; |
8330 | |
8331 | class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { |
8332 | public: |
8333 | HexagonTargetCodeGenInfo(CodeGenTypes &CGT) |
8334 | : TargetCodeGenInfo(std::make_unique<HexagonABIInfo>(CGT)) {} |
8335 | |
8336 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { |
8337 | return 29; |
8338 | } |
8339 | |
8340 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
8341 | CodeGen::CodeGenModule &GCM) const override { |
8342 | if (GV->isDeclaration()) |
8343 | return; |
8344 | const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); |
8345 | if (!FD) |
8346 | return; |
8347 | } |
8348 | }; |
8349 | |
8350 | } |
8351 | |
8352 | void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { |
8353 | unsigned RegsLeft = 6; |
8354 | if (!getCXXABI().classifyReturnType(FI)) |
8355 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
8356 | for (auto &I : FI.arguments()) |
8357 | I.info = classifyArgumentType(I.type, &RegsLeft); |
8358 | } |
8359 | |
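| // Consume argument registers for a value of the given size: up to 32 bits |
| // takes one register, larger values take an aligned register pair; returns |
| // false when the value has to be passed on the stack instead. |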
8360 | static bool HexagonAdjustRegsLeft(uint64_t Size, unsigned *RegsLeft) { |
8361 | assert(Size <= 64 && "Not expecting to pass arguments larger than 64 bits" |
8362 | " through registers"); |
8363 | |
8364 | if (*RegsLeft == 0) |
8365 | return false; |
8366 | |
8367 | if (Size <= 32) { |
8368 | (*RegsLeft)--; |
8369 | return true; |
8370 | } |
8371 | |
8372 | if (2 <= (*RegsLeft & (~1U))) { |
8373 | *RegsLeft = (*RegsLeft & (~1U)) - 2; |
8374 | return true; |
8375 | } |
8376 | |
8377 | |
8378 | |
8379 | if (*RegsLeft == 1) |
8380 | *RegsLeft = 0; |
8381 | |
8382 | return false; |
8383 | } |
8384 | |
8385 | ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty, |
8386 | unsigned *RegsLeft) const { |
8387 | if (!isAggregateTypeForABI(Ty)) { |
8388 | |
8389 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
8390 | Ty = EnumTy->getDecl()->getIntegerType(); |
8391 | |
8392 | uint64_t Size = getContext().getTypeSize(Ty); |
8393 | if (Size <= 64) |
8394 | HexagonAdjustRegsLeft(Size, RegsLeft); |
8395 | |
8396 | if (Size > 64 && Ty->isExtIntType()) |
8397 | return getNaturalAlignIndirect(Ty, true); |
8398 | |
8399 | return isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) |
8400 | : ABIArgInfo::getDirect(); |
8401 | } |
8402 | |
8403 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) |
8404 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
8405 | |
8406 | |
8407 | if (isEmptyRecord(getContext(), Ty, true)) |
8408 | return ABIArgInfo::getIgnore(); |
8409 | |
8410 | uint64_t Size = getContext().getTypeSize(Ty); |
8411 | unsigned Align = getContext().getTypeAlign(Ty); |
8412 | |
8413 | if (Size > 64) |
8414 | return getNaturalAlignIndirect(Ty, true); |
8415 | |
8416 | if (HexagonAdjustRegsLeft(Size, RegsLeft)) |
8417 | Align = Size <= 32 ? 32 : 64; |
8418 | if (Size <= Align) { |
8419 | |
8420 | if (!llvm::isPowerOf2_64(Size)) |
8421 | Size = llvm::NextPowerOf2(Size); |
8422 | return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size)); |
8423 | } |
8424 | return DefaultABIInfo::classifyArgumentType(Ty); |
8425 | } |
8426 | |
8427 | ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { |
8428 | if (RetTy->isVoidType()) |
8429 | return ABIArgInfo::getIgnore(); |
8430 | |
8431 | const TargetInfo &T = CGT.getTarget(); |
8432 | uint64_t Size = getContext().getTypeSize(RetTy); |
8433 | |
8434 | if (RetTy->getAs<VectorType>()) { |
8435 | |
8436 | if (T.hasFeature("hvx")) { |
8437 | assert(T.hasFeature("hvx-length64b") || T.hasFeature("hvx-length128b")); |
8438 | uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64*8 : 128*8; |
8439 | if (Size == VecSize || Size == 2*VecSize) |
8440 | return ABIArgInfo::getDirectInReg(); |
8441 | } |
8442 | |
8443 | if (Size > 64) |
8444 | return getNaturalAlignIndirect(RetTy); |
8445 | } |
8446 | |
8447 | if (!isAggregateTypeForABI(RetTy)) { |
8448 | |
8449 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) |
8450 | RetTy = EnumTy->getDecl()->getIntegerType(); |
8451 | |
8452 | if (Size > 64 && RetTy->isExtIntType()) |
8453 | return getNaturalAlignIndirect(RetTy, false); |
8454 | |
8455 | return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) |
8456 | : ABIArgInfo::getDirect(); |
8457 | } |
8458 | |
8459 | if (isEmptyRecord(getContext(), RetTy, true)) |
8460 | return ABIArgInfo::getIgnore(); |
8461 | |
8462 | |
8463 | |
8464 | if (Size <= 64) { |
8465 | |
8466 | if (!llvm::isPowerOf2_64(Size)) |
8467 | Size = llvm::NextPowerOf2(Size); |
8468 | return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size)); |
8469 | } |
8470 | return getNaturalAlignIndirect(RetTy, true); |
8471 | } |
8472 | |
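| // Load a va_arg value from the overflow (stack) area of the Hexagon va_list, |
| // first rounding the pointer up when the type needs more than 4-byte |
| // alignment, then bumping the pointer past the argument. |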
8473 | Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF, |
8474 | Address VAListAddr, |
8475 | QualType Ty) const { |
8476 | |
8477 | Address __overflow_area_pointer_p = |
8478 | CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p"); |
8479 | llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad( |
8480 | __overflow_area_pointer_p, "__overflow_area_pointer"); |
8481 | |
8482 | uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; |
8483 | if (Align > 4) { |
8484 | |
8485 | assert((Align & (Align - 1)) == 0 && "Alignment is not power of 2!"); |
8486 | |
8487 | |
8488 | llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); |
8489 | |
8490 | |
8491 | __overflow_area_pointer = |
8492 | CGF.Builder.CreateGEP(CGF.Int8Ty, __overflow_area_pointer, Offset); |
8493 | llvm::Value *AsInt = |
8494 | CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty); |
8495 | |
8496 | |
8497 | |
8498 | llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -(int)Align); |
8499 | __overflow_area_pointer = CGF.Builder.CreateIntToPtr( |
8500 | CGF.Builder.CreateAnd(AsInt, Mask), __overflow_area_pointer->getType(), |
8501 | "__overflow_area_pointer.align"); |
8502 | } |
8503 | |
8504 | |
8505 | |
8506 | llvm::Type *PTy = CGF.ConvertTypeForMem(Ty); |
8507 | Address AddrTyped = CGF.Builder.CreateBitCast( |
8508 | Address(__overflow_area_pointer, CharUnits::fromQuantity(Align)), |
8509 | llvm::PointerType::getUnqual(PTy)); |
8510 | |
8511 | |
8512 | uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4); |
8513 | |
8514 | __overflow_area_pointer = CGF.Builder.CreateGEP( |
8515 | CGF.Int8Ty, __overflow_area_pointer, |
8516 | llvm::ConstantInt::get(CGF.Int32Ty, Offset), |
8517 | "__overflow_area_pointer.next"); |
8518 | CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p); |
8519 | |
8520 | return AddrTyped; |
8521 | } |
8522 | |
8523 | Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF, |
8524 | Address VAListAddr, |
8525 | QualType Ty) const { |
8526 | |
8527 | llvm::Type *BP = CGF.Int8PtrTy; |
8528 | llvm::Type *BPP = CGF.Int8PtrPtrTy; |
8529 | CGBuilderTy &Builder = CGF.Builder; |
8530 | Address VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); |
8531 | llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); |
8532 | |
8533 | uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8; |
8534 | if (TyAlign > 4) { |
8535 | assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!"); |
8536 | llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty); |
8537 | AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1)); |
8538 | AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1))); |
8539 | Addr = Builder.CreateIntToPtr(AddrAsInt, BP); |
8540 | } |
8541 | llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); |
8542 | Address AddrTyped = Builder.CreateBitCast( |
8543 | Address(Addr, CharUnits::fromQuantity(TyAlign)), PTy); |
8544 | |
8545 | uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4); |
8546 | llvm::Value *NextAddr = Builder.CreateGEP( |
8547 | CGF.Int8Ty, Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next"); |
8548 | Builder.CreateStore(NextAddr, VAListAddrAsBPP); |
8549 | |
8550 | return AddrTyped; |
8551 | } |
8552 | |
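| // Musl va_arg: arguments larger than 8 bytes always come from memory; |
| // smaller ones are taken from the register-save area if it still has room, |
| // otherwise from the overflow area, with the two paths merged through a PHI. |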
8553 | Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF, |
8554 | Address VAListAddr, |
8555 | QualType Ty) const { |
8556 | int ArgSize = CGF.getContext().getTypeSize(Ty) / 8; |
8557 | |
8558 | if (ArgSize > 8) |
8559 | return EmitVAArgFromMemory(CGF, VAListAddr, Ty); |
8560 | |
8561 | |
8562 | |
8563 | |
8564 | |
8565 | unsigned RegsLeft = 6; |
8566 | Ty = CGF.getContext().getCanonicalType(Ty); |
8567 | (void)classifyArgumentType(Ty, &RegsLeft); |
8568 | |
8569 | llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg"); |
8570 | llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); |
8571 | llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack"); |
8572 | llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); |
8573 | |
8574 | |
8575 | |
8576 | ArgSize = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8; |
8577 | int ArgAlign = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8; |
8578 | |
8579 | |
8580 | CGF.EmitBlock(MaybeRegBlock); |
8581 | |
8582 | |
8583 | Address __current_saved_reg_area_pointer_p = CGF.Builder.CreateStructGEP( |
8584 | VAListAddr, 0, "__current_saved_reg_area_pointer_p"); |
8585 | llvm::Value *__current_saved_reg_area_pointer = CGF.Builder.CreateLoad( |
8586 | __current_saved_reg_area_pointer_p, "__current_saved_reg_area_pointer"); |
8587 | |
8588 | |
8589 | Address __saved_reg_area_end_pointer_p = CGF.Builder.CreateStructGEP( |
8590 | VAListAddr, 1, "__saved_reg_area_end_pointer_p"); |
8591 | llvm::Value *__saved_reg_area_end_pointer = CGF.Builder.CreateLoad( |
8592 | __saved_reg_area_end_pointer_p, "__saved_reg_area_end_pointer"); |
8593 | |
8594 | |
8595 | |
8596 | if (ArgAlign > 4) { |
8597 | |
8598 | llvm::Value *__current_saved_reg_area_pointer_int = |
8599 | CGF.Builder.CreatePtrToInt(__current_saved_reg_area_pointer, |
8600 | CGF.Int32Ty); |
8601 | |
8602 | __current_saved_reg_area_pointer_int = CGF.Builder.CreateAdd( |
8603 | __current_saved_reg_area_pointer_int, |
8604 | llvm::ConstantInt::get(CGF.Int32Ty, (ArgAlign - 1)), |
8605 | "align_current_saved_reg_area_pointer"); |
8606 | |
8607 | __current_saved_reg_area_pointer_int = |
8608 | CGF.Builder.CreateAnd(__current_saved_reg_area_pointer_int, |
8609 | llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign), |
8610 | "align_current_saved_reg_area_pointer"); |
8611 | |
8612 | __current_saved_reg_area_pointer = |
8613 | CGF.Builder.CreateIntToPtr(__current_saved_reg_area_pointer_int, |
8614 | __current_saved_reg_area_pointer->getType(), |
8615 | "align_current_saved_reg_area_pointer"); |
8616 | } |
8617 | |
8618 | llvm::Value *__new_saved_reg_area_pointer = |
8619 | CGF.Builder.CreateGEP(CGF.Int8Ty, __current_saved_reg_area_pointer, |
8620 | llvm::ConstantInt::get(CGF.Int32Ty, ArgSize), |
8621 | "__new_saved_reg_area_pointer"); |
8622 | |
8623 | llvm::Value *UsingStack = |
8624 | CGF.Builder.CreateICmpSGT(__new_saved_reg_area_pointer, |
8625 | __saved_reg_area_end_pointer); |
8626 | |
8627 | CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, InRegBlock); |
8628 | |
8629 | |
8630 | |
8631 | CGF.EmitBlock(InRegBlock); |
8632 | |
8633 | llvm::Type *PTy = CGF.ConvertType(Ty); |
8634 | llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast( |
8635 | __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy)); |
8636 | |
8637 | CGF.Builder.CreateStore(__new_saved_reg_area_pointer, |
8638 | __current_saved_reg_area_pointer_p); |
8639 | |
8640 | CGF.EmitBranch(ContBlock); |
8641 | |
8642 | |
8643 | |
8644 | CGF.EmitBlock(OnStackBlock); |
8645 | |
8646 | |
8647 | Address __overflow_area_pointer_p = |
8648 | CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p"); |
8649 | llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad( |
8650 | __overflow_area_pointer_p, "__overflow_area_pointer"); |
8651 | |
8652 | |
8653 | if (ArgAlign > 4) { |
8654 | llvm::Value *__overflow_area_pointer_int = |
8655 | CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty); |
8656 | |
8657 | __overflow_area_pointer_int = |
8658 | CGF.Builder.CreateAdd(__overflow_area_pointer_int, |
8659 | llvm::ConstantInt::get(CGF.Int32Ty, ArgAlign - 1), |
8660 | "align_overflow_area_pointer"); |
8661 | |
8662 | __overflow_area_pointer_int = |
8663 | CGF.Builder.CreateAnd(__overflow_area_pointer_int, |
8664 | llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign), |
8665 | "align_overflow_area_pointer"); |
8666 | |
8667 | __overflow_area_pointer = CGF.Builder.CreateIntToPtr( |
8668 | __overflow_area_pointer_int, __overflow_area_pointer->getType(), |
8669 | "align_overflow_area_pointer"); |
8670 | } |
8671 | |
8672 | |
8673 | |
8674 | llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP( |
8675 | CGF.Int8Ty, __overflow_area_pointer, |
8676 | llvm::ConstantInt::get(CGF.Int32Ty, ArgSize), |
8677 | "__overflow_area_pointer.next"); |
8678 | |
8679 | CGF.Builder.CreateStore(__new_overflow_area_pointer, |
8680 | __overflow_area_pointer_p); |
8681 | |
8682 | CGF.Builder.CreateStore(__new_overflow_area_pointer, |
8683 | __current_saved_reg_area_pointer_p); |
8684 | |
8685 | |
8686 | llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty); |
8687 | llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast( |
8688 | __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy)); |
8689 | |
8690 | CGF.EmitBranch(ContBlock); |
8691 | |
8692 | |
8693 | |
8694 | CGF.EmitBlock(ContBlock); |
8695 | |
8696 | llvm::Type *MemPTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty)); |
8697 | llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr"); |
8698 | ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock); |
8699 | ArgAddr->addIncoming(__overflow_area_p, OnStackBlock); |
8700 | |
8701 | return Address(ArgAddr, CharUnits::fromQuantity(ArgAlign)); |
8702 | } |
8703 | |
8704 | Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
8705 | QualType Ty) const { |
8706 | |
8707 | if (getTarget().getTriple().isMusl()) |
8708 | return EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty); |
8709 | |
8710 | return EmitVAArgForHexagon(CGF, VAListAddr, Ty); |
8711 | } |
8712 | |
8713 | |
8714 | //===----------------------------------------------------------------------===// |
8715 | // Lanai ABI Implementation |
8716 | //===----------------------------------------------------------------------===// |
8717 | namespace { |
8718 | class LanaiABIInfo : public DefaultABIInfo { |
8719 | public: |
8720 | LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} |
8721 | |
8722 | bool shouldUseInReg(QualType Ty, CCState &State) const; |
8723 | |
8724 | void computeInfo(CGFunctionInfo &FI) const override { |
8725 | CCState State(FI); |
8726 | |
8727 | |
8728 | if (FI.getHasRegParm()) { |
8729 | State.FreeRegs = FI.getRegParm(); |
8730 | } else { |
8731 | State.FreeRegs = 4; |
8732 | } |
8733 | |
8734 | if (!getCXXABI().classifyReturnType(FI)) |
8735 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
8736 | for (auto &I : FI.arguments()) |
8737 | I.info = classifyArgumentType(I.type, State); |
8738 | } |
8739 | |
8740 | ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const; |
8741 | ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const; |
8742 | }; |
8743 | } |
8744 | |
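| // Returns true when the value still fits in the remaining argument registers |
| // (counted in 32-bit units); once an argument overflows the budget, FreeRegs |
| // is cleared so following arguments are not placed in registers either. |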
8745 | bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const { |
8746 | unsigned Size = getContext().getTypeSize(Ty); |
8747 | unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U; |
8748 | |
8749 | if (SizeInRegs == 0) |
8750 | return false; |
8751 | |
8752 | if (SizeInRegs > State.FreeRegs) { |
8753 | State.FreeRegs = 0; |
8754 | return false; |
8755 | } |
8756 | |
8757 | State.FreeRegs -= SizeInRegs; |
8758 | |
8759 | return true; |
8760 | } |
8761 | |
8762 | ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal, |
8763 | CCState &State) const { |
8764 | if (!ByVal) { |
8765 | if (State.FreeRegs) { |
8766 | --State.FreeRegs; |
8767 | return getNaturalAlignIndirectInReg(Ty); |
8768 | } |
8769 | return getNaturalAlignIndirect(Ty, false); |
8770 | } |
8771 | |
8772 | |
8773 | const unsigned MinABIStackAlignInBytes = 4; |
8774 | unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; |
8775 | return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), true, |
8776 | TypeAlign > |
8777 | MinABIStackAlignInBytes); |
8778 | } |
8779 | |
8780 | ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty, |
8781 | CCState &State) const { |
8782 | |
8783 | const RecordType *RT = Ty->getAs<RecordType>(); |
8784 | if (RT) { |
8785 | CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); |
8786 | if (RAA == CGCXXABI::RAA_Indirect) { |
8787 | return getIndirectResult(Ty, false, State); |
8788 | } else if (RAA == CGCXXABI::RAA_DirectInMemory) { |
8789 | return getNaturalAlignIndirect(Ty, true); |
8790 | } |
8791 | } |
8792 | |
8793 | if (isAggregateTypeForABI(Ty)) { |
8794 | |
8795 | if (RT && RT->getDecl()->hasFlexibleArrayMember()) |
8796 | return getIndirectResult(Ty, true, State); |
8797 | |
8798 | |
8799 | if (isEmptyRecord(getContext(), Ty, true)) |
8800 | return ABIArgInfo::getIgnore(); |
8801 | |
8802 | llvm::LLVMContext &LLVMContext = getVMContext(); |
8803 | unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32; |
8804 | if (SizeInRegs <= State.FreeRegs) { |
8805 | llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); |
8806 | SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32); |
8807 | llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); |
8808 | State.FreeRegs -= SizeInRegs; |
8809 | return ABIArgInfo::getDirectInReg(Result); |
8810 | } else { |
8811 | State.FreeRegs = 0; |
8812 | } |
8813 | return getIndirectResult(Ty, true, State); |
8814 | } |
8815 | |
8816 | |
8817 | if (const auto *EnumTy = Ty->getAs<EnumType>()) |
8818 | Ty = EnumTy->getDecl()->getIntegerType(); |
8819 | |
8820 | bool InReg = shouldUseInReg(Ty, State); |
8821 | |
8822 | |
8823 | if (const auto *EIT = Ty->getAs<ExtIntType>()) |
8824 | if (EIT->getNumBits() > 64) |
8825 | return getIndirectResult(Ty, true, State); |
8826 | |
8827 | if (isPromotableIntegerTypeForABI(Ty)) { |
8828 | if (InReg) |
8829 | return ABIArgInfo::getDirectInReg(); |
8830 | return ABIArgInfo::getExtend(Ty); |
8831 | } |
8832 | if (InReg) |
8833 | return ABIArgInfo::getDirectInReg(); |
8834 | return ABIArgInfo::getDirect(); |
8835 | } |
8836 | |
8837 | namespace { |
8838 | class LanaiTargetCodeGenInfo : public TargetCodeGenInfo { |
8839 | public: |
8840 | LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) |
8841 | : TargetCodeGenInfo(std::make_unique<LanaiABIInfo>(CGT)) {} |
8842 | }; |
8843 | } |
8844 | |
8845 | |
8846 | |
8847 | |
8848 | |
8849 | namespace { |
8850 | |
8851 | class AMDGPUABIInfo final : public DefaultABIInfo { |
8852 | private: |
8853 | static const unsigned MaxNumRegsForArgsRet = 16; |
8854 | |
8855 | unsigned numRegsForType(QualType Ty) const; |
8856 | |
8857 | bool isHomogeneousAggregateBaseType(QualType Ty) const override; |
8858 | bool isHomogeneousAggregateSmallEnough(const Type *Base, |
8859 | uint64_t Members) const override; |
8860 | |
8861 | |
8862 | llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS, |
8863 | unsigned ToAS) const { |
8864 | |
8865 | if (Ty->isPointerTy() && Ty->getPointerAddressSpace() == FromAS) |
8866 | return llvm::PointerType::get( |
8867 | cast<llvm::PointerType>(Ty)->getElementType(), ToAS); |
8868 | return Ty; |
8869 | } |
8870 | |
8871 | public: |
8872 | explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) : |
8873 | DefaultABIInfo(CGT) {} |
8874 | |
8875 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
8876 | ABIArgInfo classifyKernelArgumentType(QualType Ty) const; |
8877 | ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const; |
8878 | |
8879 | void computeInfo(CGFunctionInfo &FI) const override; |
8880 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
8881 | QualType Ty) const override; |
8882 | }; |
8883 | |
8884 | bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { |
8885 | return true; |
8886 | } |
8887 | |
8888 | bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough( |
8889 | const Type *Base, uint64_t Members) const { |
8890 | uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32; |
8891 | |
8892 | |
8893 | return Members * NumRegs <= MaxNumRegsForArgsRet; |
8894 | } |
8895 | |
8896 | |
8897 | unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const { |
8898 | unsigned NumRegs = 0; |
8899 | |
8900 | if (const VectorType *VT = Ty->getAs<VectorType>()) { |
8901 | |
8902 | |
8903 | QualType EltTy = VT->getElementType(); |
8904 | unsigned EltSize = getContext().getTypeSize(EltTy); |
8905 | |
8906 | |
8907 | if (EltSize == 16) |
8908 | return (VT->getNumElements() + 1) / 2; |
8909 | |
8910 | unsigned EltNumRegs = (EltSize + 31) / 32; |
8911 | return EltNumRegs * VT->getNumElements(); |
8912 | } |
8913 | |
8914 | if (const RecordType *RT = Ty->getAs<RecordType>()) { |
8915 | const RecordDecl *RD = RT->getDecl(); |
8916 | assert(!RD->hasFlexibleArrayMember()); |
8917 | |
8918 | for (const FieldDecl *Field : RD->fields()) { |
8919 | QualType FieldTy = Field->getType(); |
8920 | NumRegs += numRegsForType(FieldTy); |
8921 | } |
8922 | |
8923 | return NumRegs; |
8924 | } |
8925 | |
8926 | return (getContext().getTypeSize(Ty) + 31) / 32; |
8927 | } |
8928 | |
8929 | void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const { |
8930 | llvm::CallingConv::ID CC = FI.getCallingConvention(); |
8931 | |
8932 | if (!getCXXABI().classifyReturnType(FI)) |
8933 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
8934 | |
8935 | unsigned NumRegsLeft = MaxNumRegsForArgsRet; |
8936 | for (auto &Arg : FI.arguments()) { |
8937 | if (CC == llvm::CallingConv::AMDGPU_KERNEL) { |
8938 | Arg.info = classifyKernelArgumentType(Arg.type); |
8939 | } else { |
8940 | Arg.info = classifyArgumentType(Arg.type, NumRegsLeft); |
8941 | } |
8942 | } |
8943 | } |
8944 | |
8945 | Address AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
8946 | QualType Ty) const { |
8947 | llvm_unreachable("AMDGPU does not support varargs"); |
8948 | } |
8949 | |
8950 | ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const { |
8951 | if (isAggregateTypeForABI(RetTy)) { |
8952 | |
8953 | |
8954 | if (!getRecordArgABI(RetTy, getCXXABI())) { |
8955 | |
8956 | if (isEmptyRecord(getContext(), RetTy, true)) |
8957 | return ABIArgInfo::getIgnore(); |
8958 | |
8959 | |
8960 | if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) |
8961 | return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); |
8962 | |
8963 | if (const RecordType *RT = RetTy->getAs<RecordType>()) { |
8964 | const RecordDecl *RD = RT->getDecl(); |
8965 | if (RD->hasFlexibleArrayMember()) |
8966 | return DefaultABIInfo::classifyReturnType(RetTy); |
8967 | } |
8968 | |
8969 | |
8970 | uint64_t Size = getContext().getTypeSize(RetTy); |
8971 | if (Size <= 16) |
8972 | return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); |
8973 | |
8974 | if (Size <= 32) |
8975 | return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); |
8976 | |
8977 | if (Size <= 64) { |
8978 | llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext()); |
8979 | return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2)); |
8980 | } |
8981 | |
8982 | if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet) |
8983 | return ABIArgInfo::getDirect(); |
8984 | } |
8985 | } |
8986 | |
8987 | |
8988 | return DefaultABIInfo::classifyReturnType(RetTy); |
8989 | } |
8990 | |
8991 | |
8992 | |
8993 | ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const { |
8994 | Ty = useFirstFieldIfTransparentUnion(Ty); |
8995 | |
8996 | |
8997 | |
8998 | if (const Type *SeltTy = isSingleElementStruct(Ty, getContext())) |
8999 | Ty = QualType(SeltTy, 0); |
9000 | |
9001 | llvm::Type *OrigLTy = CGT.ConvertType(Ty); |
9002 | llvm::Type *LTy = OrigLTy; |
9003 | if (getContext().getLangOpts().HIP) { |
9004 | LTy = coerceKernelArgumentType( |
9005 | OrigLTy, getContext().getTargetAddressSpace(LangAS::Default), |
9006 | getContext().getTargetAddressSpace(LangAS::cuda_device)); |
9007 | } |
9008 | |
9009 | |
9010 | |
9011 | |
9012 | |
9013 | |
9014 | |
9015 | if (!getContext().getLangOpts().OpenCL && LTy == OrigLTy && |
9016 | isAggregateTypeForABI(Ty)) { |
9017 | return ABIArgInfo::getIndirectAliased( |
9018 | getContext().getTypeAlignInChars(Ty), |
9019 | getContext().getTargetAddressSpace(LangAS::opencl_constant), |
9020 | false , nullptr ); |
9021 | } |
9022 | |
9023 | |
9024 | |
9025 | |
9026 | return ABIArgInfo::getDirect(LTy, 0, nullptr, false); |
9027 | } |
9028 | |
9029 | ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty, |
9030 | unsigned &NumRegsLeft) const { |
9031 | assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow"); |
9032 | |
9033 | Ty = useFirstFieldIfTransparentUnion(Ty); |
9034 | |
9035 | if (isAggregateTypeForABI(Ty)) { |
9036 | |
9037 | |
9038 | if (auto RAA = getRecordArgABI(Ty, getCXXABI())) |
9039 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
9040 | |
9041 | |
9042 | if (isEmptyRecord(getContext(), Ty, true)) |
9043 | return ABIArgInfo::getIgnore(); |
9044 | |
9045 | |
9046 | |
9047 | |
9048 | if (const Type *SeltTy = isSingleElementStruct(Ty, getContext())) |
9049 | return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); |
9050 | |
9051 | if (const RecordType *RT = Ty->getAs<RecordType>()) { |
9052 | const RecordDecl *RD = RT->getDecl(); |
9053 | if (RD->hasFlexibleArrayMember()) |
9054 | return DefaultABIInfo::classifyArgumentType(Ty); |
9055 | } |
9056 | |
9057 | |
9058 | uint64_t Size = getContext().getTypeSize(Ty); |
9059 | if (Size <= 64) { |
9060 | unsigned NumRegs = (Size + 31) / 32; |
9061 | NumRegsLeft -= std::min(NumRegsLeft, NumRegs); |
9062 | |
9063 | if (Size <= 16) |
9064 | return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); |
9065 | |
9066 | if (Size <= 32) |
9067 | return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); |
9068 | |
9069 | |
9070 | llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext()); |
9071 | return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2)); |
9072 | } |
9073 | |
9074 | if (NumRegsLeft > 0) { |
9075 | unsigned NumRegs = numRegsForType(Ty); |
9076 | if (NumRegsLeft >= NumRegs) { |
9077 | NumRegsLeft -= NumRegs; |
9078 | return ABIArgInfo::getDirect(); |
9079 | } |
9080 | } |
9081 | } |
9082 | |
9083 | |
9084 | ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty); |
9085 | if (!ArgInfo.isIndirect()) { |
9086 | unsigned NumRegs = numRegsForType(Ty); |
9087 | NumRegsLeft -= std::min(NumRegs, NumRegsLeft); |
9088 | } |
9089 | |
9090 | return ArgInfo; |
9091 | } |
9092 | |
9093 | class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo { |
9094 | public: |
9095 | AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT) |
9096 | : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {} |
9097 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
9098 | CodeGen::CodeGenModule &M) const override; |
9099 | unsigned getOpenCLKernelCallingConv() const override; |
9100 | |
9101 | llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM, |
9102 | llvm::PointerType *T, QualType QT) const override; |
9103 | |
9104 | LangAS getASTAllocaAddressSpace() const override { |
9105 | return getLangASFromTargetAS( |
9106 | getABIInfo().getDataLayout().getAllocaAddrSpace()); |
9107 | } |
9108 | LangAS getGlobalVarAddressSpace(CodeGenModule &CGM, |
9109 | const VarDecl *D) const override; |
9110 | llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts, |
9111 | SyncScope Scope, |
9112 | llvm::AtomicOrdering Ordering, |
9113 | llvm::LLVMContext &Ctx) const override; |
9114 | llvm::Function * |
9115 | createEnqueuedBlockKernel(CodeGenFunction &CGF, |
9116 | llvm::Function *BlockInvokeFunc, |
9117 | llvm::Value *BlockLiteral) const override; |
9118 | bool shouldEmitStaticExternCAliases() const override; |
9119 | void setCUDAKernelCallingConvention(const FunctionType *&FT) const override; |
9120 | }; |
9121 | } |
9122 | |
9123 | static bool requiresAMDGPUProtectedVisibility(const Decl *D, |
9124 | llvm::GlobalValue *GV) { |
9125 | if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility) |
9126 | return false; |
9127 | |
9128 | return D->hasAttr<OpenCLKernelAttr>() || |
9129 | (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) || |
9130 | (isa<VarDecl>(D) && |
9131 | (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() || |
9132 | cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() || |
9133 | cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType())); |
9134 | } |
9135 | |
9136 | void AMDGPUTargetCodeGenInfo::setTargetAttributes( |
9137 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { |
9138 | if (requiresAMDGPUProtectedVisibility(D, GV)) { |
9139 | GV->setVisibility(llvm::GlobalValue::ProtectedVisibility); |
9140 | GV->setDSOLocal(true); |
9141 | } |
9142 | |
9143 | if (GV->isDeclaration()) |
9144 | return; |
9145 | const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); |
9146 | if (!FD) |
9147 | return; |
9148 | |
9149 | llvm::Function *F = cast<llvm::Function>(GV); |
9150 | |
9151 | const auto *ReqdWGS = M.getLangOpts().OpenCL ? |
9152 | FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr; |
9153 | |
9154 | |
9155 | const bool IsOpenCLKernel = M.getLangOpts().OpenCL && |
9156 | FD->hasAttr<OpenCLKernelAttr>(); |
9157 | const bool IsHIPKernel = M.getLangOpts().HIP && |
9158 | FD->hasAttr<CUDAGlobalAttr>(); |
9159 | if ((IsOpenCLKernel || IsHIPKernel) && |
9160 | (M.getTriple().getOS() == llvm::Triple::AMDHSA)) |
9161 | F->addFnAttr("amdgpu-implicitarg-num-bytes", "56"); |
9162 | |
9163 | if (IsHIPKernel) |
9164 | F->addFnAttr("uniform-work-group-size", "true"); |
9165 | |
9166 | |
9167 | const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>(); |
9168 | if (ReqdWGS || FlatWGS) { |
9169 | unsigned Min = 0; |
9170 | unsigned Max = 0; |
9171 | if (FlatWGS) { |
9172 | Min = FlatWGS->getMin() |
9173 | ->EvaluateKnownConstInt(M.getContext()) |
9174 | .getExtValue(); |
9175 | Max = FlatWGS->getMax() |
9176 | ->EvaluateKnownConstInt(M.getContext()) |
9177 | .getExtValue(); |
9178 | } |
9179 | if (ReqdWGS && Min == 0 && Max == 0) |
9180 | Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim(); |
9181 | |
9182 | if (Min != 0) { |
9183 | assert(Min <= Max && "Min must be less than or equal Max"); |
9184 | |
9185 | std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max); |
9186 | F->addFnAttr("amdgpu-flat-work-group-size", AttrVal); |
9187 | } else |
9188 | assert(Max == 0 && "Max must be zero"); |
9189 | } else if (IsOpenCLKernel || IsHIPKernel) { |
9190 | |
9191 | |
9192 | const unsigned OpenCLDefaultMaxWorkGroupSize = 256; |
9193 | const unsigned DefaultMaxWorkGroupSize = |
9194 | IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize |
9195 | : M.getLangOpts().GPUMaxThreadsPerBlock; |
9196 | std::string AttrVal = |
9197 | std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize); |
9198 | F->addFnAttr("amdgpu-flat-work-group-size", AttrVal); |
9199 | } |
9200 | |
9201 | if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) { |
9202 | unsigned Min = |
9203 | Attr->getMin()->EvaluateKnownConstInt(M.getContext()).getExtValue(); |
9204 | unsigned Max = Attr->getMax() ? Attr->getMax() |
9205 | ->EvaluateKnownConstInt(M.getContext()) |
9206 | .getExtValue() |
9207 | : 0; |
9208 | |
9209 | if (Min != 0) { |
9210 | assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max"); |
9211 | |
9212 | std::string AttrVal = llvm::utostr(Min); |
9213 | if (Max != 0) |
9214 | AttrVal = AttrVal + "," + llvm::utostr(Max); |
9215 | F->addFnAttr("amdgpu-waves-per-eu", AttrVal); |
9216 | } else |
9217 | assert(Max == 0 && "Max must be zero"); |
9218 | } |
9219 | |
9220 | if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) { |
9221 | unsigned NumSGPR = Attr->getNumSGPR(); |
9222 | |
9223 | if (NumSGPR != 0) |
9224 | F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR)); |
9225 | } |
9226 | |
9227 | if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) { |
9228 | uint32_t NumVGPR = Attr->getNumVGPR(); |
9229 | |
9230 | if (NumVGPR != 0) |
9231 | F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR)); |
9232 | } |
9233 | |
9234 | if (M.getContext().getTargetInfo().allowAMDGPUUnsafeFPAtomics()) |
9235 | F->addFnAttr("amdgpu-unsafe-fp-atomics", "true"); |
9236 | |
9237 | if (!getABIInfo().getCodeGenOpts().EmitIEEENaNCompliantInsts) |
9238 | F->addFnAttr("amdgpu-ieee", "false"); |
9239 | } |
9240 | |
9241 | unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const { |
9242 | return llvm::CallingConv::AMDGPU_KERNEL; |
9243 | } |
9244 | |
9245 | |
9246 | |
9247 | |
9248 | |
9249 | |
9250 | llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer( |
9251 | const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT, |
9252 | QualType QT) const { |
9253 | if (CGM.getContext().getTargetNullPointerValue(QT) == 0) |
9254 | return llvm::ConstantPointerNull::get(PT); |
9255 | |
9256 | auto &Ctx = CGM.getContext(); |
9257 | auto NPT = llvm::PointerType::get(PT->getElementType(), |
9258 | Ctx.getTargetAddressSpace(LangAS::opencl_generic)); |
9259 | return llvm::ConstantExpr::getAddrSpaceCast( |
9260 | llvm::ConstantPointerNull::get(NPT), PT); |
9261 | } |
9262 | |
9263 | LangAS |
9264 | AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM, |
9265 | const VarDecl *D) const { |
9266 | assert(!CGM.getLangOpts().OpenCL && |
9267 | !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) && |
9268 | "Address space agnostic languages only"); |
9269 | LangAS DefaultGlobalAS = getLangASFromTargetAS( |
9270 | CGM.getContext().getTargetAddressSpace(LangAS::opencl_global)); |
9271 | if (!D) |
9272 | return DefaultGlobalAS; |
9273 | |
9274 | LangAS AddrSpace = D->getType().getAddressSpace(); |
9275 | assert(AddrSpace == LangAS::Default || isTargetAddressSpace(AddrSpace)); |
9276 | if (AddrSpace != LangAS::Default) |
9277 | return AddrSpace; |
9278 | |
9279 | if (CGM.isTypeConstant(D->getType(), false)) { |
9280 | if (auto ConstAS = CGM.getTarget().getConstantAddressSpace()) |
9281 | return ConstAS.getValue(); |
9282 | } |
9283 | return DefaultGlobalAS; |
9284 | } |
9285 | |
9286 | llvm::SyncScope::ID |
9287 | AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts, |
9288 | SyncScope Scope, |
9289 | llvm::AtomicOrdering Ordering, |
9290 | llvm::LLVMContext &Ctx) const { |
9291 | std::string Name; |
9292 | switch (Scope) { |
9293 | case SyncScope::OpenCLWorkGroup: |
9294 | Name = "workgroup"; |
9295 | break; |
9296 | case SyncScope::OpenCLDevice: |
9297 | Name = "agent"; |
9298 | break; |
9299 | case SyncScope::OpenCLAllSVMDevices: |
9300 | Name = ""; |
9301 | break; |
9302 | case SyncScope::OpenCLSubGroup: |
9303 | Name = "wavefront"; |
9304 | } |
9305 | |
9306 | if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) { |
9307 | if (!Name.empty()) |
9308 | Name = Twine(Twine(Name) + Twine("-")).str(); |
9309 | |
9310 | Name = Twine(Twine(Name) + Twine("one-as")).str(); |
9311 | } |
9312 | |
9313 | return Ctx.getOrInsertSyncScopeID(Name); |
9314 | } |
9315 | |
9316 | bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const { |
9317 | return false; |
9318 | } |
9319 | |
9320 | void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention( |
9321 | const FunctionType *&FT) const { |
9322 | FT = getABIInfo().getContext().adjustFunctionType( |
9323 | FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel)); |
9324 | } |
9325 | |
9326 | |
9327 | |
9328 | |
9329 | |
9330 | |
9331 | |
9332 | namespace { |
9333 | class SparcV8ABIInfo : public DefaultABIInfo { |
9334 | public: |
9335 | SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} |
9336 | |
9337 | private: |
9338 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
9339 | void computeInfo(CGFunctionInfo &FI) const override; |
9340 | }; |
9341 | } |
9342 | |
9343 | |
9344 | ABIArgInfo |
9345 | SparcV8ABIInfo::classifyReturnType(QualType Ty) const { |
9346 | if (Ty->isAnyComplexType()) { |
9347 | return ABIArgInfo::getDirect(); |
9348 | } |
9349 | else { |
9350 | return DefaultABIInfo::classifyReturnType(Ty); |
9351 | } |
9352 | } |
9353 | |
9354 | void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const { |
9355 | |
9356 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
9357 | for (auto &Arg : FI.arguments()) |
9358 | Arg.info = classifyArgumentType(Arg.type); |
9359 | } |
9360 | |
9361 | namespace { |
9362 | class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo { |
9363 | public: |
9364 | SparcV8TargetCodeGenInfo(CodeGenTypes &CGT) |
9365 | : TargetCodeGenInfo(std::make_unique<SparcV8ABIInfo>(CGT)) {} |
9366 | }; |
9367 | } |
9368 | |
9369 | |
9370 | |
9371 | |
9372 | |
9373 | |
9374 | |
9375 | |
9376 | |
9377 | |
9378 | |
9379 | |
9380 | |
9381 | |
9382 | |
9383 | |
9384 | |
9385 | |
9386 | |
9387 | |
9388 | |
9389 | |
9390 | |
9391 | |
9392 | |
9393 | |
9394 | |
9395 | namespace { |
9396 | class SparcV9ABIInfo : public ABIInfo { |
9397 | public: |
9398 | SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} |
9399 | |
9400 | private: |
9401 | ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const; |
9402 | void computeInfo(CGFunctionInfo &FI) const override; |
9403 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
9404 | QualType Ty) const override; |
9405 | |
9406 | |
9407 | |
9408 | |
9409 | |
9410 | |
9411 | |
9412 | |
9413 | |
9414 | |
9415 | |
9416 | |
9417 | struct CoerceBuilder { |
9418 | llvm::LLVMContext &Context; |
9419 | const llvm::DataLayout &DL; |
9420 | SmallVector<llvm::Type*, 8> Elems; |
9421 | uint64_t Size; |
9422 | bool InReg; |
9423 | |
9424 | CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl) |
9425 | : Context(c), DL(dl), Size(0), InReg(false) {} |
9426 | |
9427 | |
9428 | void pad(uint64_t ToSize) { |
9429 | assert(ToSize >= Size && "Cannot remove elements"); |
9430 | if (ToSize == Size) |
9431 | return; |
9432 | |
9433 | |
9434 | uint64_t Aligned = llvm::alignTo(Size, 64); |
9435 | if (Aligned > Size && Aligned <= ToSize) { |
9436 | Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size)); |
9437 | Size = Aligned; |
9438 | } |
9439 | |
9440 | |
9441 | while (Size + 64 <= ToSize) { |
9442 | Elems.push_back(llvm::Type::getInt64Ty(Context)); |
9443 | Size += 64; |
9444 | } |
9445 | |
9446 | |
9447 | if (Size < ToSize) { |
9448 | Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size)); |
9449 | Size = ToSize; |
9450 | } |
9451 | } |
9452 | |
9453 | |
9454 | void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) { |
9455 | |
9456 | if (Offset % Bits) |
9457 | return; |
9458 | |
9459 | if (Bits < 64) |
9460 | InReg = true; |
9461 | pad(Offset); |
9462 | Elems.push_back(Ty); |
9463 | Size = Offset + Bits; |
9464 | } |
9465 | |
9466 | |
9467 | void addStruct(uint64_t Offset, llvm::StructType *StrTy) { |
9468 | const llvm::StructLayout *Layout = DL.getStructLayout(StrTy); |
9469 | for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) { |
9470 | llvm::Type *ElemTy = StrTy->getElementType(i); |
9471 | uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i); |
9472 | switch (ElemTy->getTypeID()) { |
9473 | case llvm::Type::StructTyID: |
9474 | addStruct(ElemOffset, cast<llvm::StructType>(ElemTy)); |
9475 | break; |
9476 | case llvm::Type::FloatTyID: |
9477 | addFloat(ElemOffset, ElemTy, 32); |
9478 | break; |
9479 | case llvm::Type::DoubleTyID: |
9480 | addFloat(ElemOffset, ElemTy, 64); |
9481 | break; |
9482 | case llvm::Type::FP128TyID: |
9483 | addFloat(ElemOffset, ElemTy, 128); |
9484 | break; |
9485 | case llvm::Type::PointerTyID: |
9486 | if (ElemOffset % 64 == 0) { |
9487 | pad(ElemOffset); |
9488 | Elems.push_back(ElemTy); |
9489 | Size += 64; |
9490 | } |
9491 | break; |
9492 | default: |
9493 | break; |
9494 | } |
9495 | } |
9496 | } |
9497 | |
9498 | |
9499 | bool isUsableType(llvm::StructType *Ty) const { |
9500 | return llvm::makeArrayRef(Elems) == Ty->elements(); |
9501 | } |
9502 | |
9503 | |
9504 | llvm::Type *getType() const { |
9505 | if (Elems.size() == 1) |
9506 | return Elems.front(); |
9507 | else |
9508 | return llvm::StructType::get(Context, Elems); |
9509 | } |
9510 | }; |
9511 | }; |
9512 | } |
9513 | |
9514 | ABIArgInfo |
9515 | SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const { |
9516 | if (Ty->isVoidType()) |
9517 | return ABIArgInfo::getIgnore(); |
9518 | |
9519 | uint64_t Size = getContext().getTypeSize(Ty); |
9520 | |
9521 | |
9522 | |
9523 | if (Size > SizeLimit) |
9524 | return getNaturalAlignIndirect(Ty, false); |
9525 | |
9526 | |
9527 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
9528 | Ty = EnumTy->getDecl()->getIntegerType(); |
9529 | |
9530 | |
9531 | if (Size < 64 && Ty->isIntegerType()) |
9532 | return ABIArgInfo::getExtend(Ty); |
9533 | |
9534 | if (const auto *EIT = Ty->getAs<ExtIntType>()) |
9535 | if (EIT->getNumBits() < 64) |
9536 | return ABIArgInfo::getExtend(Ty); |
9537 | |
9538 | |
9539 | if (!isAggregateTypeForABI(Ty)) |
9540 | return ABIArgInfo::getDirect(); |
9541 | |
9542 | |
9543 | |
9544 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) |
9545 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
9546 | |
9547 | |
9548 | |
9549 | llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty)); |
9550 | if (!StrTy) |
9551 | return ABIArgInfo::getDirect(); |
9552 | |
9553 | CoerceBuilder CB(getVMContext(), getDataLayout()); |
9554 | CB.addStruct(0, StrTy); |
9555 | CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64)); |
9556 | |
9557 | |
9558 | llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType(); |
9559 | |
9560 | if (CB.InReg) |
9561 | return ABIArgInfo::getDirectInReg(CoerceTy); |
9562 | else |
9563 | return ABIArgInfo::getDirect(CoerceTy); |
9564 | } |
9565 | |
9566 | Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
9567 | QualType Ty) const { |
9568 | ABIArgInfo AI = classifyType(Ty, 16 * 8); |
9569 | llvm::Type *ArgTy = CGT.ConvertType(Ty); |
9570 | if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) |
9571 | AI.setCoerceToType(ArgTy); |
9572 | |
9573 | CharUnits SlotSize = CharUnits::fromQuantity(8); |
9574 | |
9575 | CGBuilderTy &Builder = CGF.Builder; |
9576 | Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize); |
9577 | llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); |
9578 | |
9579 | auto TypeInfo = getContext().getTypeInfoInChars(Ty); |
9580 | |
9581 | Address ArgAddr = Address::invalid(); |
9582 | CharUnits Stride; |
9583 | switch (AI.getKind()) { |
9584 | case ABIArgInfo::Expand: |
9585 | case ABIArgInfo::CoerceAndExpand: |
9586 | case ABIArgInfo::InAlloca: |
9587 | llvm_unreachable("Unsupported ABI kind for va_arg"); |
9588 | |
9589 | case ABIArgInfo::Extend: { |
9590 | Stride = SlotSize; |
9591 | CharUnits Offset = SlotSize - TypeInfo.Width; |
9592 | ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend"); |
9593 | break; |
9594 | } |
9595 | |
9596 | case ABIArgInfo::Direct: { |
9597 | auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType()); |
9598 | Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize); |
9599 | ArgAddr = Addr; |
9600 | break; |
9601 | } |
9602 | |
9603 | case ABIArgInfo::Indirect: |
9604 | case ABIArgInfo::IndirectAliased: |
9605 | Stride = SlotSize; |
9606 | ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect"); |
9607 | ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"), |
9608 | TypeInfo.Align); |
9609 | break; |
9610 | |
9611 | case ABIArgInfo::Ignore: |
9612 | return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.Align); |
9613 | } |
9614 | |
9615 | |
9616 | Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next"); |
9617 | Builder.CreateStore(NextPtr.getPointer(), VAListAddr); |
9618 | |
9619 | return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr"); |
9620 | } |
9621 | |
9622 | void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const { |
9623 | FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8); |
9624 | for (auto &I : FI.arguments()) |
9625 | I.info = classifyType(I.type, 16 * 8); |
9626 | } |
9627 | |
9628 | namespace { |
9629 | class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo { |
9630 | public: |
9631 | SparcV9TargetCodeGenInfo(CodeGenTypes &CGT) |
9632 | : TargetCodeGenInfo(std::make_unique<SparcV9ABIInfo>(CGT)) {} |
9633 | |
9634 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { |
9635 | return 14; |
9636 | } |
9637 | |
9638 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
9639 | llvm::Value *Address) const override; |
9640 | }; |
9641 | } |
9642 | |
9643 | bool |
9644 | SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
9645 | llvm::Value *Address) const { |
9646 | |
9647 | |
9648 | |
9649 | CodeGen::CGBuilderTy &Builder = CGF.Builder; |
9650 | |
9651 | llvm::IntegerType *i8 = CGF.Int8Ty; |
9652 | llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); |
9653 | llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); |
9654 | |
9655 | |
9656 | AssignToArrayRange(Builder, Address, Eight8, 0, 31); |
9657 | |
9658 | |
9659 | AssignToArrayRange(Builder, Address, Four8, 32, 63); |
9660 | |
9661 | |
9662 | |
9663 | |
9664 | |
9665 | |
9666 | |
9667 | |
9668 | |
9669 | AssignToArrayRange(Builder, Address, Eight8, 64, 71); |
9670 | |
9671 | |
9672 | AssignToArrayRange(Builder, Address, Eight8, 72, 87); |
9673 | |
9674 | return false; |
9675 | } |
9676 | |
9677 | |
9678 | namespace { |
9679 | |
9680 | class ARCABIInfo : public DefaultABIInfo { |
9681 | public: |
9682 | using DefaultABIInfo::DefaultABIInfo; |
9683 | |
9684 | private: |
9685 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
9686 | QualType Ty) const override; |
9687 | |
9688 | void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const { |
9689 | if (!State.FreeRegs) |
9690 | return; |
9691 | if (Info.isIndirect() && Info.getInReg()) |
9692 | State.FreeRegs--; |
9693 | else if (Info.isDirect() && Info.getInReg()) { |
9694 | unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32; |
9695 | if (sz < State.FreeRegs) |
9696 | State.FreeRegs -= sz; |
9697 | else |
9698 | State.FreeRegs = 0; |
9699 | } |
9700 | } |
9701 | |
9702 | void computeInfo(CGFunctionInfo &FI) const override { |
9703 | CCState State(FI); |
9704 | |
9705 | State.FreeRegs = 8; |
9706 | |
9707 | if (!getCXXABI().classifyReturnType(FI)) |
9708 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
9709 | updateState(FI.getReturnInfo(), FI.getReturnType(), State); |
9710 | for (auto &I : FI.arguments()) { |
9711 | I.info = classifyArgumentType(I.type, State.FreeRegs); |
9712 | updateState(I.info, I.type, State); |
9713 | } |
9714 | } |
9715 | |
9716 | ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const; |
9717 | ABIArgInfo getIndirectByValue(QualType Ty) const; |
9718 | ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const; |
9719 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
9720 | }; |
9721 | |
9722 | class ARCTargetCodeGenInfo : public TargetCodeGenInfo { |
9723 | public: |
9724 | ARCTargetCodeGenInfo(CodeGenTypes &CGT) |
9725 | : TargetCodeGenInfo(std::make_unique<ARCABIInfo>(CGT)) {} |
9726 | }; |
9727 | |
9728 | |
9729 | ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const { |
9730 | return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) : |
9731 | getNaturalAlignIndirect(Ty, false); |
9732 | } |
9733 | |
9734 | ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const { |
9735 | |
9736 | const unsigned MinABIStackAlignInBytes = 4; |
9737 | unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; |
9738 | return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), true, |
9739 | TypeAlign > MinABIStackAlignInBytes); |
9740 | } |
9741 | |
9742 | Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
9743 | QualType Ty) const { |
9744 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, false, |
9745 | getContext().getTypeInfoInChars(Ty), |
9746 | CharUnits::fromQuantity(4), true); |
9747 | } |
9748 | |
9749 | ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty, |
9750 | uint8_t FreeRegs) const { |
9751 | |
9752 | const RecordType *RT = Ty->getAs<RecordType>(); |
9753 | if (RT) { |
9754 | CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); |
9755 | if (RAA == CGCXXABI::RAA_Indirect) |
9756 | return getIndirectByRef(Ty, FreeRegs > 0); |
9757 | |
9758 | if (RAA == CGCXXABI::RAA_DirectInMemory) |
9759 | return getIndirectByValue(Ty); |
9760 | } |
9761 | |
9762 | |
9763 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
9764 | Ty = EnumTy->getDecl()->getIntegerType(); |
9765 | |
9766 | auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32; |
9767 | |
9768 | if (isAggregateTypeForABI(Ty)) { |
9769 | |
9770 | if (RT && RT->getDecl()->hasFlexibleArrayMember()) |
9771 | return getIndirectByValue(Ty); |
9772 | |
9773 | |
9774 | if (isEmptyRecord(getContext(), Ty, true)) |
9775 | return ABIArgInfo::getIgnore(); |
9776 | |
9777 | llvm::LLVMContext &LLVMContext = getVMContext(); |
9778 | |
9779 | llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); |
9780 | SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32); |
9781 | llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); |
9782 | |
9783 | return FreeRegs >= SizeInRegs ? |
9784 | ABIArgInfo::getDirectInReg(Result) : |
9785 | ABIArgInfo::getDirect(Result, 0, nullptr, false); |
9786 | } |
9787 | |
9788 | if (const auto *EIT = Ty->getAs<ExtIntType>()) |
9789 | if (EIT->getNumBits() > 64) |
9790 | return getIndirectByValue(Ty); |
9791 | |
9792 | return isPromotableIntegerTypeForABI(Ty) |
9793 | ? (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty) |
9794 | : ABIArgInfo::getExtend(Ty)) |
9795 | : (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg() |
9796 | : ABIArgInfo::getDirect()); |
9797 | } |
9798 | |
9799 | ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const { |
9800 | if (RetTy->isAnyComplexType()) |
9801 | return ABIArgInfo::getDirectInReg(); |
9802 | |
9803 | |
9804 | auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32; |
9805 | if (RetSize > 4) |
9806 | return getIndirectByRef(RetTy, true); |
9807 | |
9808 | return DefaultABIInfo::classifyReturnType(RetTy); |
9809 | } |
9810 | |
9811 | } |
9812 | |
9813 | |
9814 | |
9815 | |
9816 | |
9817 | namespace { |
9818 | |
9819 | |
9820 | |
9821 | typedef llvm::SmallString<128> SmallStringEnc; |
9822 | |
9823 | |
9824 | |
9825 | |
9826 | |
9827 | |
9828 | |
9829 | |
9830 | |
9831 | |
9832 | |
9833 | |
9834 | |
9835 | |
9836 | |
9837 | |
9838 | |
9839 | |
9840 | |
9841 | |
9842 | |
9843 | |
9844 | |
9845 | |
9846 | |
9847 | |
9848 | |
9849 | |
9850 | |
9851 | |
9852 | |
9853 | |
9854 | |
9855 | |
9856 | |
9857 | |
9858 | |
9859 | |
9860 | |
9861 | |
9862 | |
9863 | |
9864 | |
9865 | |
9866 | |
9867 | |
9868 | |
9869 | |
9870 | |
9871 | |
9872 | |
9873 | |
9874 | |
9875 | |
9876 | |
9877 | class TypeStringCache { |
9878 | enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed}; |
9879 | struct Entry { |
9880 | std::string Str; |
9881 | enum Status State; |
9882 | std::string Swapped; |
9883 | |
9884 | }; |
9885 | std::map<const IdentifierInfo *, struct Entry> Map; |
9886 | unsigned IncompleteCount; |
9887 | unsigned IncompleteUsedCount; |
9888 | public: |
9889 | TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {} |
9890 | void addIncomplete(const IdentifierInfo *ID, std::string StubEnc); |
9891 | bool removeIncomplete(const IdentifierInfo *ID); |
9892 | void addIfComplete(const IdentifierInfo *ID, StringRef Str, |
9893 | bool IsRecursive); |
9894 | StringRef lookupStr(const IdentifierInfo *ID); |
9895 | }; |
9896 | |
9897 | |
9898 | |
9899 | class FieldEncoding { |
9900 | bool HasName; |
9901 | std::string Enc; |
9902 | public: |
9903 | FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {} |
9904 | StringRef str() { return Enc; } |
9905 | bool operator<(const FieldEncoding &rhs) const { |
9906 | if (HasName != rhs.HasName) return HasName; |
9907 | return Enc < rhs.Enc; |
9908 | } |
9909 | }; |
9910 | |
9911 | class XCoreABIInfo : public DefaultABIInfo { |
9912 | public: |
9913 | XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} |
9914 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
9915 | QualType Ty) const override; |
9916 | }; |
9917 | |
9918 | class XCoreTargetCodeGenInfo : public TargetCodeGenInfo { |
9919 | mutable TypeStringCache TSC; |
9920 | void emitTargetMD(const Decl *D, llvm::GlobalValue *GV, |
9921 | const CodeGen::CodeGenModule &M) const; |
9922 | |
9923 | public: |
9924 | XCoreTargetCodeGenInfo(CodeGenTypes &CGT) |
9925 | : TargetCodeGenInfo(std::make_unique<XCoreABIInfo>(CGT)) {} |
9926 | void emitTargetMetadata(CodeGen::CodeGenModule &CGM, |
9927 | const llvm::MapVector<GlobalDecl, StringRef> |
9928 | &MangledDeclNames) const override; |
9929 | }; |
9930 | |
9931 | } |
9932 | |
9933 | |
9934 | |
9935 | Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
9936 | QualType Ty) const { |
9937 | CGBuilderTy &Builder = CGF.Builder; |
9938 | |
9939 | |
9940 | CharUnits SlotSize = CharUnits::fromQuantity(4); |
9941 | Address AP(Builder.CreateLoad(VAListAddr), SlotSize); |
9942 | |
9943 | |
9944 | ABIArgInfo AI = classifyArgumentType(Ty); |
9945 | CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty); |
9946 | llvm::Type *ArgTy = CGT.ConvertType(Ty); |
9947 | if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) |
9948 | AI.setCoerceToType(ArgTy); |
9949 | llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); |
9950 | |
9951 | Address Val = Address::invalid(); |
9952 | CharUnits ArgSize = CharUnits::Zero(); |
9953 | switch (AI.getKind()) { |
9954 | case ABIArgInfo::Expand: |
9955 | case ABIArgInfo::CoerceAndExpand: |
9956 | case ABIArgInfo::InAlloca: |
9957 | llvm_unreachable("Unsupported ABI kind for va_arg"); |
9958 | case ABIArgInfo::Ignore: |
9959 | Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign); |
9960 | ArgSize = CharUnits::Zero(); |
9961 | break; |
9962 | case ABIArgInfo::Extend: |
9963 | case ABIArgInfo::Direct: |
9964 | Val = Builder.CreateBitCast(AP, ArgPtrTy); |
9965 | ArgSize = CharUnits::fromQuantity( |
9966 | getDataLayout().getTypeAllocSize(AI.getCoerceToType())); |
9967 | ArgSize = ArgSize.alignTo(SlotSize); |
9968 | break; |
9969 | case ABIArgInfo::Indirect: |
9970 | case ABIArgInfo::IndirectAliased: |
9971 | Val = Builder.CreateElementBitCast(AP, ArgPtrTy); |
9972 | Val = Address(Builder.CreateLoad(Val), TypeAlign); |
9973 | ArgSize = SlotSize; |
9974 | break; |
9975 | } |
9976 | |
9977 | |
9978 | if (!ArgSize.isZero()) { |
9979 | Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize); |
9980 | Builder.CreateStore(APN.getPointer(), VAListAddr); |
9981 | } |
9982 | |
9983 | return Val; |
9984 | } |
9985 | |
9986 | |
9987 | |
9988 | |
9989 | |
9990 | |
9991 | void TypeStringCache::addIncomplete(const IdentifierInfo *ID, |
9992 | std::string StubEnc) { |
9993 | if (!ID) |
9994 | return; |
9995 | Entry &E = Map[ID]; |
9996 | assert( (E.Str.empty() || E.State == Recursive) && |
9997 | "Incorrectly use of addIncomplete"); |
9998 | assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()"); |
9999 | E.Swapped.swap(E.Str); |
10000 | E.Str.swap(StubEnc); |
10001 | E.State = Incomplete; |
10002 | ++IncompleteCount; |
10003 | } |
10004 | |
10005 | |
10006 | |
10007 | |
10008 | |
10009 | bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) { |
10010 | if (!ID) |
10011 | return false; |
10012 | auto I = Map.find(ID); |
10013 | assert(I != Map.end() && "Entry not present"); |
10014 | Entry &E = I->second; |
10015 | assert( (E.State == Incomplete || |
10016 | E.State == IncompleteUsed) && |
10017 | "Entry must be an incomplete type"); |
10018 | bool IsRecursive = false; |
10019 | if (E.State == IncompleteUsed) { |
10020 | |
10021 | IsRecursive = true; |
10022 | --IncompleteUsedCount; |
10023 | } |
10024 | if (E.Swapped.empty()) |
10025 | Map.erase(I); |
10026 | else { |
10027 | |
10028 | E.Swapped.swap(E.Str); |
10029 | E.Swapped.clear(); |
10030 | E.State = Recursive; |
10031 | } |
10032 | --IncompleteCount; |
10033 | return IsRecursive; |
10034 | } |
10035 | |
10036 | |
10037 | |
10038 | void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str, |
10039 | bool IsRecursive) { |
10040 | if (!ID || IncompleteUsedCount) |
10041 | return; |
10042 | Entry &E = Map[ID]; |
10043 | if (IsRecursive && !E.Str.empty()) { |
10044 | assert(E.State==Recursive && E.Str.size() == Str.size() && |
10045 | "This is not the same Recursive entry"); |
10046 | |
10047 | |
10048 | |
10049 | return; |
10050 | } |
10051 | assert(E.Str.empty() && "Entry already present"); |
10052 | E.Str = Str.str(); |
10053 | E.State = IsRecursive? Recursive : NonRecursive; |
10054 | } |
10055 | |
10056 | |
10057 | |
10058 | |
10059 | StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) { |
10060 | if (!ID) |
10061 | return StringRef(); |
10062 | auto I = Map.find(ID); |
10063 | if (I == Map.end()) |
10064 | return StringRef(); |
10065 | Entry &E = I->second; |
10066 | if (E.State == Recursive && IncompleteCount) |
10067 | return StringRef(); |
10068 | |
10069 | if (E.State == Incomplete) { |
10070 | |
10071 | E.State = IncompleteUsed; |
10072 | ++IncompleteUsedCount; |
10073 | } |
10074 | return E.Str; |
10075 | } |
10076 | |
10077 | |
10078 | |
10079 | |
10080 | |
10081 | |
10082 | |
10083 | |
10084 | |
10085 | |
10086 | |
10087 | |
10088 | |
10089 | static bool getTypeString(SmallStringEnc &Enc, const Decl *D, |
10090 | const CodeGen::CodeGenModule &CGM, |
10091 | TypeStringCache &TSC); |
10092 | |
10093 | |
10094 | void XCoreTargetCodeGenInfo::emitTargetMD( |
10095 | const Decl *D, llvm::GlobalValue *GV, |
10096 | const CodeGen::CodeGenModule &CGM) const { |
10097 | SmallStringEnc Enc; |
10098 | if (getTypeString(Enc, D, CGM, TSC)) { |
10099 | llvm::LLVMContext &Ctx = CGM.getModule().getContext(); |
10100 | llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV), |
10101 | llvm::MDString::get(Ctx, Enc.str())}; |
10102 | llvm::NamedMDNode *MD = |
10103 | CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings"); |
10104 | MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); |
10105 | } |
10106 | } |
10107 | |
10108 | void XCoreTargetCodeGenInfo::emitTargetMetadata( |
10109 | CodeGen::CodeGenModule &CGM, |
10110 | const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const { |
10111 | |
10112 | |
10113 | |
10114 | for (unsigned I = 0; I != MangledDeclNames.size(); ++I) { |
10115 | auto Val = *(MangledDeclNames.begin() + I); |
10116 | llvm::GlobalValue *GV = CGM.GetGlobalValue(Val.second); |
10117 | if (GV) { |
10118 | const Decl *D = Val.first.getDecl()->getMostRecentDecl(); |
10119 | emitTargetMD(D, GV, CGM); |
10120 | } |
10121 | } |
10122 | } |
10123 | |
10124 | |
10125 | |
10126 | |
10127 | namespace { |
10128 | class SPIRABIInfo : public DefaultABIInfo { |
10129 | public: |
10130 | SPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); } |
10131 | |
10132 | private: |
10133 | void setCCs(); |
10134 | }; |
10135 | } |
10136 | namespace { |
10137 | class SPIRTargetCodeGenInfo : public TargetCodeGenInfo { |
10138 | public: |
10139 | SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) |
10140 | : TargetCodeGenInfo(std::make_unique<SPIRABIInfo>(CGT)) {} |
10141 | |
10142 | LangAS getASTAllocaAddressSpace() const override { |
10143 | return getLangASFromTargetAS( |
10144 | getABIInfo().getDataLayout().getAllocaAddrSpace()); |
10145 | } |
10146 | |
10147 | unsigned getOpenCLKernelCallingConv() const override; |
10148 | }; |
10149 | |
10150 | } |
10151 | void SPIRABIInfo::setCCs() { |
10152 | assert(getRuntimeCC() == llvm::CallingConv::C); |
10153 | RuntimeCC = llvm::CallingConv::SPIR_FUNC; |
10154 | } |
10155 | |
10156 | namespace clang { |
10157 | namespace CodeGen { |
10158 | void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) { |
10159 | DefaultABIInfo SPIRABI(CGM.getTypes()); |
10160 | SPIRABI.computeInfo(FI); |
10161 | } |
10162 | } |
10163 | } |
10164 | |
10165 | unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const { |
10166 | return llvm::CallingConv::SPIR_KERNEL; |
10167 | } |
10168 | |
10169 | static bool appendType(SmallStringEnc &Enc, QualType QType, |
10170 | const CodeGen::CodeGenModule &CGM, |
10171 | TypeStringCache &TSC); |
10172 | |
10173 | |
10174 | |
10175 | |
10176 | static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE, |
10177 | const RecordDecl *RD, |
10178 | const CodeGen::CodeGenModule &CGM, |
10179 | TypeStringCache &TSC) { |
10180 | for (const auto *Field : RD->fields()) { |
10181 | SmallStringEnc Enc; |
10182 | Enc += "m("; |
10183 | Enc += Field->getName(); |
10184 | Enc += "){"; |
10185 | if (Field->isBitField()) { |
10186 | Enc += "b("; |
10187 | llvm::raw_svector_ostream OS(Enc); |
10188 | OS << Field->getBitWidthValue(CGM.getContext()); |
10189 | Enc += ':'; |
10190 | } |
10191 | if (!appendType(Enc, Field->getType(), CGM, TSC)) |
10192 | return false; |
10193 | if (Field->isBitField()) |
10194 | Enc += ')'; |
10195 | Enc += '}'; |
10196 | FE.emplace_back(!Field->getName().empty(), Enc); |
10197 | } |
10198 | return true; |
10199 | } |
10200 | |
10201 | |
10202 | |
10203 | |
10204 | static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, |
10205 | const CodeGen::CodeGenModule &CGM, |
10206 | TypeStringCache &TSC, const IdentifierInfo *ID) { |
10207 | |
10208 | StringRef TypeString = TSC.lookupStr(ID); |
10209 | if (!TypeString.empty()) { |
10210 | Enc += TypeString; |
10211 | return true; |
10212 | } |
10213 | |
10214 | |
10215 | size_t Start = Enc.size(); |
10216 | Enc += (RT->isUnionType()? 'u' : 's'); |
10217 | Enc += '('; |
10218 | if (ID) |
10219 | Enc += ID->getName(); |
10220 | Enc += "){"; |
10221 | |
10222 | |
10223 | bool IsRecursive = false; |
10224 | const RecordDecl *RD = RT->getDecl()->getDefinition(); |
10225 | if (RD && !RD->field_empty()) { |
10226 | |
10227 | |
10228 | |
10229 | SmallVector<FieldEncoding, 16> FE; |
10230 | std::string StubEnc(Enc.substr(Start).str()); |
10231 | StubEnc += '}'; |
10232 | TSC.addIncomplete(ID, std::move(StubEnc)); |
10233 | if (!extractFieldType(FE, RD, CGM, TSC)) { |
10234 | (void) TSC.removeIncomplete(ID); |
10235 | return false; |
10236 | } |
10237 | IsRecursive = TSC.removeIncomplete(ID); |
10238 | |
10239 | |
10240 | if (RT->isUnionType()) |
10241 | llvm::sort(FE); |
10242 | |
10243 | unsigned E = FE.size(); |
10244 | for (unsigned I = 0; I != E; ++I) { |
10245 | if (I) |
10246 | Enc += ','; |
10247 | Enc += FE[I].str(); |
10248 | } |
10249 | } |
10250 | Enc += '}'; |
10251 | TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive); |
10252 | return true; |
10253 | } |
10254 | |
10255 | |
10256 | static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, |
10257 | TypeStringCache &TSC, |
10258 | const IdentifierInfo *ID) { |
10259 | |
10260 | StringRef TypeString = TSC.lookupStr(ID); |
10261 | if (!TypeString.empty()) { |
10262 | Enc += TypeString; |
10263 | return true; |
10264 | } |
10265 | |
10266 | size_t Start = Enc.size(); |
10267 | Enc += "e("; |
10268 | if (ID) |
10269 | Enc += ID->getName(); |
10270 | Enc += "){"; |
10271 | |
10272 | |
10273 | if (const EnumDecl *ED = ET->getDecl()->getDefinition()) { |
10274 | SmallVector<FieldEncoding, 16> FE; |
10275 | for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E; |
10276 | ++I) { |
10277 | SmallStringEnc EnumEnc; |
10278 | EnumEnc += "m("; |
10279 | EnumEnc += I->getName(); |
10280 | EnumEnc += "){"; |
10281 | I->getInitVal().toString(EnumEnc); |
10282 | EnumEnc += '}'; |
10283 | FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc)); |
10284 | } |
10285 | llvm::sort(FE); |
10286 | unsigned E = FE.size(); |
10287 | for (unsigned I = 0; I != E; ++I) { |
10288 | if (I) |
10289 | Enc += ','; |
10290 | Enc += FE[I].str(); |
10291 | } |
10292 | } |
10293 | Enc += '}'; |
10294 | TSC.addIfComplete(ID, Enc.substr(Start), false); |
10295 | return true; |
10296 | } |
10297 | |
10298 | |
10299 | |
10300 | static void appendQualifier(SmallStringEnc &Enc, QualType QT) { |
10301 | |
10302 | static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"}; |
10303 | int Lookup = 0; |
10304 | if (QT.isConstQualified()) |
10305 | Lookup += 1<<0; |
10306 | if (QT.isRestrictQualified()) |
10307 | Lookup += 1<<1; |
10308 | if (QT.isVolatileQualified()) |
10309 | Lookup += 1<<2; |
10310 | Enc += Table[Lookup]; |
10311 | } |
10312 | |
10313 | |
10314 | static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) { |
10315 | const char *EncType; |
10316 | switch (BT->getKind()) { |
10317 | case BuiltinType::Void: |
10318 | EncType = "0"; |
10319 | break; |
10320 | case BuiltinType::Bool: |
10321 | EncType = "b"; |
10322 | break; |
10323 | case BuiltinType::Char_U: |
10324 | EncType = "uc"; |
10325 | break; |
10326 | case BuiltinType::UChar: |
10327 | EncType = "uc"; |
10328 | break; |
10329 | case BuiltinType::SChar: |
10330 | EncType = "sc"; |
10331 | break; |
10332 | case BuiltinType::UShort: |
10333 | EncType = "us"; |
10334 | break; |
10335 | case BuiltinType::Short: |
10336 | EncType = "ss"; |
10337 | break; |
10338 | case BuiltinType::UInt: |
10339 | EncType = "ui"; |
10340 | break; |
10341 | case BuiltinType::Int: |
10342 | EncType = "si"; |
10343 | break; |
10344 | case BuiltinType::ULong: |
10345 | EncType = "ul"; |
10346 | break; |
10347 | case BuiltinType::Long: |
10348 | EncType = "sl"; |
10349 | break; |
10350 | case BuiltinType::ULongLong: |
10351 | EncType = "ull"; |
10352 | break; |
10353 | case BuiltinType::LongLong: |
10354 | EncType = "sll"; |
10355 | break; |
10356 | case BuiltinType::Float: |
10357 | EncType = "ft"; |
10358 | break; |
10359 | case BuiltinType::Double: |
10360 | EncType = "d"; |
10361 | break; |
10362 | case BuiltinType::LongDouble: |
10363 | EncType = "ld"; |
10364 | break; |
10365 | default: |
10366 | return false; |
10367 | } |
10368 | Enc += EncType; |
10369 | return true; |
10370 | } |
10371 | |
10372 | |
10373 | static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, |
10374 | const CodeGen::CodeGenModule &CGM, |
10375 | TypeStringCache &TSC) { |
10376 | Enc += "p("; |
10377 | if (!appendType(Enc, PT->getPointeeType(), CGM, TSC)) |
10378 | return false; |
10379 | Enc += ')'; |
10380 | return true; |
10381 | } |
10382 | |
10383 | |
10384 | static bool appendArrayType(SmallStringEnc &Enc, QualType QT, |
10385 | const ArrayType *AT, |
10386 | const CodeGen::CodeGenModule &CGM, |
10387 | TypeStringCache &TSC, StringRef NoSizeEnc) { |
10388 | if (AT->getSizeModifier() != ArrayType::Normal) |
10389 | return false; |
10390 | Enc += "a("; |
10391 | if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) |
10392 | CAT->getSize().toStringUnsigned(Enc); |
10393 | else |
10394 | Enc += NoSizeEnc; |
10395 | Enc += ':'; |
10396 | |
10397 | appendQualifier(Enc, QT); |
10398 | if (!appendType(Enc, AT->getElementType(), CGM, TSC)) |
10399 | return false; |
10400 | Enc += ')'; |
10401 | return true; |
10402 | } |
10403 | |
10404 | |
10405 | |
10406 | static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, |
10407 | const CodeGen::CodeGenModule &CGM, |
10408 | TypeStringCache &TSC) { |
10409 | Enc += "f{"; |
10410 | if (!appendType(Enc, FT->getReturnType(), CGM, TSC)) |
10411 | return false; |
10412 | Enc += "}("; |
10413 | if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) { |
10414 | |
10415 | auto I = FPT->param_type_begin(); |
10416 | auto E = FPT->param_type_end(); |
10417 | if (I != E) { |
10418 | do { |
10419 | if (!appendType(Enc, *I, CGM, TSC)) |
10420 | return false; |
10421 | ++I; |
10422 | if (I != E) |
10423 | Enc += ','; |
10424 | } while (I != E); |
10425 | if (FPT->isVariadic()) |
10426 | Enc += ",va"; |
10427 | } else { |
10428 | if (FPT->isVariadic()) |
10429 | Enc += "va"; |
10430 | else |
10431 | Enc += '0'; |
10432 | } |
10433 | } |
10434 | Enc += ')'; |
10435 | return true; |
10436 | } |
10437 | |
10438 | |
10439 | |
10440 | static bool appendType(SmallStringEnc &Enc, QualType QType, |
10441 | const CodeGen::CodeGenModule &CGM, |
10442 | TypeStringCache &TSC) { |
10443 | |
10444 | QualType QT = QType.getCanonicalType(); |
10445 | |
10446 | if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) |
10447 | |
10448 | |
10449 | return appendArrayType(Enc, QT, AT, CGM, TSC, ""); |
10450 | |
10451 | appendQualifier(Enc, QT); |
10452 | |
10453 | if (const BuiltinType *BT = QT->getAs<BuiltinType>()) |
10454 | return appendBuiltinType(Enc, BT); |
10455 | |
10456 | if (const PointerType *PT = QT->getAs<PointerType>()) |
10457 | return appendPointerType(Enc, PT, CGM, TSC); |
10458 | |
10459 | if (const EnumType *ET = QT->getAs<EnumType>()) |
10460 | return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier()); |
10461 | |
10462 | if (const RecordType *RT = QT->getAsStructureType()) |
10463 | return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); |
10464 | |
10465 | if (const RecordType *RT = QT->getAsUnionType()) |
10466 | return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); |
10467 | |
10468 | if (const FunctionType *FT = QT->getAs<FunctionType>()) |
10469 | return appendFunctionType(Enc, FT, CGM, TSC); |
10470 | |
10471 | return false; |
10472 | } |
10473 | |
10474 | static bool getTypeString(SmallStringEnc &Enc, const Decl *D, |
10475 | const CodeGen::CodeGenModule &CGM, |
10476 | TypeStringCache &TSC) { |
10477 | if (!D) |
10478 | return false; |
10479 | |
10480 | if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { |
10481 | if (FD->getLanguageLinkage() != CLanguageLinkage) |
10482 | return false; |
10483 | return appendType(Enc, FD->getType(), CGM, TSC); |
10484 | } |
10485 | |
10486 | if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { |
10487 | if (VD->getLanguageLinkage() != CLanguageLinkage) |
10488 | return false; |
10489 | QualType QT = VD->getType().getCanonicalType(); |
10490 | if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) { |
10491 | |
10492 | |
10493 | |
10494 | return appendArrayType(Enc, QT, AT, CGM, TSC, "*"); |
10495 | } |
10496 | return appendType(Enc, QT, CGM, TSC); |
10497 | } |
10498 | return false; |
10499 | } |
10500 | |
10501 | |
10502 | |
10503 | |
10504 | |
10505 | namespace { |
10506 | class RISCVABIInfo : public DefaultABIInfo { |
10507 | private: |
10508 | |
10509 | unsigned XLen; |
10510 | |
10511 | |
10512 | |
10513 | unsigned FLen; |
10514 | static const int NumArgGPRs = 8; |
10515 | static const int NumArgFPRs = 8; |
10516 | bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff, |
10517 | llvm::Type *&Field1Ty, |
10518 | CharUnits &Field1Off, |
10519 | llvm::Type *&Field2Ty, |
10520 | CharUnits &Field2Off) const; |
10521 | |
10522 | public: |
10523 | RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen) |
10524 | : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {} |
10525 | |
10526 | |
10527 | |
10528 | void computeInfo(CGFunctionInfo &FI) const override; |
10529 | |
10530 | ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft, |
10531 | int &ArgFPRsLeft) const; |
10532 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
10533 | |
10534 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
10535 | QualType Ty) const override; |
10536 | |
10537 | ABIArgInfo extendType(QualType Ty) const; |
10538 | |
10539 | bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty, |
10540 | CharUnits &Field1Off, llvm::Type *&Field2Ty, |
10541 | CharUnits &Field2Off, int &NeededArgGPRs, |
10542 | int &NeededArgFPRs) const; |
10543 | ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty, |
10544 | CharUnits Field1Off, |
10545 | llvm::Type *Field2Ty, |
10546 | CharUnits Field2Off) const; |
10547 | }; |
10548 | } |
10549 | |
10550 | void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const { |
10551 | QualType RetTy = FI.getReturnType(); |
10552 | if (!getCXXABI().classifyReturnType(FI)) |
10553 | FI.getReturnInfo() = classifyReturnType(RetTy); |
10554 | |
10555 | |
10556 | |
10557 | |
10558 | |
10559 | |
10560 | bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect; |
10561 | if (!IsRetIndirect && RetTy->isScalarType() && |
10562 | getContext().getTypeSize(RetTy) > (2 * XLen)) { |
10563 | if (RetTy->isComplexType() && FLen) { |
10564 | QualType EltTy = RetTy->castAs<ComplexType>()->getElementType(); |
10565 | IsRetIndirect = getContext().getTypeSize(EltTy) > FLen; |
10566 | } else { |
10567 | // This is a normal scalar greater than 2*XLen, such as fp128 on RV32. |
10568 | IsRetIndirect = true; |
10569 | } |
10570 | } |
10571 | |
10572 | // We must track the number of GPRs used in order to conform to the RISC-V |
10573 | // ABI, as integer scalars passed in registers should have signext/zeroext |
10574 | // when promoted, but are anyext if passed on the stack. As GPR usage is |
10575 | // different for variadic arguments, we must also track whether we are |
10576 | // examining a vararg or not. |
10577 | int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs; |
10578 | int ArgFPRsLeft = FLen ? NumArgFPRs : 0; |
10579 | int NumFixedArgs = FI.getNumRequiredArgs(); |
10580 | |
10581 | int ArgNum = 0; |
10582 | for (auto &ArgInfo : FI.arguments()) { |
10583 | bool IsFixed = ArgNum < NumFixedArgs; |
10584 | ArgInfo.info = |
10585 | classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft); |
10586 | ArgNum++; |
10587 | } |
10588 | } |
10589 | |
10590 | // Returns true if the struct is a potential candidate for the floating point |
10591 | // calling convention. If this function returns true, the caller is |
10592 | // responsible for checking that if there is only a single field then that |
10593 | // field is a float. |
10594 | bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff, |
10595 | llvm::Type *&Field1Ty, |
10596 | CharUnits &Field1Off, |
10597 | llvm::Type *&Field2Ty, |
10598 | CharUnits &Field2Off) const { |
10599 | bool IsInt = Ty->isIntegralOrEnumerationType(); |
10600 | bool IsFloat = Ty->isRealFloatingType(); |
10601 | |
10602 | if (IsInt || IsFloat) { |
10603 | uint64_t Size = getContext().getTypeSize(Ty); |
10604 | if (IsInt && Size > XLen) |
10605 | return false; |
10606 | |
10607 | // Can't be eligible if larger than the FP registers, or narrower than |
10608 | // 32 bits (i.e. half precision is not handled here). |
10609 | if (IsFloat && (Size > FLen || Size < 32)) |
10610 | return false; |
10611 | // Can't be eligible if an integer type was already found (int+int pairs |
10612 | // are not eligible). |
10613 | if (IsInt && Field1Ty && Field1Ty->isIntegerTy()) |
10614 | return false; |
10615 | if (!Field1Ty) { |
10616 | Field1Ty = CGT.ConvertType(Ty); |
10617 | Field1Off = CurOff; |
10618 | return true; |
10619 | } |
10620 | if (!Field2Ty) { |
10621 | Field2Ty = CGT.ConvertType(Ty); |
10622 | Field2Off = CurOff; |
10623 | return true; |
10624 | } |
10625 | return false; |
10626 | } |
10627 | |
10628 | if (auto CTy = Ty->getAs<ComplexType>()) { |
10629 | if (Field1Ty) |
10630 | return false; |
10631 | QualType EltTy = CTy->getElementType(); |
10632 | if (getContext().getTypeSize(EltTy) > FLen) |
10633 | return false; |
10634 | Field1Ty = CGT.ConvertType(EltTy); |
10635 | Field1Off = CurOff; |
10636 | Field2Ty = Field1Ty; |
10637 | Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy); |
10638 | return true; |
10639 | } |
10640 | |
10641 | if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) { |
10642 | uint64_t ArraySize = ATy->getSize().getZExtValue(); |
10643 | QualType EltTy = ATy->getElementType(); |
10644 | CharUnits EltSize = getContext().getTypeSizeInChars(EltTy); |
10645 | for (uint64_t i = 0; i < ArraySize; ++i) { |
10646 | bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty, |
10647 | Field1Off, Field2Ty, Field2Off); |
10648 | if (!Ret) |
10649 | return false; |
10650 | CurOff += EltSize; |
10651 | } |
10652 | return true; |
10653 | } |
10654 | |
10655 | if (const auto *RTy = Ty->getAs<RecordType>()) { |
10656 | // Structures with either a non-trivial destructor or a non-trivial copy |
10657 | // constructor are not eligible for the FP calling convention. |
10658 | if (getRecordArgABI(Ty, CGT.getCXXABI())) |
10659 | return false; |
10660 | if (isEmptyRecord(getContext(), Ty, true)) |
10661 | return true; |
10662 | const RecordDecl *RD = RTy->getDecl(); |
10663 | // Unions aren't eligible unless they're empty (which is caught above). |
10664 | if (RD->isUnion()) |
10665 | return false; |
10666 | int ZeroWidthBitFieldCount = 0; |
10667 | for (const FieldDecl *FD : RD->fields()) { |
10668 | const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); |
10669 | uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex()); |
10670 | QualType QTy = FD->getType(); |
10671 | if (FD->isBitField()) { |
10672 | unsigned BitWidth = FD->getBitWidthValue(getContext()); |
10673 | // Allow a bitfield with a type greater than XLen as long as the bitwidth |
10674 | // is XLen or less. |
10675 | if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen) |
10676 | QTy = getContext().getIntTypeForBitwidth(XLen, false); |
10677 | if (BitWidth == 0) { |
10678 | ZeroWidthBitFieldCount++; |
10679 | continue; |
10680 | } |
10681 | } |
10682 | |
10683 | bool Ret = detectFPCCEligibleStructHelper( |
10684 | QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits), |
10685 | Field1Ty, Field1Off, Field2Ty, Field2Off); |
10686 | if (!Ret) |
10687 | return false; |
10688 | |
10689 | // As a quirk of the ABI, zero-width bitfields are ignored while only a |
10690 | // single FP field has been seen, but they disqualify the struct once a |
10691 | // second field is present. |
10692 | if (Field2Ty && ZeroWidthBitFieldCount > 0) |
10693 | return false; |
10694 | } |
10695 | return Field1Ty != nullptr; |
10696 | } |
10697 | |
10698 | return false; |
10699 | } |
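A few illustrative C++ structs and how the helper above treats them, assuming an rv64 target with the lp64d ABI (XLen = 64, FLen = 64); none of these declarations appear in TargetInfo.cpp.

struct FF  { float a; float b; };  // eligible: fp + fp, needs two FPRs
struct DI  { double d; int i; };   // eligible: fp + int, needs one FPR and one GPR
struct II  { int a; int b; };      // rejected: a second integer field is never allowed
struct DDD { double a, b, c; };    // rejected: more than two fields after flattening
struct LD  { long double x; };     // rejected: 128-bit FP field exceeds FLen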
10700 | |
10701 | // Determine if a struct is eligible for passing according to the floating |
10702 | // point calling convention (i.e., when flattened it contains a single fp |
10703 | // value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and |
10704 | // NeededArgGPRs are incremented appropriately. |
10705 | bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty, |
10706 | CharUnits &Field1Off, |
10707 | llvm::Type *&Field2Ty, |
10708 | CharUnits &Field2Off, |
10709 | int &NeededArgGPRs, |
10710 | int &NeededArgFPRs) const { |
10711 | Field1Ty = nullptr; |
10712 | Field2Ty = nullptr; |
10713 | NeededArgGPRs = 0; |
10714 | NeededArgFPRs = 0; |
10715 | bool IsCandidate = detectFPCCEligibleStructHelper( |
10716 | Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off); |
10717 | // Not really a candidate if we have a single int but no float. |
10718 | if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy()) |
10719 | return false; |
10720 | if (!IsCandidate) |
10721 | return false; |
10722 | if (Field1Ty && Field1Ty->isFloatingPointTy()) |
10723 | NeededArgFPRs++; |
10724 | else if (Field1Ty) |
10725 | NeededArgGPRs++; |
10726 | if (Field2Ty && Field2Ty->isFloatingPointTy()) |
10727 | NeededArgFPRs++; |
10728 | else if (Field2Ty) |
10729 | NeededArgGPRs++; |
10730 | return true; |
10731 | } |
10732 | |
10733 | // Call getCoerceAndExpand for the two-element flattened struct described by |
10734 | // Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an |
10735 | // appropriate coerceToType and unpaddedCoerceToType. |
10736 | ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct( |
10737 | llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty, |
10738 | CharUnits Field2Off) const { |
10739 | SmallVector<llvm::Type *, 3> CoerceElts; |
10740 | SmallVector<llvm::Type *, 2> UnpaddedCoerceElts; |
10741 | if (!Field1Off.isZero()) |
10742 | CoerceElts.push_back(llvm::ArrayType::get( |
10743 | llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity())); |
10744 | |
10745 | CoerceElts.push_back(Field1Ty); |
10746 | UnpaddedCoerceElts.push_back(Field1Ty); |
10747 | |
10748 | if (!Field2Ty) { |
10749 | return ABIArgInfo::getCoerceAndExpand( |
10750 | llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()), |
10751 | UnpaddedCoerceElts[0]); |
10752 | } |
10753 | |
10754 | CharUnits Field2Align = |
10755 | CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(Field2Ty)); |
10756 | CharUnits Field1End = Field1Off + |
10757 | CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty)); |
10758 | CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align); |
10759 | |
10760 | CharUnits Padding = CharUnits::Zero(); |
10761 | if (Field2Off > Field2OffNoPadNoPack) |
10762 | Padding = Field2Off - Field2OffNoPadNoPack; |
10763 | else if (Field2Off != Field2Align && Field2Off > Field1End) |
10764 | Padding = Field2Off - Field1End; |
10765 | |
10766 | bool IsPacked = !Field2Off.isMultipleOf(Field2Align); |
10767 | |
10768 | if (!Padding.isZero()) |
10769 | CoerceElts.push_back(llvm::ArrayType::get( |
10770 | llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity())); |
10771 | |
10772 | CoerceElts.push_back(Field2Ty); |
10773 | UnpaddedCoerceElts.push_back(Field2Ty); |
10774 | |
10775 | auto CoerceToType = |
10776 | llvm::StructType::get(getVMContext(), CoerceElts, IsPacked); |
10777 | auto UnpaddedCoerceToType = |
10778 | llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked); |
10779 | |
10780 | return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType); |
10781 | } |
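A worked example of the coercion above, under the same lp64d assumptions: for a struct holding a double followed by a float, Field1 is a double at offset 0 and Field2 a float at offset 8; Field1End already satisfies the float's ABI alignment, so no padding array is inserted and the struct is not packed.

struct DF { double d; float f; };  // coerce type { double, float }: one double FPR + one float FPR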
10782 | |
10783 | ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed, |
10784 | int &ArgGPRsLeft, |
10785 | int &ArgFPRsLeft) const { |
10786 | assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow"); |
10787 | Ty = useFirstFieldIfTransparentUnion(Ty); |
10788 | |
10789 | // Structures with either a non-trivial destructor or a non-trivial copy |
10790 | // constructor are always passed indirectly. |
10791 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { |
10792 | if (ArgGPRsLeft) |
10793 | ArgGPRsLeft -= 1; |
10794 | return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA == |
10795 | CGCXXABI::RAA_DirectInMemory); |
10796 | } |
10797 | |
10798 | // Ignore empty structs/unions. |
10799 | if (isEmptyRecord(getContext(), Ty, true)) |
10800 | return ABIArgInfo::getIgnore(); |
10801 | |
10802 | uint64_t Size = getContext().getTypeSize(Ty); |
10803 | |
10804 | // Pass floating point values via FPRs if possible. |
10805 | if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() && |
10806 | FLen >= Size && ArgFPRsLeft) { |
10807 | ArgFPRsLeft--; |
10808 | return ABIArgInfo::getDirect(); |
10809 | } |
10810 | |
10811 | // Complex types for the hard float ABI must be passed direct rather than |
10812 | // using CoerceAndExpand. |
10813 | if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) { |
10814 | QualType EltTy = Ty->castAs<ComplexType>()->getElementType(); |
10815 | if (getContext().getTypeSize(EltTy) <= FLen) { |
10816 | ArgFPRsLeft -= 2; |
10817 | return ABIArgInfo::getDirect(); |
10818 | } |
10819 | } |
10820 | |
10821 | if (IsFixed && FLen && Ty->isStructureOrClassType()) { |
10822 | llvm::Type *Field1Ty = nullptr; |
10823 | llvm::Type *Field2Ty = nullptr; |
10824 | CharUnits Field1Off = CharUnits::Zero(); |
10825 | CharUnits Field2Off = CharUnits::Zero(); |
10826 | int NeededArgGPRs = 0; |
10827 | int NeededArgFPRs = 0; |
10828 | bool IsCandidate = |
10829 | detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off, |
10830 | NeededArgGPRs, NeededArgFPRs); |
10831 | if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft && |
10832 | NeededArgFPRs <= ArgFPRsLeft) { |
10833 | ArgGPRsLeft -= NeededArgGPRs; |
10834 | ArgFPRsLeft -= NeededArgFPRs; |
10835 | return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty, |
10836 | Field2Off); |
10837 | } |
10838 | } |
10839 | |
10840 | uint64_t NeededAlign = getContext().getTypeAlign(Ty); |
10841 | bool MustUseStack = false; |
10842 | // Determine the number of GPRs needed to pass the current argument |
10843 | // according to the ABI. 2*XLen-aligned varargs are passed in "aligned" |
10844 | // register pairs, so may consume 3 registers. |
10845 | int NeededArgGPRs = 1; |
10846 | if (!IsFixed && NeededAlign == 2 * XLen) |
10847 | NeededArgGPRs = 2 + (ArgGPRsLeft % 2); |
10848 | else if (Size > XLen && Size <= 2 * XLen) |
10849 | NeededArgGPRs = 2; |
10850 | |
10851 | if (NeededArgGPRs > ArgGPRsLeft) { |
10852 | MustUseStack = true; |
10853 | NeededArgGPRs = ArgGPRsLeft; |
10854 | } |
10855 | |
10856 | ArgGPRsLeft -= NeededArgGPRs; |
10857 | |
10858 | if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) { |
10859 | // Treat an enum type as its underlying type. |
10860 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
10861 | Ty = EnumTy->getDecl()->getIntegerType(); |
10862 | |
10863 | // All integral types are promoted to XLen width, unless passed on the |
10864 | // stack. |
10865 | if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) { |
10866 | return extendType(Ty); |
10867 | } |
10868 | |
10869 | if (const auto *EIT = Ty->getAs<ExtIntType>()) { |
10870 | if (EIT->getNumBits() < XLen && !MustUseStack) |
10871 | return extendType(Ty); |
10872 | if (EIT->getNumBits() > 128 || |
10873 | (!getContext().getTargetInfo().hasInt128Type() && |
10874 | EIT->getNumBits() > 64)) |
10875 | return getNaturalAlignIndirect(Ty, false); |
10876 | } |
10877 | |
10878 | return ABIArgInfo::getDirect(); |
10879 | } |
10880 | |
10881 | // Aggregates which are <= 2*XLen will be passed in registers if possible, |
10882 | // so coerce to integers. |
10883 | if (Size <= 2 * XLen) { |
10884 | unsigned Alignment = getContext().getTypeAlign(Ty); |
10885 | // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is |
10886 | // required, and a 2-element XLen array if only XLen alignment is |
10887 | // required. |
10888 | if (Size <= XLen) { |
10889 | return ABIArgInfo::getDirect( |
10890 | llvm::IntegerType::get(getVMContext(), XLen)); |
10891 | } else if (Alignment == 2 * XLen) { |
10892 | return ABIArgInfo::getDirect( |
10893 | llvm::IntegerType::get(getVMContext(), 2 * XLen)); |
10894 | } else { |
10895 | return ABIArgInfo::getDirect(llvm::ArrayType::get( |
10896 | llvm::IntegerType::get(getVMContext(), XLen), 2)); |
10897 | } |
10898 | } |
10899 | return getNaturalAlignIndirect(Ty, false); |
10900 | } |
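Illustrative outcomes of the integer calling convention path above for small aggregates, assuming XLen = 64 and no FP eligibility (hypothetical declarations):

struct P2i { int a; int b; };        // 64 bits                 -> direct as i64
struct P2l { long a; long b; };      // 128 bits, 64-bit align  -> direct as [2 x i64]
struct A16 { alignas(16) long a; };  // 128 bits, 128-bit align -> direct as i128
struct Big { long a, b, c; };        // wider than 2 * XLen     -> passed indirectly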
10901 | |
10902 | ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const { |
10903 | if (RetTy->isVoidType()) |
10904 | return ABIArgInfo::getIgnore(); |
10905 | |
10906 | int ArgGPRsLeft = 2; |
10907 | int ArgFPRsLeft = FLen ? 2 : 0; |
10908 | |
10909 | // The rules for return and argument types are the same, so defer to |
10910 | // classifyArgumentType. |
10911 | return classifyArgumentType(RetTy, true, ArgGPRsLeft, |
10912 | ArgFPRsLeft); |
10913 | } |
10914 | |
10915 | Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
10916 | QualType Ty) const { |
10917 | CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8); |
10918 | |
10919 | // Empty records are ignored for parameter passing purposes. |
10920 | if (isEmptyRecord(getContext(), Ty, true)) { |
10921 | Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize); |
10922 | Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); |
10923 | return Addr; |
10924 | } |
10925 | |
10926 | auto TInfo = getContext().getTypeInfoInChars(Ty); |
10927 | |
10928 | // Arguments bigger than 2*XLen bytes are passed indirectly. |
10929 | bool IsIndirect = TInfo.Width > 2 * SlotSize; |
10930 | |
10931 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo, |
10932 | SlotSize, /*AllowHigherAlign=*/true); |
10933 | } |
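As a rough sketch of the va_arg rule above: with XLen = 32 the slot size is 4 bytes, so an aggregate wider than 8 bytes is fetched through a pointer stored in the va_list, while smaller values are read from the slots directly (hypothetical types):

struct Small { int a, b; };        // 8 bytes  -> read directly from two 4-byte slots
struct Large { int a, b, c, d; };  // 16 bytes -> the slot holds a pointer to the value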
10934 | |
10935 | ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const { |
10936 | int TySize = getContext().getTypeSize(Ty); |
10937 | // The RV64 ABI requires unsigned 32-bit integers to be sign extended. |
10938 | if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32) |
10939 | return ABIArgInfo::getSignExtend(Ty); |
10940 | return ABIArgInfo::getExtend(Ty); |
10941 | } |
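One visible consequence of the rule above, assuming an rv64 target: a 32-bit unsigned parameter or return value is sign-extended rather than zero-extended, as the RISC-V psABI requires.

unsigned add_one(unsigned x);  // rv64: both x and the return value get 'signext' in the IR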
10942 | |
10943 | namespace { |
10944 | class RISCVTargetCodeGenInfo : public TargetCodeGenInfo { |
10945 | public: |
10946 | RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, |
10947 | unsigned FLen) |
10948 | : TargetCodeGenInfo(std::make_unique<RISCVABIInfo>(CGT, XLen, FLen)) {} |
10949 | |
10950 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
10951 | CodeGen::CodeGenModule &CGM) const override { |
10952 | const auto *FD = dyn_cast_or_null<FunctionDecl>(D); |
10953 | if (!FD) return; |
10954 | |
10955 | const auto *Attr = FD->getAttr<RISCVInterruptAttr>(); |
10956 | if (!Attr) |
10957 | return; |
10958 | |
10959 | const char *Kind; |
10960 | switch (Attr->getInterrupt()) { |
10961 | case RISCVInterruptAttr::user: Kind = "user"; break; |
10962 | case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break; |
10963 | case RISCVInterruptAttr::machine: Kind = "machine"; break; |
10964 | } |
10965 | |
10966 | auto *Fn = cast<llvm::Function>(GV); |
10967 | |
10968 | Fn->addFnAttr("interrupt", Kind); |
10969 | } |
10970 | }; |
10971 | } |
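An illustrative use of the interrupt attribute handled above (not from TargetInfo.cpp): the declaration below is annotated with the LLVM function attribute "interrupt"="machine", which the RISC-V backend uses to emit an interrupt-safe prologue and epilogue.

__attribute__((interrupt("machine"))) void machine_timer_handler(void);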
10972 | |
10973 | //===----------------------------------------------------------------------===// |
10974 | // VE ABI Implementation. |
10975 | //===----------------------------------------------------------------------===// |
10976 | namespace { |
10977 | class VEABIInfo : public DefaultABIInfo { |
10978 | public: |
10979 | VEABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} |
10980 | |
10981 | private: |
10982 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
10983 | ABIArgInfo classifyArgumentType(QualType RetTy) const; |
10984 | void computeInfo(CGFunctionInfo &FI) const override; |
10985 | }; |
10986 | } |
10987 | |
10988 | ABIArgInfo VEABIInfo::classifyReturnType(QualType Ty) const { |
10989 | if (Ty->isAnyComplexType()) |
10990 | return ABIArgInfo::getDirect(); |
10991 | uint64_t Size = getContext().getTypeSize(Ty); |
10992 | if (Size < 64 && Ty->isIntegerType()) |
10993 | return ABIArgInfo::getExtend(Ty); |
10994 | return DefaultABIInfo::classifyReturnType(Ty); |
10995 | } |
10996 | |
10997 | ABIArgInfo VEABIInfo::classifyArgumentType(QualType Ty) const { |
10998 | if (Ty->isAnyComplexType()) |
10999 | return ABIArgInfo::getDirect(); |
11000 | uint64_t Size = getContext().getTypeSize(Ty); |
11001 | if (Size < 64 && Ty->isIntegerType()) |
11002 | return ABIArgInfo::getExtend(Ty); |
11003 | return DefaultABIInfo::classifyArgumentType(Ty); |
11004 | } |
11005 | |
11006 | void VEABIInfo::computeInfo(CGFunctionInfo &FI) const { |
11007 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
11008 | for (auto &Arg : FI.arguments()) |
11009 | Arg.info = classifyArgumentType(Arg.type); |
11010 | } |
11011 | |
11012 | namespace { |
11013 | class VETargetCodeGenInfo : public TargetCodeGenInfo { |
11014 | public: |
11015 | VETargetCodeGenInfo(CodeGenTypes &CGT) |
11016 | : TargetCodeGenInfo(std::make_unique<VEABIInfo>(CGT)) {} |
11017 | |
11018 | // Calls to functions without a prototype are always treated as variadic on VE. |
11019 | bool isNoProtoCallVariadic(const CallArgList &args, |
11020 | const FunctionNoProtoType *fnType) const override { |
11021 | return true; |
11022 | } |
11023 | }; |
11024 | } |
11025 | |
11026 | //===----------------------------------------------------------------------===// |
11027 | // Driver code |
11028 | //===----------------------------------------------------------------------===// |
11029 | |
11030 | bool CodeGenModule::supportsCOMDAT() const { |
11031 | return getTriple().supportsCOMDAT(); |
11032 | } |
11033 | |
11034 | const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { |
11035 | if (TheTargetCodeGenInfo) |
11036 | return *TheTargetCodeGenInfo; |
11037 | |
11038 | // Helper to set the unique_ptr while still keeping the return value. |
11039 | auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & { |
11040 | this->TheTargetCodeGenInfo.reset(P); |
11041 | return *P; |
11042 | }; |
11043 | |
11044 | const llvm::Triple &Triple = getTarget().getTriple(); |
11045 | switch (Triple.getArch()) { |
11046 | default: |
11047 | return SetCGInfo(new DefaultTargetCodeGenInfo(Types)); |
11048 | |
11049 | case llvm::Triple::le32: |
11050 | return SetCGInfo(new PNaClTargetCodeGenInfo(Types)); |
11051 | case llvm::Triple::m68k: |
11052 | return SetCGInfo(new M68kTargetCodeGenInfo(Types)); |
11053 | case llvm::Triple::mips: |
11054 | case llvm::Triple::mipsel: |
11055 | if (Triple.getOS() == llvm::Triple::NaCl) |
11056 | return SetCGInfo(new PNaClTargetCodeGenInfo(Types)); |
11057 | return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true)); |
11058 | |
11059 | case llvm::Triple::mips64: |
11060 | case llvm::Triple::mips64el: |
11061 | return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false)); |
11062 | |
11063 | case llvm::Triple::avr: |
11064 | return SetCGInfo(new AVRTargetCodeGenInfo(Types)); |
11065 | |
11066 | case llvm::Triple::aarch64: |
11067 | case llvm::Triple::aarch64_32: |
11068 | case llvm::Triple::aarch64_be: { |
11069 | AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS; |
11070 | if (getTarget().getABI() == "darwinpcs") |
11071 | Kind = AArch64ABIInfo::DarwinPCS; |
11072 | else if (Triple.isOSWindows()) |
11073 | return SetCGInfo( |
11074 | new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64)); |
11075 | |
11076 | return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind)); |
11077 | } |
11078 | |
11079 | case llvm::Triple::wasm32: |
11080 | case llvm::Triple::wasm64: { |
11081 | WebAssemblyABIInfo::ABIKind Kind = WebAssemblyABIInfo::MVP; |
11082 | if (getTarget().getABI() == "experimental-mv") |
11083 | Kind = WebAssemblyABIInfo::ExperimentalMV; |
11084 | return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types, Kind)); |
11085 | } |
11086 | |
11087 | case llvm::Triple::arm: |
11088 | case llvm::Triple::armeb: |
11089 | case llvm::Triple::thumb: |
11090 | case llvm::Triple::thumbeb: { |
11091 | if (Triple.getOS() == llvm::Triple::Win32) { |
11092 | return SetCGInfo( |
11093 | new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP)); |
11094 | } |
11095 | |
11096 | ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; |
11097 | StringRef ABIStr = getTarget().getABI(); |
11098 | if (ABIStr == "apcs-gnu") |
11099 | Kind = ARMABIInfo::APCS; |
11100 | else if (ABIStr == "aapcs16") |
11101 | Kind = ARMABIInfo::AAPCS16_VFP; |
11102 | else if (CodeGenOpts.FloatABI == "hard" || |
11103 | (CodeGenOpts.FloatABI != "soft" && |
11104 | (Triple.getEnvironment() == llvm::Triple::GNUEABIHF || |
11105 | Triple.getEnvironment() == llvm::Triple::MuslEABIHF || |
11106 | Triple.getEnvironment() == llvm::Triple::EABIHF))) |
11107 | Kind = ARMABIInfo::AAPCS_VFP; |
11108 | |
11109 | return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind)); |
11110 | } |
11111 | |
11112 | case llvm::Triple::ppc: { |
11113 | if (Triple.isOSAIX()) |
11114 | return SetCGInfo(new AIXTargetCodeGenInfo(Types, false)); |
11115 | |
11116 | bool IsSoftFloat = |
11117 | CodeGenOpts.FloatABI == "soft" || getTarget().hasFeature("spe"); |
11118 | bool RetSmallStructInRegABI = |
11119 | PPC32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts); |
11120 | return SetCGInfo( |
11121 | new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI)); |
11122 | } |
11123 | case llvm::Triple::ppcle: { |
11124 | bool IsSoftFloat = CodeGenOpts.FloatABI == "soft"; |
11125 | bool RetSmallStructInRegABI = |
11126 | PPC32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts); |
11127 | return SetCGInfo( |
11128 | new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI)); |
11129 | } |
11130 | case llvm::Triple::ppc64: |
11131 | if (Triple.isOSAIX()) |
11132 | return SetCGInfo(new AIXTargetCodeGenInfo(Types, true)); |
11133 | |
11134 | if (Triple.isOSBinFormatELF()) { |
11135 | PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1; |
11136 | if (getTarget().getABI() == "elfv2") |
11137 | Kind = PPC64_SVR4_ABIInfo::ELFv2; |
11138 | bool IsSoftFloat = CodeGenOpts.FloatABI == "soft"; |
11139 | |
11140 | return SetCGInfo( |
11141 | new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, IsSoftFloat)); |
11142 | } |
11143 | return SetCGInfo(new PPC64TargetCodeGenInfo(Types)); |
11144 | case llvm::Triple::ppc64le: { |
11145 | assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!"); |
11146 | PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2; |
11147 | if (getTarget().getABI() == "elfv1") |
11148 | Kind = PPC64_SVR4_ABIInfo::ELFv1; |
11149 | bool IsSoftFloat = CodeGenOpts.FloatABI == "soft"; |
11150 | |
11151 | return SetCGInfo( |
11152 | new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, IsSoftFloat)); |
11153 | } |
11154 | |
11155 | case llvm::Triple::nvptx: |
11156 | case llvm::Triple::nvptx64: |
11157 | return SetCGInfo(new NVPTXTargetCodeGenInfo(Types)); |
11158 | |
11159 | case llvm::Triple::msp430: |
11160 | return SetCGInfo(new MSP430TargetCodeGenInfo(Types)); |
11161 | |
11162 | case llvm::Triple::riscv32: |
11163 | case llvm::Triple::riscv64: { |
11164 | StringRef ABIStr = getTarget().getABI(); |
11165 | unsigned XLen = getTarget().getPointerWidth(0); |
11166 | unsigned ABIFLen = 0; |
11167 | if (ABIStr.endswith("f")) |
11168 | ABIFLen = 32; |
11169 | else if (ABIStr.endswith("d")) |
11170 | ABIFLen = 64; |
11171 | return SetCGInfo(new RISCVTargetCodeGenInfo(Types, XLen, ABIFLen)); |
11172 | } |
11173 | |
11174 | case llvm::Triple::systemz: { |
11175 | bool SoftFloat = CodeGenOpts.FloatABI == "soft"; |
11176 | bool HasVector = !SoftFloat && getTarget().getABI() == "vector"; |
11177 | return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector, SoftFloat)); |
11178 | } |
11179 | |
11180 | case llvm::Triple::tce: |
11181 | case llvm::Triple::tcele: |
11182 | return SetCGInfo(new TCETargetCodeGenInfo(Types)); |
11183 | |
11184 | case llvm::Triple::x86: { |
11185 | bool IsDarwinVectorABI = Triple.isOSDarwin(); |
11186 | bool RetSmallStructInRegABI = |
11187 | X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts); |
11188 | bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing(); |
11189 | |
11190 | if (Triple.getOS() == llvm::Triple::Win32) { |
11191 | return SetCGInfo(new WinX86_32TargetCodeGenInfo( |
11192 | Types, IsDarwinVectorABI, RetSmallStructInRegABI, |
11193 | IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters)); |
11194 | } else { |
11195 | return SetCGInfo(new X86_32TargetCodeGenInfo( |
11196 | Types, IsDarwinVectorABI, RetSmallStructInRegABI, |
11197 | IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters, |
11198 | CodeGenOpts.FloatABI == "soft")); |
11199 | } |
11200 | } |
11201 | |
11202 | case llvm::Triple::x86_64: { |
11203 | StringRef ABI = getTarget().getABI(); |
11204 | X86AVXABILevel AVXLevel = |
11205 | (ABI == "avx512" |
11206 | ? X86AVXABILevel::AVX512 |
11207 | : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None); |
11208 | |
11209 | switch (Triple.getOS()) { |
11210 | case llvm::Triple::Win32: |
11211 | return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel)); |
11212 | default: |
11213 | return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel)); |
11214 | } |
11215 | } |
11216 | case llvm::Triple::hexagon: |
11217 | return SetCGInfo(new HexagonTargetCodeGenInfo(Types)); |
11218 | case llvm::Triple::lanai: |
11219 | return SetCGInfo(new LanaiTargetCodeGenInfo(Types)); |
11220 | case llvm::Triple::r600: |
11221 | return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types)); |
11222 | case llvm::Triple::amdgcn: |
11223 | return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types)); |
11224 | case llvm::Triple::sparc: |
11225 | return SetCGInfo(new SparcV8TargetCodeGenInfo(Types)); |
11226 | case llvm::Triple::sparcv9: |
11227 | return SetCGInfo(new SparcV9TargetCodeGenInfo(Types)); |
11228 | case llvm::Triple::xcore: |
11229 | return SetCGInfo(new XCoreTargetCodeGenInfo(Types)); |
11230 | case llvm::Triple::arc: |
11231 | return SetCGInfo(new ARCTargetCodeGenInfo(Types)); |
11232 | case llvm::Triple::spir: |
11233 | case llvm::Triple::spir64: |
11234 | return SetCGInfo(new SPIRTargetCodeGenInfo(Types)); |
11235 | case llvm::Triple::ve: |
11236 | return SetCGInfo(new VETargetCodeGenInfo(Types)); |
11237 | } |
11238 | } |
11239 | |
11240 | /// Create an OpenCL kernel for an enqueued block. |
11241 | /// |
11242 | /// The kernel has the same function type as the block invoke function. Its |
11243 | /// name is the name of the block invoke function postfixed with "_kernel". |
11244 | /// It simply calls the block invoke function then returns. |
11245 | llvm::Function * |
11246 | TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF, |
11247 | llvm::Function *Invoke, |
11248 | llvm::Value *BlockLiteral) const { |
11249 | auto *InvokeFT = Invoke->getFunctionType(); |
11250 | llvm::SmallVector<llvm::Type *, 2> ArgTys; |
11251 | for (auto &P : InvokeFT->params()) |
11252 | ArgTys.push_back(P); |
11253 | auto &C = CGF.getLLVMContext(); |
11254 | std::string Name = Invoke->getName().str() + "_kernel"; |
11255 | auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false); |
11256 | auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name, |
11257 | &CGF.CGM.getModule()); |
11258 | auto IP = CGF.Builder.saveIP(); |
11259 | auto *BB = llvm::BasicBlock::Create(C, "entry", F); |
11260 | auto &Builder = CGF.Builder; |
11261 | Builder.SetInsertPoint(BB); |
11262 | llvm::SmallVector<llvm::Value *, 2> Args; |
11263 | for (auto &A : F->args()) |
11264 | Args.push_back(&A); |
11265 | llvm::CallInst *call = Builder.CreateCall(Invoke, Args); |
11266 | call->setCallingConv(Invoke->getCallingConv()); |
11267 | Builder.CreateRetVoid(); |
11268 | Builder.restoreIP(IP); |
11269 | return F; |
11270 | } |
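A rough C-level picture of the wrapper built above, assuming an invoke function with a single block-literal parameter; the helper emits LLVM IR directly and the names here are hypothetical.

extern void block_invoke(void *literal);          // the block invoke function
static void block_invoke_kernel(void *literal) {  // internal linkage, "_kernel" suffix
  block_invoke(literal);                          // same arguments, same calling convention
}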
11271 | |
11272 | /// Create an OpenCL kernel for an enqueued block for AMDGPU. |
11273 | /// |
11274 | /// The type of the first argument (the block literal) is the struct type |
11275 | /// of the block literal instead of a pointer type. The first argument |
11276 | /// (block literal) is passed directly by value to the kernel. The kernel |
11277 | /// allocates the same type of struct on the stack, stores the block literal |
11278 | /// into it, and passes its pointer to the block invoke function. The kernel |
11279 | /// is marked with the "enqueued-block" attribute and kernel argument metadata. |
11280 | llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel( |
11281 | CodeGenFunction &CGF, llvm::Function *Invoke, |
11282 | llvm::Value *BlockLiteral) const { |
11283 | auto &Builder = CGF.Builder; |
11284 | auto &C = CGF.getLLVMContext(); |
11285 | |
11286 | auto *BlockTy = BlockLiteral->getType()->getPointerElementType(); |
11287 | auto *InvokeFT = Invoke->getFunctionType(); |
11288 | llvm::SmallVector<llvm::Type *, 2> ArgTys; |
11289 | llvm::SmallVector<llvm::Metadata *, 8> AddressQuals; |
11290 | llvm::SmallVector<llvm::Metadata *, 8> AccessQuals; |
11291 | llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames; |
11292 | llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames; |
11293 | llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals; |
11294 | llvm::SmallVector<llvm::Metadata *, 8> ArgNames; |
11295 | |
11296 | ArgTys.push_back(BlockTy); |
11297 | ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal")); |
11298 | AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0))); |
11299 | ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal")); |
11300 | ArgTypeQuals.push_back(llvm::MDString::get(C, "")); |
11301 | AccessQuals.push_back(llvm::MDString::get(C, "none")); |
11302 | ArgNames.push_back(llvm::MDString::get(C, "block_literal")); |
11303 | for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) { |
11304 | ArgTys.push_back(InvokeFT->getParamType(I)); |
11305 | ArgTypeNames.push_back(llvm::MDString::get(C, "void*")); |
11306 | AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3))); |
11307 | AccessQuals.push_back(llvm::MDString::get(C, "none")); |
11308 | ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*")); |
11309 | ArgTypeQuals.push_back(llvm::MDString::get(C, "")); |
11310 | ArgNames.push_back( |
11311 | llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str())); |
11312 | } |
11313 | std::string Name = Invoke->getName().str() + "_kernel"; |
11314 | auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false); |
11315 | auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name, |
11316 | &CGF.CGM.getModule()); |
11317 | F->addFnAttr("enqueued-block"); |
11318 | auto IP = CGF.Builder.saveIP(); |
11319 | auto *BB = llvm::BasicBlock::Create(C, "entry", F); |
11320 | Builder.SetInsertPoint(BB); |
11321 | const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy); |
11322 | auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr); |
11323 | BlockPtr->setAlignment(BlockAlign); |
11324 | Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign); |
11325 | auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0)); |
11326 | llvm::SmallVector<llvm::Value *, 2> Args; |
11327 | Args.push_back(Cast); |
11328 | for (auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I) |
11329 | Args.push_back(I); |
11330 | llvm::CallInst *call = Builder.CreateCall(Invoke, Args); |
11331 | call->setCallingConv(Invoke->getCallingConv()); |
11332 | Builder.CreateRetVoid(); |
11333 | Builder.restoreIP(IP); |
11334 | |
11335 | F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals)); |
11336 | F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals)); |
11337 | F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames)); |
11338 | F->setMetadata("kernel_arg_base_type", |
11339 | llvm::MDNode::get(C, ArgBaseTypeNames)); |
11340 | F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals)); |
11341 | if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata) |
11342 | F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames)); |
11343 | |
11344 | return F; |
11345 | } |
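The AMDGPU variant differs in that the block literal arrives by value, is spilled to a stack slot, and is then passed to the invoke function by address; a rough C-level sketch with hypothetical names, simplified to one trailing argument.

struct BlockLiteral { /* layout supplied by the enqueued block */ };
extern void block_invoke(void *literal, void *local_arg1);
static void block_invoke_kernel(BlockLiteral literal, void *local_arg1) {
  BlockLiteral tmp = literal;      // alloca + aligned store in the emitted IR
  block_invoke(&tmp, local_arg1);  // pointer cast to the invoke's first parameter type
}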