clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name TargetInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangCodeGen/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/clang/include -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/../include -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/obj -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangCodeGen/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/clang/lib/CodeGen/TargetInfo.cpp
1 | //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // These classes wrap the information about a call or function |
10 | // definition used to handle ABI compliancy. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "TargetInfo.h" |
15 | #include "ABIInfo.h" |
16 | #include "CGBlocks.h" |
17 | #include "CGCXXABI.h" |
18 | #include "CGValue.h" |
19 | #include "CodeGenFunction.h" |
20 | #include "clang/AST/Attr.h" |
21 | #include "clang/AST/RecordLayout.h" |
22 | #include "clang/Basic/CodeGenOptions.h" |
23 | #include "clang/Basic/DiagnosticFrontend.h" |
24 | #include "clang/Basic/Builtins.h" |
25 | #include "clang/CodeGen/CGFunctionInfo.h" |
26 | #include "clang/CodeGen/SwiftCallingConv.h" |
27 | #include "llvm/ADT/SmallBitVector.h" |
28 | #include "llvm/ADT/StringExtras.h" |
29 | #include "llvm/ADT/StringSwitch.h" |
30 | #include "llvm/ADT/Triple.h" |
31 | #include "llvm/ADT/Twine.h" |
32 | #include "llvm/IR/DataLayout.h" |
33 | #include "llvm/IR/IntrinsicsNVPTX.h" |
34 | #include "llvm/IR/IntrinsicsS390.h" |
35 | #include "llvm/IR/Type.h" |
36 | #include "llvm/Support/raw_ostream.h" |
37 | #include <algorithm> // std::sort |
38 | |
39 | using namespace clang; |
40 | using namespace CodeGen; |
41 | |
42 | |
43 | |
44 | |
45 | |
46 | |
47 | |
48 | |
49 | |
50 | |
51 | |
52 | |
53 | |
54 | |
55 | |
56 | static ABIArgInfo coerceToIntArray(QualType Ty, |
57 | ASTContext &Context, |
58 | llvm::LLVMContext &LLVMContext) { |
59 | |
60 | const uint64_t Size = Context.getTypeSize(Ty); |
61 | const uint64_t Alignment = Context.getTypeAlign(Ty); |
62 | llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment); |
63 | const uint64_t NumElements = (Size + Alignment - 1) / Alignment; |
64 | return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements)); |
65 | } |
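// A quick worked illustration of the coercion above, with hypothetical
// numbers: for a record with Size = 96 bits and Alignment = 32 bits,
// NumElements = (96 + 32 - 1) / 32 = 3, so the value is passed directly as
// the IR type [3 x i32]. The (Alignment - 1) term rounds up, so a 33-bit
// payload still gets two 32-bit elements rather than being truncated to one.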
66 | |
67 | static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, |
68 | llvm::Value *Array, |
69 | llvm::Value *Value, |
70 | unsigned FirstIndex, |
71 | unsigned LastIndex) { |
72 | |
73 | for (unsigned I = FirstIndex; I <= LastIndex; ++I) { |
74 | llvm::Value *Cell = |
75 | Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I); |
76 | Builder.CreateAlignedStore(Value, Cell, CharUnits::One()); |
77 | } |
78 | } |
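// This helper just stores the same byte value into Array[FirstIndex] through
// Array[LastIndex]; later in this file the initDwarfEHRegSizeTable()
// overrides use it to fill ranges of the DWARF EH register-size table
// (e.g. writing an i8 constant 4 for a run of 4-byte registers), which is
// why it works on i8 cells with 1-byte-aligned stores.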
79 | |
80 | static bool isAggregateTypeForABI(QualType T) { |
81 | return !CodeGenFunction::hasScalarEvaluationKind(T) || |
82 | T->isMemberFunctionPointerType(); |
83 | } |
84 | |
85 | ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByVal, |
86 | bool Realign, |
87 | llvm::Type *Padding) const { |
88 | return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), ByVal, |
89 | Realign, Padding); |
90 | } |
91 | |
92 | ABIArgInfo |
93 | ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const { |
94 | return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty), |
95 | false, Realign); |
96 | } |
97 | |
98 | Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, |
99 | QualType Ty) const { |
100 | return Address::invalid(); |
101 | } |
102 | |
103 | bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const { |
104 | if (Ty->isPromotableIntegerType()) |
105 | return true; |
106 | |
107 | if (const auto *EIT = Ty->getAs<ExtIntType>()) |
108 | if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy)) |
109 | return true; |
110 | |
111 | return false; |
112 | } |
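// In effect this encodes C's integer promotions: bool, char and short
// (anything narrower than int) return true and are widened via
// ABIArgInfo::getExtend(), while int and wider return false. The second
// clause applies the same rule to _ExtInt: e.g. _ExtInt(8) is promotable
// on a 32-bit-int target, _ExtInt(64) is not.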
113 | |
114 | ABIInfo::~ABIInfo() {} |
115 | |
116 | |
117 | |
118 | |
119 | |
120 | |
121 | |
122 | |
123 | |
124 | |
125 | |
126 | |
127 | |
128 | |
129 | static bool occupiesMoreThan(CodeGenTypes &cgt, |
130 | ArrayRef<llvm::Type*> scalarTypes, |
131 | unsigned maxAllRegisters) { |
132 | unsigned intCount = 0, fpCount = 0; |
133 | for (llvm::Type *type : scalarTypes) { |
134 | if (type->isPointerTy()) { |
135 | intCount++; |
136 | } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) { |
137 | auto ptrWidth = cgt.getTarget().getPointerWidth(0); |
138 | intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth; |
139 | } else { |
140 | assert(type->isVectorTy() || type->isFloatingPointTy()); |
141 | fpCount++; |
142 | } |
143 | } |
144 | |
145 | return (intCount + fpCount > maxAllRegisters); |
146 | } |
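// Rough illustration of the counting rule, assuming a 64-bit pointer width:
// the scalar sequence { i8*, i128, float } costs 1 + 2 integer registers and
// 1 FP register, 4 in total, so with maxAllRegisters == 4 it still fits;
// one more scalar tips the count over and the Swift lowering below switches
// to passing the value indirectly.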
147 | |
148 | bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize, |
149 | llvm::Type *eltTy, |
150 | unsigned numElts) const { |
151 | |
152 | |
153 | return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16); |
154 | } |
155 | |
156 | static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, |
157 | CGCXXABI &CXXABI) { |
158 | const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); |
159 | if (!RD) { |
160 | if (!RT->getDecl()->canPassInRegisters()) |
161 | return CGCXXABI::RAA_Indirect; |
162 | return CGCXXABI::RAA_Default; |
163 | } |
164 | return CXXABI.getRecordArgABI(RD); |
165 | } |
166 | |
167 | static CGCXXABI::RecordArgABI getRecordArgABI(QualType T, |
168 | CGCXXABI &CXXABI) { |
169 | const RecordType *RT = T->getAs<RecordType>(); |
170 | if (!RT) |
171 | return CGCXXABI::RAA_Default; |
172 | return getRecordArgABI(RT, CXXABI); |
173 | } |
174 | |
175 | static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, |
176 | const ABIInfo &Info) { |
177 | QualType Ty = FI.getReturnType(); |
178 | |
179 | if (const auto *RT = Ty->getAs<RecordType>()) |
180 | if (!isa<CXXRecordDecl>(RT->getDecl()) && |
181 | !RT->getDecl()->canPassInRegisters()) { |
182 | FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty); |
183 | return true; |
184 | } |
185 | |
186 | return CXXABI.classifyReturnType(FI); |
187 | } |
188 | |
189 | |
190 | |
191 | static QualType useFirstFieldIfTransparentUnion(QualType Ty) { |
192 | if (const RecordType *UT = Ty->getAsUnionType()) { |
193 | const RecordDecl *UD = UT->getDecl(); |
194 | if (UD->hasAttr<TransparentUnionAttr>()) { |
195 | assert(!UD->field_empty() && "sema created an empty transparent union"); |
196 | return UD->field_begin()->getType(); |
197 | } |
198 | } |
199 | return Ty; |
200 | } |
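// Illustrative (hypothetical) C input for the rule above:
//   typedef union __attribute__((transparent_union)) {
//     int *ip;
//     float *fp;
//   } any_ptr;
// An argument declared as any_ptr is classified exactly as its first field,
// 'int *', matching the GCC transparent_union calling convention.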
201 | |
202 | CGCXXABI &ABIInfo::getCXXABI() const { |
203 | return CGT.getCXXABI(); |
204 | } |
205 | |
206 | ASTContext &ABIInfo::getContext() const { |
207 | return CGT.getContext(); |
208 | } |
209 | |
210 | llvm::LLVMContext &ABIInfo::getVMContext() const { |
211 | return CGT.getLLVMContext(); |
212 | } |
213 | |
214 | const llvm::DataLayout &ABIInfo::getDataLayout() const { |
215 | return CGT.getDataLayout(); |
216 | } |
217 | |
218 | const TargetInfo &ABIInfo::getTarget() const { |
219 | return CGT.getTarget(); |
220 | } |
221 | |
222 | const CodeGenOptions &ABIInfo::getCodeGenOpts() const { |
223 | return CGT.getCodeGenOpts(); |
224 | } |
225 | |
226 | bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); } |
227 | |
228 | bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { |
229 | return false; |
230 | } |
231 | |
232 | bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, |
233 | uint64_t Members) const { |
234 | return false; |
235 | } |
236 | |
237 | LLVM_DUMP_METHOD void ABIArgInfo::dump() const { |
238 | raw_ostream &OS = llvm::errs(); |
239 | OS << "(ABIArgInfo Kind="; |
240 | switch (TheKind) { |
241 | case Direct: |
242 | OS << "Direct Type="; |
243 | if (llvm::Type *Ty = getCoerceToType()) |
244 | Ty->print(OS); |
245 | else |
246 | OS << "null"; |
247 | break; |
248 | case Extend: |
249 | OS << "Extend"; |
250 | break; |
251 | case Ignore: |
252 | OS << "Ignore"; |
253 | break; |
254 | case InAlloca: |
255 | OS << "InAlloca Offset=" << getInAllocaFieldIndex(); |
256 | break; |
257 | case Indirect: |
258 | OS << "Indirect Align=" << getIndirectAlign().getQuantity() |
259 | << " ByVal=" << getIndirectByVal() |
260 | << " Realign=" << getIndirectRealign(); |
261 | break; |
262 | case IndirectAliased: |
263 | OS << "Indirect Align=" << getIndirectAlign().getQuantity() |
264 | << " AadrSpace=" << getIndirectAddrSpace() |
265 | << " Realign=" << getIndirectRealign(); |
266 | break; |
267 | case Expand: |
268 | OS << "Expand"; |
269 | break; |
270 | case CoerceAndExpand: |
271 | OS << "CoerceAndExpand Type="; |
272 | getCoerceAndExpandType()->print(OS); |
273 | break; |
274 | } |
275 | OS << ")\n"; |
276 | } |
277 | |
278 | |
279 | static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF, |
280 | llvm::Value *Ptr, |
281 | CharUnits Align) { |
282 | llvm::Value *PtrAsInt = Ptr; |
283 | |
284 | PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy); |
285 | PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt, |
286 | llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1)); |
287 | PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt, |
288 | llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity())); |
289 | PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt, |
290 | Ptr->getType(), |
291 | Ptr->getName() + ".aligned"); |
292 | return PtrAsInt; |
293 | } |
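// This is the usual align-up idiom, (ptr + align - 1) & -align, performed in
// the pointer-sized integer domain. Example with Align == 8: 0x1003 becomes
// (0x1003 + 7) & ~7 = 0x1008, while an already-aligned 0x1008 maps to itself.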
294 | |
295 | |
296 | |
297 | |
298 | |
299 | |
300 | |
301 | |
302 | |
303 | |
304 | |
305 | |
306 | |
307 | |
308 | |
309 | static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, |
310 | Address VAListAddr, |
311 | llvm::Type *DirectTy, |
312 | CharUnits DirectSize, |
313 | CharUnits DirectAlign, |
314 | CharUnits SlotSize, |
315 | bool AllowHigherAlign) { |
316 | |
317 | |
318 | if (VAListAddr.getElementType() != CGF.Int8PtrTy) |
319 | VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy); |
320 | |
321 | llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur"); |
322 | |
323 | |
324 | Address Addr = Address::invalid(); |
325 | if (AllowHigherAlign && DirectAlign > SlotSize) { |
326 | Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign), |
327 | DirectAlign); |
328 | } else { |
329 | Addr = Address(Ptr, SlotSize); |
330 | } |
331 | |
332 | |
333 | CharUnits FullDirectSize = DirectSize.alignTo(SlotSize); |
334 | Address NextPtr = |
335 | CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next"); |
336 | CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr); |
337 | |
338 | |
339 | |
340 | if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() && |
341 | !DirectTy->isStructTy()) { |
342 | Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize); |
343 | } |
344 | |
345 | Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy); |
346 | return Addr; |
347 | } |
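// Summary of the va_arg slot handling above: load the current argument
// pointer, round it up when the type wants more than the slot alignment (and
// the ABI allows that), advance the list by the slot-rounded size, and on
// big-endian targets shift small non-aggregate values to the high-addressed
// end of their slot -- e.g. a 4-byte int in an 8-byte slot is read at
// offset 4.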
348 | |
349 | |
350 | |
351 | |
352 | |
353 | |
354 | |
355 | |
356 | |
357 | |
358 | |
359 | |
360 | |
361 | |
362 | static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, |
363 | QualType ValueTy, bool IsIndirect, |
364 | TypeInfoChars ValueInfo, |
365 | CharUnits SlotSizeAndAlign, |
366 | bool AllowHigherAlign) { |
367 | |
368 | CharUnits DirectSize, DirectAlign; |
369 | if (IsIndirect) { |
370 | DirectSize = CGF.getPointerSize(); |
371 | DirectAlign = CGF.getPointerAlign(); |
372 | } else { |
373 | DirectSize = ValueInfo.Width; |
374 | DirectAlign = ValueInfo.Align; |
375 | } |
376 | |
377 | |
378 | llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy); |
379 | if (IsIndirect) |
380 | DirectTy = DirectTy->getPointerTo(0); |
381 | |
382 | Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy, |
383 | DirectSize, DirectAlign, |
384 | SlotSizeAndAlign, |
385 | AllowHigherAlign); |
386 | |
387 | if (IsIndirect) { |
388 | Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.Align); |
389 | } |
390 | |
391 | return Addr; |
392 | |
393 | } |
394 | |
395 | static Address emitMergePHI(CodeGenFunction &CGF, |
396 | Address Addr1, llvm::BasicBlock *Block1, |
397 | Address Addr2, llvm::BasicBlock *Block2, |
398 | const llvm::Twine &Name = "") { |
399 | assert(Addr1.getType() == Addr2.getType()); |
400 | llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name); |
401 | PHI->addIncoming(Addr1.getPointer(), Block1); |
402 | PHI->addIncoming(Addr2.getPointer(), Block2); |
403 | CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment()); |
404 | return Address(PHI, Align); |
405 | } |
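// Used by EmitVAArg implementations that branch into "in registers" and
// "on the stack" paths: the two resulting addresses are merged with a PHI
// and the merged address conservatively takes the smaller alignment.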
406 | |
407 | TargetCodeGenInfo::~TargetCodeGenInfo() = default; |
408 | |
409 | |
410 | |
411 | unsigned TargetCodeGenInfo::getSizeOfUnwindException() const { |
412 | |
413 | |
414 | |
415 | |
416 | |
417 | |
418 | return 32; |
419 | } |
420 | |
421 | bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args, |
422 | const FunctionNoProtoType *fnType) const { |
423 | |
424 | |
425 | |
426 | |
427 | return false; |
428 | } |
429 | |
430 | void |
431 | TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib, |
432 | llvm::SmallString<24> &Opt) const { |
433 | |
434 | |
435 | |
436 | Opt = "-l"; |
437 | Opt += Lib; |
438 | } |
439 | |
440 | unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const { |
441 | |
442 | |
443 | |
444 | |
445 | |
446 | |
447 | |
448 | |
449 | |
450 | |
451 | return llvm::CallingConv::SPIR_KERNEL; |
452 | } |
453 | |
454 | llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM, |
455 | llvm::PointerType *T, QualType QT) const { |
456 | return llvm::ConstantPointerNull::get(T); |
457 | } |
458 | |
459 | LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM, |
460 | const VarDecl *D) const { |
461 | assert(!CGM.getLangOpts().OpenCL && |
462 | !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) && |
463 | "Address space agnostic languages only"); |
464 | return D ? D->getType().getAddressSpace() : LangAS::Default; |
465 | } |
466 | |
467 | llvm::Value *TargetCodeGenInfo::performAddrSpaceCast( |
468 | CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr, |
469 | LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const { |
470 | |
471 | |
472 | if (auto *C = dyn_cast<llvm::Constant>(Src)) |
473 | return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy); |
474 | |
475 | return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( |
476 | Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : ""); |
477 | } |
478 | |
479 | llvm::Constant * |
480 | TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src, |
481 | LangAS SrcAddr, LangAS DestAddr, |
482 | llvm::Type *DestTy) const { |
483 | |
484 | |
485 | return llvm::ConstantExpr::getPointerCast(Src, DestTy); |
486 | } |
487 | |
488 | llvm::SyncScope::ID |
489 | TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts, |
490 | SyncScope Scope, |
491 | llvm::AtomicOrdering Ordering, |
492 | llvm::LLVMContext &Ctx) const { |
493 | return Ctx.getOrInsertSyncScopeID(""); |
494 | } |
495 | |
496 | static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays); |
497 | |
498 | |
499 | |
500 | static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, |
501 | bool AllowArrays) { |
502 | if (FD->isUnnamedBitfield()) |
503 | return true; |
504 | |
505 | QualType FT = FD->getType(); |
506 | |
507 | |
508 | |
509 | bool WasArray = false; |
510 | if (AllowArrays) |
511 | while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { |
512 | if (AT->getSize() == 0) |
513 | return true; |
514 | FT = AT->getElementType(); |
515 | |
516 | |
517 | WasArray = true; |
518 | } |
519 | |
520 | const RecordType *RT = FT->getAs<RecordType>(); |
521 | if (!RT) |
522 | return false; |
523 | |
524 | |
525 | |
526 | |
527 | |
528 | |
529 | |
530 | |
531 | |
532 | |
533 | |
534 | if (isa<CXXRecordDecl>(RT->getDecl()) && |
535 | (WasArray || !FD->hasAttr<NoUniqueAddressAttr>())) |
536 | return false; |
537 | |
538 | return isEmptyRecord(Context, FT, AllowArrays); |
539 | } |
540 | |
541 | |
542 | |
543 | |
544 | static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) { |
545 | const RecordType *RT = T->getAs<RecordType>(); |
546 | if (!RT) |
547 | return false; |
548 | const RecordDecl *RD = RT->getDecl(); |
549 | if (RD->hasFlexibleArrayMember()) |
550 | return false; |
551 | |
552 | |
553 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) |
554 | for (const auto &I : CXXRD->bases()) |
555 | if (!isEmptyRecord(Context, I.getType(), true)) |
556 | return false; |
557 | |
558 | for (const auto *I : RD->fields()) |
559 | if (!isEmptyField(Context, I, AllowArrays)) |
560 | return false; |
561 | return true; |
562 | } |
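// Roughly, with these (illustrative) inputs:
//   struct E {};                               // empty
//   struct B { int : 0; };                     // empty: only an unnamed bit-field
//   struct D { [[no_unique_address]] E e; };   // empty in C++ (attribute required)
//   struct NE { int x; };                      // not empty
// In C, a field whose type is an empty record (or, with AllowArrays, an array
// of them) also counts as empty; in C++ a record-typed field only counts when
// it carries no_unique_address, as isEmptyField() enforces above.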
563 | |
564 | |
565 | |
566 | |
567 | |
568 | |
569 | |
570 | |
571 | |
572 | static const Type *isSingleElementStruct(QualType T, ASTContext &Context) { |
573 | const RecordType *RT = T->getAs<RecordType>(); |
574 | if (!RT) |
575 | return nullptr; |
576 | |
577 | const RecordDecl *RD = RT->getDecl(); |
578 | if (RD->hasFlexibleArrayMember()) |
579 | return nullptr; |
580 | |
581 | const Type *Found = nullptr; |
582 | |
583 | |
584 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { |
585 | for (const auto &I : CXXRD->bases()) { |
586 | |
587 | if (isEmptyRecord(Context, I.getType(), true)) |
588 | continue; |
589 | |
590 | |
591 | if (Found) |
592 | return nullptr; |
593 | |
594 | |
595 | |
596 | Found = isSingleElementStruct(I.getType(), Context); |
597 | if (!Found) |
598 | return nullptr; |
599 | } |
600 | } |
601 | |
602 | |
603 | for (const auto *FD : RD->fields()) { |
604 | QualType FT = FD->getType(); |
605 | |
606 | |
607 | if (isEmptyField(Context, FD, true)) |
608 | continue; |
609 | |
610 | |
611 | |
612 | if (Found) |
613 | return nullptr; |
614 | |
615 | |
616 | while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { |
617 | if (AT->getSize().getZExtValue() != 1) |
618 | break; |
619 | FT = AT->getElementType(); |
620 | } |
621 | |
622 | if (!isAggregateTypeForABI(FT)) { |
623 | Found = FT.getTypePtr(); |
624 | } else { |
625 | Found = isSingleElementStruct(FT, Context); |
626 | if (!Found) |
627 | return nullptr; |
628 | } |
629 | } |
630 | |
631 | |
632 | |
633 | if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T)) |
634 | return nullptr; |
635 | |
636 | return Found; |
637 | } |
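// Illustrative inputs for the walk above:
//   struct S1 { double d; };            // -> the 'double' type
//   struct S2 { struct S1 inner[1]; };  // -> still 'double' (length-1 arrays
//                                       //    and nesting are looked through)
//   struct S3 { float f; float g; };    // -> nullptr (two data members)
// The final size comparison also rejects cases where padding makes the
// struct wider than its single element.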
638 | |
639 | namespace { |
640 | Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty, |
641 | const ABIArgInfo &AI) { |
642 | |
643 | |
644 | |
645 | |
646 | |
647 | |
648 | |
649 | |
650 | |
651 | llvm::Value *Val; |
652 | |
653 | if (AI.isIndirect()) { |
654 | assert(!AI.getPaddingType() && |
655 | "Unexpected PaddingType seen in arginfo in generic VAArg emitter!"); |
656 | assert( |
657 | !AI.getIndirectRealign() && |
658 | "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!"); |
659 | |
660 | auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty); |
661 | CharUnits TyAlignForABI = TyInfo.Align; |
662 | |
663 | llvm::Type *BaseTy = |
664 | llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty)); |
665 | llvm::Value *Addr = |
666 | CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy); |
667 | return Address(Addr, TyAlignForABI); |
668 | } else { |
669 | assert((AI.isDirect() || AI.isExtend()) && |
670 | "Unexpected ArgInfo Kind in generic VAArg emitter!"); |
671 | |
672 | assert(!AI.getInReg() && |
673 | "Unexpected InReg seen in arginfo in generic VAArg emitter!"); |
674 | assert(!AI.getPaddingType() && |
675 | "Unexpected PaddingType seen in arginfo in generic VAArg emitter!"); |
676 | assert(!AI.getDirectOffset() && |
677 | "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!"); |
678 | assert(!AI.getCoerceToType() && |
679 | "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!"); |
680 | |
681 | Address Temp = CGF.CreateMemTemp(Ty, "varet"); |
682 | Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty)); |
683 | CGF.Builder.CreateStore(Val, Temp); |
684 | return Temp; |
685 | } |
686 | } |
687 | |
688 | |
689 | |
690 | |
691 | |
692 | class DefaultABIInfo : public ABIInfo { |
693 | public: |
694 | DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} |
695 | |
696 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
697 | ABIArgInfo classifyArgumentType(QualType RetTy) const; |
698 | |
699 | void computeInfo(CGFunctionInfo &FI) const override { |
700 | if (!getCXXABI().classifyReturnType(FI)) |
701 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
702 | for (auto &I : FI.arguments()) |
703 | I.info = classifyArgumentType(I.type); |
704 | } |
705 | |
706 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
707 | QualType Ty) const override { |
708 | return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty)); |
709 | } |
710 | }; |
711 | |
712 | class DefaultTargetCodeGenInfo : public TargetCodeGenInfo { |
713 | public: |
714 | DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) |
715 | : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {} |
716 | }; |
717 | |
718 | ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const { |
719 | Ty = useFirstFieldIfTransparentUnion(Ty); |
720 | |
721 | if (isAggregateTypeForABI(Ty)) { |
722 | |
723 | |
724 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) |
725 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
726 | |
727 | return getNaturalAlignIndirect(Ty); |
728 | } |
729 | |
730 | |
731 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
732 | Ty = EnumTy->getDecl()->getIntegerType(); |
733 | |
734 | ASTContext &Context = getContext(); |
735 | if (const auto *EIT = Ty->getAs<ExtIntType>()) |
736 | if (EIT->getNumBits() > |
737 | Context.getTypeSize(Context.getTargetInfo().hasInt128Type() |
738 | ? Context.Int128Ty |
739 | : Context.LongLongTy)) |
740 | return getNaturalAlignIndirect(Ty); |
741 | |
742 | return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) |
743 | : ABIArgInfo::getDirect()); |
744 | } |
745 | |
746 | ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const { |
747 | if (RetTy->isVoidType()) |
748 | return ABIArgInfo::getIgnore(); |
749 | |
750 | if (isAggregateTypeForABI(RetTy)) |
751 | return getNaturalAlignIndirect(RetTy); |
752 | |
753 | |
754 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) |
755 | RetTy = EnumTy->getDecl()->getIntegerType(); |
756 | |
757 | if (const auto *EIT = RetTy->getAs<ExtIntType>()) |
758 | if (EIT->getNumBits() > |
759 | getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type() |
760 | ? getContext().Int128Ty |
761 | : getContext().LongLongTy)) |
762 | return getNaturalAlignIndirect(RetTy); |
763 | |
764 | return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) |
765 | : ABIArgInfo::getDirect()); |
766 | } |
767 | |
768 | |
769 | |
770 | |
771 | |
772 | |
773 | |
774 | class WebAssemblyABIInfo final : public SwiftABIInfo { |
775 | public: |
776 | enum ABIKind { |
777 | MVP = 0, |
778 | ExperimentalMV = 1, |
779 | }; |
780 | |
781 | private: |
782 | DefaultABIInfo defaultInfo; |
783 | ABIKind Kind; |
784 | |
785 | public: |
786 | explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind) |
787 | : SwiftABIInfo(CGT), defaultInfo(CGT), Kind(Kind) {} |
788 | |
789 | private: |
790 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
791 | ABIArgInfo classifyArgumentType(QualType Ty) const; |
792 | |
793 | |
794 | |
795 | |
796 | void computeInfo(CGFunctionInfo &FI) const override { |
797 | if (!getCXXABI().classifyReturnType(FI)) |
798 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
799 | for (auto &Arg : FI.arguments()) |
800 | Arg.info = classifyArgumentType(Arg.type); |
801 | } |
802 | |
803 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
804 | QualType Ty) const override; |
805 | |
806 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, |
807 | bool asReturnValue) const override { |
808 | return occupiesMoreThan(CGT, scalars, 4); |
809 | } |
810 | |
811 | bool isSwiftErrorInRegister() const override { |
812 | return false; |
813 | } |
814 | }; |
815 | |
816 | class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo { |
817 | public: |
818 | explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, |
819 | WebAssemblyABIInfo::ABIKind K) |
820 | : TargetCodeGenInfo(std::make_unique<WebAssemblyABIInfo>(CGT, K)) {} |
821 | |
822 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
823 | CodeGen::CodeGenModule &CGM) const override { |
824 | TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); |
825 | if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) { |
826 | if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) { |
827 | llvm::Function *Fn = cast<llvm::Function>(GV); |
828 | llvm::AttrBuilder B; |
829 | B.addAttribute("wasm-import-module", Attr->getImportModule()); |
830 | Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); |
831 | } |
832 | if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) { |
833 | llvm::Function *Fn = cast<llvm::Function>(GV); |
834 | llvm::AttrBuilder B; |
835 | B.addAttribute("wasm-import-name", Attr->getImportName()); |
836 | Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); |
837 | } |
838 | if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) { |
839 | llvm::Function *Fn = cast<llvm::Function>(GV); |
840 | llvm::AttrBuilder B; |
841 | B.addAttribute("wasm-export-name", Attr->getExportName()); |
842 | Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); |
843 | } |
844 | } |
845 | |
846 | if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) { |
847 | llvm::Function *Fn = cast<llvm::Function>(GV); |
848 | if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype()) |
849 | Fn->addFnAttr("no-prototype"); |
850 | } |
851 | } |
852 | }; |
853 | |
854 | |
855 | ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const { |
856 | Ty = useFirstFieldIfTransparentUnion(Ty); |
857 | |
858 | if (isAggregateTypeForABI(Ty)) { |
859 | |
860 | |
861 | if (auto RAA = getRecordArgABI(Ty, getCXXABI())) |
862 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
863 | |
864 | if (isEmptyRecord(getContext(), Ty, true)) |
865 | return ABIArgInfo::getIgnore(); |
866 | |
867 | |
868 | |
869 | if (const Type *SeltTy = isSingleElementStruct(Ty, getContext())) |
870 | return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); |
871 | |
872 | if (Kind == ABIKind::ExperimentalMV) { |
873 | const RecordType *RT = Ty->getAs<RecordType>(); |
874 | assert(RT); |
875 | bool HasBitField = false; |
876 | for (auto *Field : RT->getDecl()->fields()) { |
877 | if (Field->isBitField()) { |
878 | HasBitField = true; |
879 | break; |
880 | } |
881 | } |
882 | if (!HasBitField) |
883 | return ABIArgInfo::getExpand(); |
884 | } |
885 | } |
886 | |
887 | |
888 | return defaultInfo.classifyArgumentType(Ty); |
889 | } |
890 | |
891 | ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const { |
892 | if (isAggregateTypeForABI(RetTy)) { |
893 | |
894 | |
895 | if (!getRecordArgABI(RetTy, getCXXABI())) { |
896 | |
897 | if (isEmptyRecord(getContext(), RetTy, true)) |
898 | return ABIArgInfo::getIgnore(); |
899 | |
900 | |
901 | |
902 | if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) |
903 | return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); |
904 | |
905 | if (Kind == ABIKind::ExperimentalMV) |
906 | return ABIArgInfo::getDirect(); |
907 | } |
908 | } |
909 | |
910 | |
911 | return defaultInfo.classifyReturnType(RetTy); |
912 | } |
913 | |
914 | Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
915 | QualType Ty) const { |
916 | bool IsIndirect = isAggregateTypeForABI(Ty) && |
917 | !isEmptyRecord(getContext(), Ty, true) && |
918 | !isSingleElementStruct(Ty, getContext()); |
919 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, |
920 | getContext().getTypeInfoInChars(Ty), |
921 | CharUnits::fromQuantity(4), |
922 | true); |
923 | } |
924 | |
925 | |
926 | |
927 | |
928 | |
929 | |
930 | |
931 | |
932 | class PNaClABIInfo : public ABIInfo { |
933 | public: |
934 | PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} |
935 | |
936 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
937 | ABIArgInfo classifyArgumentType(QualType RetTy) const; |
938 | |
939 | void computeInfo(CGFunctionInfo &FI) const override; |
940 | Address EmitVAArg(CodeGenFunction &CGF, |
941 | Address VAListAddr, QualType Ty) const override; |
942 | }; |
943 | |
944 | class PNaClTargetCodeGenInfo : public TargetCodeGenInfo { |
945 | public: |
946 | PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) |
947 | : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {} |
948 | }; |
949 | |
950 | void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const { |
951 | if (!getCXXABI().classifyReturnType(FI)) |
952 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
953 | |
954 | for (auto &I : FI.arguments()) |
955 | I.info = classifyArgumentType(I.type); |
956 | } |
957 | |
958 | Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
959 | QualType Ty) const { |
960 | |
961 | |
962 | |
963 | |
964 | |
965 | |
966 | return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect()); |
967 | } |
968 | |
969 | |
970 | ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const { |
971 | if (isAggregateTypeForABI(Ty)) { |
972 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) |
973 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
974 | return getNaturalAlignIndirect(Ty); |
975 | } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) { |
976 | |
977 | Ty = EnumTy->getDecl()->getIntegerType(); |
978 | } else if (Ty->isFloatingType()) { |
979 | |
980 | return ABIArgInfo::getDirect(); |
981 | } else if (const auto *EIT = Ty->getAs<ExtIntType>()) { |
982 | |
983 | if (EIT->getNumBits() > 64) |
984 | return getNaturalAlignIndirect(Ty); |
985 | return ABIArgInfo::getDirect(); |
986 | } |
987 | |
988 | return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) |
989 | : ABIArgInfo::getDirect()); |
990 | } |
991 | |
992 | ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const { |
993 | if (RetTy->isVoidType()) |
994 | return ABIArgInfo::getIgnore(); |
995 | |
996 | |
997 | if (isAggregateTypeForABI(RetTy)) |
998 | return getNaturalAlignIndirect(RetTy); |
999 | |
1000 | |
1001 | if (const auto *EIT = RetTy->getAs<ExtIntType>()) { |
1002 | if (EIT->getNumBits() > 64) |
1003 | return getNaturalAlignIndirect(RetTy); |
1004 | return ABIArgInfo::getDirect(); |
1005 | } |
1006 | |
1007 | |
1008 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) |
1009 | RetTy = EnumTy->getDecl()->getIntegerType(); |
1010 | |
1011 | return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) |
1012 | : ABIArgInfo::getDirect()); |
1013 | } |
1014 | |
1015 | |
1016 | bool IsX86_MMXType(llvm::Type *IRType) { |
1017 | |
1018 | return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 && |
1019 | cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() && |
1020 | IRType->getScalarSizeInBits() != 64; |
1021 | } |
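// Examples for the predicate above: <2 x i32>, <4 x i16> and <8 x i8> are
// 64-bit integer vectors whose scalar size is below 64 and are treated as
// x86_mmx, whereas <1 x i64> (scalar size == 64) and any floating-point
// vector are not.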
1022 | |
1023 | static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF, |
1024 | StringRef Constraint, |
1025 | llvm::Type* Ty) { |
1026 | bool IsMMXCons = llvm::StringSwitch<bool>(Constraint) |
1027 | .Cases("y", "&y", "^Ym", true) |
1028 | .Default(false); |
1029 | if (IsMMXCons && Ty->isVectorTy()) { |
1030 | if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedSize() != |
1031 | 64) { |
1032 | |
1033 | return nullptr; |
1034 | } |
1035 | |
1036 | return llvm::Type::getX86_MMXTy(CGF.getLLVMContext()); |
1037 | } |
1038 | |
1039 | |
1040 | return Ty; |
1041 | } |
1042 | |
1043 | |
1044 | |
1045 | static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) { |
1046 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { |
1047 | if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) { |
1048 | if (BT->getKind() == BuiltinType::LongDouble) { |
1049 | if (&Context.getTargetInfo().getLongDoubleFormat() == |
1050 | &llvm::APFloat::x87DoubleExtended()) |
1051 | return false; |
1052 | } |
1053 | return true; |
1054 | } |
1055 | } else if (const VectorType *VT = Ty->getAs<VectorType>()) { |
1056 | |
1057 | |
1058 | unsigned VecSize = Context.getTypeSize(VT); |
1059 | if (VecSize == 128 || VecSize == 256 || VecSize == 512) |
1060 | return true; |
1061 | } |
1062 | return false; |
1063 | } |
1064 | |
1065 | |
1066 | |
1067 | static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) { |
1068 | return NumMembers <= 4; |
1069 | } |
1070 | |
1071 | |
1072 | static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) { |
1073 | auto AI = ABIArgInfo::getDirect(T); |
1074 | AI.setInReg(true); |
1075 | AI.setCanBeFlattened(false); |
1076 | return AI; |
1077 | } |
1078 | |
1079 | |
1080 | |
1081 | |
1082 | |
1083 | |
1084 | struct CCState { |
1085 | CCState(CGFunctionInfo &FI) |
1086 | : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()) {} |
1087 | |
1088 | llvm::SmallBitVector IsPreassigned; |
1089 | unsigned CC = CallingConv::CC_C; |
1090 | unsigned FreeRegs = 0; |
1091 | unsigned FreeSSERegs = 0; |
1092 | }; |
1093 | |
1094 | |
1095 | class X86_32ABIInfo : public SwiftABIInfo { |
1096 | enum Class { |
1097 | Integer, |
1098 | Float |
1099 | }; |
1100 | |
1101 | static const unsigned MinABIStackAlignInBytes = 4; |
1102 | |
1103 | bool IsDarwinVectorABI; |
1104 | bool IsRetSmallStructInRegABI; |
1105 | bool IsWin32StructABI; |
1106 | bool IsSoftFloatABI; |
1107 | bool IsMCUABI; |
1108 | bool IsLinuxABI; |
1109 | unsigned DefaultNumRegisterParameters; |
1110 | |
1111 | static bool isRegisterSize(unsigned Size) { |
1112 | return (Size == 8 || Size == 16 || Size == 32 || Size == 64); |
1113 | } |
1114 | |
1115 | bool isHomogeneousAggregateBaseType(QualType Ty) const override { |
1116 | |
1117 | return isX86VectorTypeForVectorCall(getContext(), Ty); |
1118 | } |
1119 | |
1120 | bool isHomogeneousAggregateSmallEnough(const Type *Ty, |
1121 | uint64_t NumMembers) const override { |
1122 | |
1123 | return isX86VectorCallAggregateSmallEnough(NumMembers); |
1124 | } |
1125 | |
1126 | bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const; |
1127 | |
1128 | |
1129 | |
1130 | ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const; |
1131 | |
1132 | ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const; |
1133 | |
1134 | |
1135 | unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const; |
1136 | |
1137 | Class classify(QualType Ty) const; |
1138 | ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const; |
1139 | ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const; |
1140 | |
1141 | |
1142 | |
1143 | bool updateFreeRegs(QualType Ty, CCState &State) const; |
1144 | |
1145 | bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg, |
1146 | bool &NeedsPadding) const; |
1147 | bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const; |
1148 | |
1149 | bool canExpandIndirectArgument(QualType Ty) const; |
1150 | |
1151 | |
1152 | |
1153 | void rewriteWithInAlloca(CGFunctionInfo &FI) const; |
1154 | |
1155 | void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields, |
1156 | CharUnits &StackOffset, ABIArgInfo &Info, |
1157 | QualType Type) const; |
1158 | void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const; |
1159 | |
1160 | public: |
1161 | |
1162 | void computeInfo(CGFunctionInfo &FI) const override; |
1163 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
1164 | QualType Ty) const override; |
1165 | |
1166 | X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI, |
1167 | bool RetSmallStructInRegABI, bool Win32StructABI, |
1168 | unsigned NumRegisterParameters, bool SoftFloatABI) |
1169 | : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI), |
1170 | IsRetSmallStructInRegABI(RetSmallStructInRegABI), |
1171 | IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI), |
1172 | IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()), |
1173 | IsLinuxABI(CGT.getTarget().getTriple().isOSLinux()), |
1174 | DefaultNumRegisterParameters(NumRegisterParameters) {} |
1175 | |
1176 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, |
1177 | bool asReturnValue) const override { |
1178 | |
1179 | |
1180 | |
1181 | |
1182 | return occupiesMoreThan(CGT, scalars, 3); |
1183 | } |
1184 | |
1185 | bool isSwiftErrorInRegister() const override { |
1186 | |
1187 | return false; |
1188 | } |
1189 | }; |
1190 | |
1191 | class X86_32TargetCodeGenInfo : public TargetCodeGenInfo { |
1192 | public: |
1193 | X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI, |
1194 | bool RetSmallStructInRegABI, bool Win32StructABI, |
1195 | unsigned NumRegisterParameters, bool SoftFloatABI) |
1196 | : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>( |
1197 | CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI, |
1198 | NumRegisterParameters, SoftFloatABI)) {} |
1199 | |
1200 | static bool isStructReturnInRegABI( |
1201 | const llvm::Triple &Triple, const CodeGenOptions &Opts); |
1202 | |
1203 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
1204 | CodeGen::CodeGenModule &CGM) const override; |
1205 | |
1206 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { |
1207 | |
1208 | if (CGM.getTarget().getTriple().isOSDarwin()) return 5; |
1209 | return 4; |
1210 | } |
1211 | |
1212 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
1213 | llvm::Value *Address) const override; |
1214 | |
1215 | llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, |
1216 | StringRef Constraint, |
1217 | llvm::Type* Ty) const override { |
1218 | return X86AdjustInlineAsmType(CGF, Constraint, Ty); |
1219 | } |
1220 | |
1221 | void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue, |
1222 | std::string &Constraints, |
1223 | std::vector<llvm::Type *> &ResultRegTypes, |
1224 | std::vector<llvm::Type *> &ResultTruncRegTypes, |
1225 | std::vector<LValue> &ResultRegDests, |
1226 | std::string &AsmString, |
1227 | unsigned NumOutputs) const override; |
1228 | |
1229 | llvm::Constant * |
1230 | getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override { |
1231 | unsigned Sig = (0xeb << 0) | |
1232 | (0x06 << 8) | |
1233 | ('v' << 16) | |
1234 | ('2' << 24); |
1235 | return llvm::ConstantInt::get(CGM.Int32Ty, Sig); |
1236 | } |
1237 | |
1238 | StringRef getARCRetainAutoreleasedReturnValueMarker() const override { |
1239 | return "movl\t%ebp, %ebp" |
1240 | "\t\t// marker for objc_retainAutoreleaseReturnValue"; |
1241 | } |
1242 | }; |
1243 | |
1244 | } |
1245 | |
1246 | |
1247 | |
1248 | |
1249 | |
1250 | |
1251 | |
1252 | |
1253 | |
1254 | static void rewriteInputConstraintReferences(unsigned FirstIn, |
1255 | unsigned NumNewOuts, |
1256 | std::string &AsmString) { |
1257 | std::string Buf; |
1258 | llvm::raw_string_ostream OS(Buf); |
1259 | size_t Pos = 0; |
1260 | while (Pos < AsmString.size()) { |
1261 | size_t DollarStart = AsmString.find('$', Pos); |
1262 | if (DollarStart == std::string::npos) |
1263 | DollarStart = AsmString.size(); |
1264 | size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart); |
1265 | if (DollarEnd == std::string::npos) |
1266 | DollarEnd = AsmString.size(); |
1267 | OS << StringRef(&AsmString[Pos], DollarEnd - Pos); |
1268 | Pos = DollarEnd; |
1269 | size_t NumDollars = DollarEnd - DollarStart; |
1270 | if (NumDollars % 2 != 0 && Pos < AsmString.size()) { |
1271 | |
1272 | size_t DigitStart = Pos; |
1273 | if (AsmString[DigitStart] == '{') { |
1274 | OS << '{'; |
1275 | ++DigitStart; |
1276 | } |
1277 | size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart); |
1278 | if (DigitEnd == std::string::npos) |
1279 | DigitEnd = AsmString.size(); |
1280 | StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart); |
1281 | unsigned OperandIndex; |
1282 | if (!OperandStr.getAsInteger(10, OperandIndex)) { |
1283 | if (OperandIndex >= FirstIn) |
1284 | OperandIndex += NumNewOuts; |
1285 | OS << OperandIndex; |
1286 | } else { |
1287 | OS << OperandStr; |
1288 | } |
1289 | Pos = DigitEnd; |
1290 | } |
1291 | } |
1292 | AsmString = std::move(OS.str()); |
1293 | } |
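// Worked example, assuming one pre-existing output and one newly added
// output (FirstIn == 1, NumNewOuts == 1):
//   "mov $1, $0"      -> "mov $2, $0"      ($1 was an input, so it shifts up)
//   "mov $$1, ${1:k}" -> "mov $$1, ${2:k}" (the escaped "$$" is left alone)
// References below FirstIn (here $0, an existing output) keep their numbers.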
1294 | |
1295 | |
1296 | void X86_32TargetCodeGenInfo::addReturnRegisterOutputs( |
1297 | CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints, |
1298 | std::vector<llvm::Type *> &ResultRegTypes, |
1299 | std::vector<llvm::Type *> &ResultTruncRegTypes, |
1300 | std::vector<LValue> &ResultRegDests, std::string &AsmString, |
1301 | unsigned NumOutputs) const { |
1302 | uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType()); |
1303 | |
1304 | |
1305 | |
1306 | if (!Constraints.empty()) |
1307 | Constraints += ','; |
1308 | if (RetWidth <= 32) { |
1309 | Constraints += "={eax}"; |
1310 | ResultRegTypes.push_back(CGF.Int32Ty); |
1311 | } else { |
1312 | |
1313 | Constraints += "=A"; |
1314 | ResultRegTypes.push_back(CGF.Int64Ty); |
1315 | } |
1316 | |
1317 | |
1318 | llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth); |
1319 | ResultTruncRegTypes.push_back(CoerceTy); |
1320 | |
1321 | |
1322 | ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(CGF), |
1323 | CoerceTy->getPointerTo())); |
1324 | ResultRegDests.push_back(ReturnSlot); |
1325 | |
1326 | rewriteInputConstraintReferences(NumOutputs, 1, AsmString); |
1327 | } |
1328 | |
1329 | |
1330 | |
1331 | bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, |
1332 | ASTContext &Context) const { |
1333 | uint64_t Size = Context.getTypeSize(Ty); |
1334 | |
1335 | |
1336 | |
1337 | if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size))) |
1338 | return false; |
1339 | |
1340 | if (Ty->isVectorType()) { |
1341 | |
1342 | |
1343 | if (Size == 64 || Size == 128) |
1344 | return false; |
1345 | |
1346 | return true; |
1347 | } |
1348 | |
1349 | |
1350 | |
1351 | if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() || |
1352 | Ty->isAnyComplexType() || Ty->isEnumeralType() || |
1353 | Ty->isBlockPointerType() || Ty->isMemberPointerType()) |
1354 | return true; |
1355 | |
1356 | |
1357 | if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) |
1358 | return shouldReturnTypeInRegister(AT->getElementType(), Context); |
1359 | |
1360 | |
1361 | const RecordType *RT = Ty->getAs<RecordType>(); |
1362 | if (!RT) return false; |
1363 | |
1364 | |
1365 | |
1366 | |
1367 | |
1368 | for (const auto *FD : RT->getDecl()->fields()) { |
1369 | |
1370 | if (isEmptyField(Context, FD, true)) |
1371 | continue; |
1372 | |
1373 | |
1374 | if (!shouldReturnTypeInRegister(FD->getType(), Context)) |
1375 | return false; |
1376 | } |
1377 | return true; |
1378 | } |
1379 | |
1380 | static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) { |
1381 | |
1382 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) |
1383 | Ty = CTy->getElementType(); |
1384 | |
1385 | |
1386 | |
1387 | |
1388 | if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() && |
1389 | !Ty->isEnumeralType() && !Ty->isBlockPointerType()) |
1390 | return false; |
1391 | |
1392 | uint64_t Size = Context.getTypeSize(Ty); |
1393 | return Size == 32 || Size == 64; |
1394 | } |
1395 | |
1396 | static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, |
1397 | uint64_t &Size) { |
1398 | for (const auto *FD : RD->fields()) { |
1399 | |
1400 | |
1401 | |
1402 | if (!is32Or64BitBasicType(FD->getType(), Context)) |
1403 | return false; |
1404 | |
1405 | |
1406 | |
1407 | |
1408 | if (FD->isBitField()) |
1409 | return false; |
1410 | |
1411 | Size += Context.getTypeSize(FD->getType()); |
1412 | } |
1413 | return true; |
1414 | } |
1415 | |
1416 | static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, |
1417 | uint64_t &Size) { |
1418 | |
1419 | for (const CXXBaseSpecifier &Base : RD->bases()) { |
1420 | if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(), |
1421 | Size)) |
1422 | return false; |
1423 | } |
1424 | if (!addFieldSizes(Context, RD, Size)) |
1425 | return false; |
1426 | return true; |
1427 | } |
1428 | |
1429 | |
1430 | |
1431 | |
1432 | |
1433 | bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const { |
1434 | |
1435 | const RecordType *RT = Ty->getAs<RecordType>(); |
1436 | if (!RT) |
1437 | return false; |
1438 | const RecordDecl *RD = RT->getDecl(); |
1439 | uint64_t Size = 0; |
1440 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { |
1441 | if (!IsWin32StructABI) { |
1442 | |
1443 | |
1444 | if (!CXXRD->isCLike()) |
1445 | return false; |
1446 | } else { |
1447 | |
1448 | if (CXXRD->isDynamicClass()) |
1449 | return false; |
1450 | } |
1451 | if (!addBaseAndFieldSizes(getContext(), CXXRD, Size)) |
1452 | return false; |
1453 | } else { |
1454 | if (!addFieldSizes(getContext(), RD, Size)) |
1455 | return false; |
1456 | } |
1457 | |
1458 | |
1459 | return Size == getContext().getTypeSize(Ty); |
1460 | } |
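// Illustrative classifications for the check above (every field must be a
// 32- or 64-bit "basic" type, no bit-fields, and the sizes must add up to
// the full struct size):
//   struct P { int x; int y; };     // expandable: 32 + 32 == 64
//   struct Q { short s; int x; };   // not expandable: 'short' is not 32/64-bit
//   struct R { int x; int y : 3; }; // not expandable: contains a bit-field
// Expanded arguments are passed as a sequence of scalars instead of
// indirectly by address.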
1461 | |
1462 | ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const { |
1463 | |
1464 | |
1465 | if (State.FreeRegs) { |
1466 | --State.FreeRegs; |
1467 | if (!IsMCUABI) |
1468 | return getNaturalAlignIndirectInReg(RetTy); |
1469 | } |
1470 | return getNaturalAlignIndirect(RetTy, false); |
1471 | } |
1472 | |
1473 | ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, |
1474 | CCState &State) const { |
1475 | if (RetTy->isVoidType()) |
1476 | return ABIArgInfo::getIgnore(); |
1477 | |
1478 | const Type *Base = nullptr; |
1479 | uint64_t NumElts = 0; |
1480 | if ((State.CC == llvm::CallingConv::X86_VectorCall || |
1481 | State.CC == llvm::CallingConv::X86_RegCall) && |
1482 | isHomogeneousAggregate(RetTy, Base, NumElts)) { |
1483 | |
1484 | return ABIArgInfo::getDirect(); |
1485 | } |
1486 | |
1487 | if (const VectorType *VT = RetTy->getAs<VectorType>()) { |
1488 | |
1489 | if (IsDarwinVectorABI) { |
1490 | uint64_t Size = getContext().getTypeSize(RetTy); |
1491 | |
1492 | |
1493 | |
1494 | |
1495 | if (Size == 128) |
1496 | return ABIArgInfo::getDirect(llvm::FixedVectorType::get( |
1497 | llvm::Type::getInt64Ty(getVMContext()), 2)); |
1498 | |
1499 | |
1500 | |
1501 | if ((Size == 8 || Size == 16 || Size == 32) || |
1502 | (Size == 64 && VT->getNumElements() == 1)) |
1503 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), |
1504 | Size)); |
1505 | |
1506 | return getIndirectReturnResult(RetTy, State); |
1507 | } |
1508 | |
1509 | return ABIArgInfo::getDirect(); |
1510 | } |
1511 | |
1512 | if (isAggregateTypeForABI(RetTy)) { |
1513 | if (const RecordType *RT = RetTy->getAs<RecordType>()) { |
1514 | |
1515 | if (RT->getDecl()->hasFlexibleArrayMember()) |
1516 | return getIndirectReturnResult(RetTy, State); |
1517 | } |
1518 | |
1519 | |
1520 | if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType()) |
1521 | return getIndirectReturnResult(RetTy, State); |
1522 | |
1523 | |
1524 | if (isEmptyRecord(getContext(), RetTy, true)) |
1525 | return ABIArgInfo::getIgnore(); |
1526 | |
1527 | |
1528 | |
1529 | if (shouldReturnTypeInRegister(RetTy, getContext())) { |
1530 | uint64_t Size = getContext().getTypeSize(RetTy); |
1531 | |
1532 | |
1533 | |
1534 | |
1535 | |
1536 | |
1537 | if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) |
1538 | if ((!IsWin32StructABI && SeltTy->isRealFloatingType()) |
1539 | || SeltTy->hasPointerRepresentation()) |
1540 | return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); |
1541 | |
1542 | |
1543 | |
1544 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size)); |
1545 | } |
1546 | |
1547 | return getIndirectReturnResult(RetTy, State); |
1548 | } |
1549 | |
1550 | |
1551 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) |
1552 | RetTy = EnumTy->getDecl()->getIntegerType(); |
1553 | |
1554 | if (const auto *EIT = RetTy->getAs<ExtIntType>()) |
1555 | if (EIT->getNumBits() > 64) |
1556 | return getIndirectReturnResult(RetTy, State); |
1557 | |
1558 | return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) |
1559 | : ABIArgInfo::getDirect()); |
1560 | } |
1561 | |
1562 | static bool isSIMDVectorType(ASTContext &Context, QualType Ty) { |
1563 | return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128; |
1564 | } |
1565 | |
1566 | static bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) { |
1567 | const RecordType *RT = Ty->getAs<RecordType>(); |
1568 | if (!RT) |
1569 | return false; |
1570 | const RecordDecl *RD = RT->getDecl(); |
1571 | |
1572 | |
1573 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) |
1574 | for (const auto &I : CXXRD->bases()) |
1575 | if (!isRecordWithSIMDVectorType(Context, I.getType())) |
1576 | return false; |
1577 | |
1578 | for (const auto *i : RD->fields()) { |
1579 | QualType FT = i->getType(); |
1580 | |
1581 | if (isSIMDVectorType(Context, FT)) |
1582 | return true; |
1583 | |
1584 | if (isRecordWithSIMDVectorType(Context, FT)) |
1585 | return true; |
1586 | } |
1587 | |
1588 | return false; |
1589 | } |
1590 | |
1591 | unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty, |
1592 | unsigned Align) const { |
1593 | |
1594 | |
1595 | if (Align <= MinABIStackAlignInBytes) |
1596 | return 0; |
1597 | |
1598 | if (IsLinuxABI) { |
1599 | |
1600 | |
1601 | |
1602 | |
1603 | if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64)) |
1604 | return Align; |
1605 | } |
1606 | |
1607 | if (!IsDarwinVectorABI) { |
1608 | |
1609 | return MinABIStackAlignInBytes; |
1610 | } |
1611 | |
1612 | |
1613 | if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) || |
1614 | isRecordWithSIMDVectorType(getContext(), Ty))) |
1615 | return 16; |
1616 | |
1617 | return MinABIStackAlignInBytes; |
1618 | } |
1619 | |
1620 | ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal, |
1621 | CCState &State) const { |
1622 | if (!ByVal) { |
1623 | if (State.FreeRegs) { |
1624 | --State.FreeRegs; |
1625 | if (!IsMCUABI) |
1626 | return getNaturalAlignIndirectInReg(Ty); |
1627 | } |
1628 | return getNaturalAlignIndirect(Ty, false); |
1629 | } |
1630 | |
1631 | |
1632 | unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; |
1633 | unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign); |
1634 | if (StackAlign == 0) |
1635 | return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), true); |
1636 | |
1637 | |
1638 | |
1639 | bool Realign = TypeAlign > StackAlign; |
1640 | return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign), |
1641 | true, Realign); |
1642 | } |
1643 | |
1644 | X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const { |
1645 | const Type *T = isSingleElementStruct(Ty, getContext()); |
1646 | if (!T) |
1647 | T = Ty.getTypePtr(); |
1648 | |
1649 | if (const BuiltinType *BT = T->getAs<BuiltinType>()) { |
1650 | BuiltinType::Kind K = BT->getKind(); |
1651 | if (K == BuiltinType::Float || K == BuiltinType::Double) |
1652 | return Float; |
1653 | } |
1654 | return Integer; |
1655 | } |
1656 | |
1657 | bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const { |
1658 | if (!IsSoftFloatABI) { |
1659 | Class C = classify(Ty); |
1660 | if (C == Float) |
1661 | return false; |
1662 | } |
1663 | |
1664 | unsigned Size = getContext().getTypeSize(Ty); |
1665 | unsigned SizeInRegs = (Size + 31) / 32; |
1666 | |
1667 | if (SizeInRegs == 0) |
1668 | return false; |
1669 | |
1670 | if (!IsMCUABI) { |
1671 | if (SizeInRegs > State.FreeRegs) { |
1672 | State.FreeRegs = 0; |
1673 | return false; |
1674 | } |
1675 | } else { |
1676 | |
1677 | |
1678 | |
1679 | |
1680 | if (SizeInRegs > State.FreeRegs || SizeInRegs > 2) |
1681 | return false; |
1682 | } |
1683 | |
1684 | State.FreeRegs -= SizeInRegs; |
1685 | return true; |
1686 | } |
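// Worked example for the bookkeeping above, assuming the plain IA-32 C
// convention with State.FreeRegs == 3 (e.g. -mregparm=3) and a hard-float
// ABI: an 'int' costs (32 + 31) / 32 = 1 register, leaving 2; a 64-bit
// 'long long' costs 2 more, leaving 0; a 'double' classifies as Float and
// never consumes an integer register. Under the MCU ABI the request also
// fails outright when it would need more than 2 registers.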
1687 | |
1688 | bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State, |
1689 | bool &InReg, |
1690 | bool &NeedsPadding) const { |
1691 | |
1692 | |
1693 | |
1694 | if (IsWin32StructABI && isAggregateTypeForABI(Ty)) |
1695 | return false; |
1696 | |
1697 | NeedsPadding = false; |
1698 | InReg = !IsMCUABI; |
1699 | |
1700 | if (!updateFreeRegs(Ty, State)) |
1701 | return false; |
1702 | |
1703 | if (IsMCUABI) |
1704 | return true; |
1705 | |
1706 | if (State.CC == llvm::CallingConv::X86_FastCall || |
1707 | State.CC == llvm::CallingConv::X86_VectorCall || |
1708 | State.CC == llvm::CallingConv::X86_RegCall) { |
1709 | if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs) |
1710 | NeedsPadding = true; |
1711 | |
1712 | return false; |
1713 | } |
1714 | |
1715 | return true; |
1716 | } |
1717 | |
1718 | bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const { |
1719 | if (!updateFreeRegs(Ty, State)) |
1720 | return false; |
1721 | |
1722 | if (IsMCUABI) |
1723 | return false; |
1724 | |
1725 | if (State.CC == llvm::CallingConv::X86_FastCall || |
1726 | State.CC == llvm::CallingConv::X86_VectorCall || |
1727 | State.CC == llvm::CallingConv::X86_RegCall) { |
1728 | if (getContext().getTypeSize(Ty) > 32) |
1729 | return false; |
1730 | |
1731 | return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() || |
1732 | Ty->isReferenceType()); |
1733 | } |
1734 | |
1735 | return true; |
1736 | } |
1737 | |
1738 | void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const { |
1739 | |
1740 | |
1741 | |
1742 | |
1743 | |
1744 | |
1745 | |
1746 | |
1747 | |
1748 | MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments(); |
1749 | for (int I = 0, E = Args.size(); I < E; ++I) { |
1750 | const Type *Base = nullptr; |
1751 | uint64_t NumElts = 0; |
1752 | const QualType &Ty = Args[I].type; |
1753 | if ((Ty->isVectorType() || Ty->isBuiltinType()) && |
1754 | isHomogeneousAggregate(Ty, Base, NumElts)) { |
1755 | if (State.FreeSSERegs >= NumElts) { |
1756 | State.FreeSSERegs -= NumElts; |
1757 | Args[I].info = ABIArgInfo::getDirectInReg(); |
1758 | State.IsPreassigned.set(I); |
1759 | } |
1760 | } |
1761 | } |
1762 | } |
1763 | |
1764 | ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, |
1765 | CCState &State) const { |
1766 | |
1767 | bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall; |
1768 | bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall; |
1769 | bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall; |
1770 | |
1771 | Ty = useFirstFieldIfTransparentUnion(Ty); |
1772 | TypeInfo TI = getContext().getTypeInfo(Ty); |
1773 | |
1774 | |
1775 | const RecordType *RT = Ty->getAs<RecordType>(); |
1776 | if (RT) { |
1777 | CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); |
1778 | if (RAA == CGCXXABI::RAA_Indirect) { |
1779 | return getIndirectResult(Ty, false, State); |
1780 | } else if (RAA == CGCXXABI::RAA_DirectInMemory) { |
1781 | |
1782 | return ABIArgInfo::getInAlloca(0); |
1783 | } |
1784 | } |
1785 | |
1786 | |
1787 | |
1788 | const Type *Base = nullptr; |
1789 | uint64_t NumElts = 0; |
1790 | if ((IsRegCall || IsVectorCall) && |
1791 | isHomogeneousAggregate(Ty, Base, NumElts)) { |
1792 | if (State.FreeSSERegs >= NumElts) { |
1793 | State.FreeSSERegs -= NumElts; |
1794 | |
1795 | |
1796 | |
1797 | if (IsVectorCall) |
1798 | return getDirectX86Hva(); |
1799 | |
1800 | if (Ty->isBuiltinType() || Ty->isVectorType()) |
1801 | return ABIArgInfo::getDirect(); |
1802 | return ABIArgInfo::getExpand(); |
1803 | } |
1804 | return getIndirectResult(Ty, false, State); |
1805 | } |
1806 | |
1807 | if (isAggregateTypeForABI(Ty)) { |
1808 | |
1809 | |
1810 | if (RT && RT->getDecl()->hasFlexibleArrayMember()) |
1811 | return getIndirectResult(Ty, true, State); |
1812 | |
1813 | |
1814 | if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true)) |
1815 | return ABIArgInfo::getIgnore(); |
1816 | |
1817 | llvm::LLVMContext &LLVMContext = getVMContext(); |
1818 | llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); |
1819 | bool NeedsPadding = false; |
1820 | bool InReg; |
1821 | if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) { |
1822 | unsigned SizeInRegs = (TI.Width + 31) / 32; |
1823 | SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32); |
1824 | llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); |
1825 | if (InReg) |
1826 | return ABIArgInfo::getDirectInReg(Result); |
1827 | else |
1828 | return ABIArgInfo::getDirect(Result); |
1829 | } |
1830 | llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr; |
1831 | |
1832 | |
1833 | |
1834 | if (IsWin32StructABI && TI.AlignIsRequired && TI.Align > 32) |
1835 | return getIndirectResult(Ty, false, State); |
1836 | |
1837 | |
1838 | |
1839 | |
1840 | |
1841 | |
1842 | |
1843 | if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) && |
1844 | canExpandIndirectArgument(Ty)) |
1845 | return ABIArgInfo::getExpandWithPadding( |
1846 | IsFastCall || IsVectorCall || IsRegCall, PaddingType); |
1847 | |
1848 | return getIndirectResult(Ty, true, State); |
1849 | } |
1850 | |
1851 | if (const VectorType *VT = Ty->getAs<VectorType>()) { |
1852 | |
1853 | |
1854 | |
1855 | if (IsWin32StructABI) { |
1856 | if (TI.Width <= 512 && State.FreeSSERegs > 0) { |
1857 | --State.FreeSSERegs; |
1858 | return ABIArgInfo::getDirectInReg(); |
1859 | } |
1860 | return getIndirectResult(Ty, false, State); |
1861 | } |
1862 | |
1863 | |
1864 | |
1865 | if (IsDarwinVectorABI) { |
1866 | if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) || |
1867 | (TI.Width == 64 && VT->getNumElements() == 1)) |
1868 | return ABIArgInfo::getDirect( |
1869 | llvm::IntegerType::get(getVMContext(), TI.Width)); |
1870 | } |
1871 | |
1872 | if (IsX86_MMXType(CGT.ConvertType(Ty))) |
1873 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64)); |
1874 | |
1875 | return ABIArgInfo::getDirect(); |
1876 | } |
1877 | |
1878 | |
1879 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
1880 | Ty = EnumTy->getDecl()->getIntegerType(); |
1881 | |
1882 | bool InReg = shouldPrimitiveUseInReg(Ty, State); |
1883 | |
1884 | if (isPromotableIntegerTypeForABI(Ty)) { |
1885 | if (InReg) |
1886 | return ABIArgInfo::getExtendInReg(Ty); |
1887 | return ABIArgInfo::getExtend(Ty); |
1888 | } |
1889 | |
1890 | if (const auto * EIT = Ty->getAs<ExtIntType>()) { |
1891 | if (EIT->getNumBits() <= 64) { |
1892 | if (InReg) |
1893 | return ABIArgInfo::getDirectInReg(); |
1894 | return ABIArgInfo::getDirect(); |
1895 | } |
1896 | return getIndirectResult(Ty, false, State); |
1897 | } |
1898 | |
1899 | if (InReg) |
1900 | return ABIArgInfo::getDirectInReg(); |
1901 | return ABIArgInfo::getDirect(); |
1902 | } |
1903 | |
1904 | void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const { |
1905 | CCState State(FI); |
1906 | if (IsMCUABI) |
1907 | State.FreeRegs = 3; |
1908 | else if (State.CC == llvm::CallingConv::X86_FastCall) { |
1909 | State.FreeRegs = 2; |
1910 | State.FreeSSERegs = 3; |
1911 | } else if (State.CC == llvm::CallingConv::X86_VectorCall) { |
1912 | State.FreeRegs = 2; |
1913 | State.FreeSSERegs = 6; |
1914 | } else if (FI.getHasRegParm()) |
1915 | State.FreeRegs = FI.getRegParm(); |
1916 | else if (State.CC == llvm::CallingConv::X86_RegCall) { |
1917 | State.FreeRegs = 5; |
1918 | State.FreeSSERegs = 8; |
1919 | } else if (IsWin32StructABI) { |
1920 | |
1921 | |
1922 | State.FreeRegs = DefaultNumRegisterParameters; |
1923 | State.FreeSSERegs = 3; |
1924 | } else |
1925 | State.FreeRegs = DefaultNumRegisterParameters; |
1926 | |
1927 | if (!::classifyReturnType(getCXXABI(), FI, *this)) { |
1928 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State); |
1929 | } else if (FI.getReturnInfo().isIndirect()) { |
1930 | |
1931 | |
1932 | if (State.FreeRegs) { |
1933 | --State.FreeRegs; |
1934 | if (!IsMCUABI) |
1935 | FI.getReturnInfo().setInReg(true); |
1936 | } |
1937 | } |
1938 | |
1939 | |
1940 | if (FI.isChainCall()) |
1941 | ++State.FreeRegs; |
1942 | |
1943 | |
1944 | |
1945 | if (State.CC == llvm::CallingConv::X86_VectorCall) |
1946 | runVectorCallFirstPass(FI, State); |
1947 | |
1948 | bool UsedInAlloca = false; |
1949 | MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments(); |
1950 | for (int I = 0, E = Args.size(); I < E; ++I) { |
1951 | |
1952 | if (State.IsPreassigned.test(I)) |
1953 | continue; |
1954 | |
1955 | Args[I].info = classifyArgumentType(Args[I].type, State); |
1956 | UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca); |
1957 | } |
1958 | |
1959 | |
1960 | |
1961 | if (UsedInAlloca) |
1962 | rewriteWithInAlloca(FI); |
1963 | } |
1964 | |
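// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of TargetInfo.cpp.
// computeInfo() above seeds the CCState with per-convention register budgets;
// this tiny table reproduces the seed values for the conventions handled
// explicitly. The regparm, Win32 and default cases depend on
// DefaultNumRegisterParameters and are left out. RegBudget, CC32 and
// budgetFor are hypothetical names.
// ---------------------------------------------------------------------------
#include <cstdio>

struct RegBudget { unsigned FreeRegs, FreeSSERegs; };

enum class CC32 { MCU, FastCall, VectorCall, RegCall };

static RegBudget budgetFor(CC32 CC) {
  switch (CC) {
  case CC32::MCU:        return {3, 0};
  case CC32::FastCall:   return {2, 3};
  case CC32::VectorCall: return {2, 6};
  case CC32::RegCall:    return {5, 8};
  }
  return {0, 0};
}

int main() {
  RegBudget B = budgetFor(CC32::VectorCall);
  std::printf("vectorcall: %u GPRs, %u SSE regs\n", B.FreeRegs, B.FreeSSERegs);
  return 0;
}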
1965 | void |
1966 | X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields, |
1967 | CharUnits &StackOffset, ABIArgInfo &Info, |
1968 | QualType Type) const { |
1969 | |
1970 | CharUnits WordSize = CharUnits::fromQuantity(4); |
1971 | assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct"); |
1972 | |
1973 | |
1974 | |
1975 | |
1976 | bool IsIndirect = false; |
1977 | if (Info.isIndirect() && !Info.getIndirectByVal()) |
1978 | IsIndirect = true; |
1979 | Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect); |
1980 | llvm::Type *LLTy = CGT.ConvertTypeForMem(Type); |
1981 | if (IsIndirect) |
1982 | LLTy = LLTy->getPointerTo(0); |
1983 | FrameFields.push_back(LLTy); |
1984 | StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type); |
1985 | |
1986 | |
1987 | CharUnits FieldEnd = StackOffset; |
1988 | StackOffset = FieldEnd.alignTo(WordSize); |
1989 | if (StackOffset != FieldEnd) { |
1990 | CharUnits NumBytes = StackOffset - FieldEnd; |
1991 | llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext()); |
1992 | Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity()); |
1993 | FrameFields.push_back(Ty); |
1994 | } |
1995 | } |
1996 | |
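// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of TargetInfo.cpp.
// addFieldToArgStruct() above advances the inalloca stack offset by the field
// size and then pads the offset back up to a 4-byte boundary, emitting an
// [N x i8] filler field when padding is needed. A stand-alone model of just
// that arithmetic (advanceAndPad is a hypothetical name):
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

// Places a field of FieldSize bytes at Offset, advances Offset past the field
// plus any padding, and returns the number of padding bytes appended.
static std::uint64_t advanceAndPad(std::uint64_t &Offset,
                                   std::uint64_t FieldSize) {
  const std::uint64_t WordSize = 4;   // i386 stack slots are 4 bytes wide
  Offset += FieldSize;
  std::uint64_t FieldEnd = Offset;
  Offset = (Offset + WordSize - 1) / WordSize * WordSize;   // align up
  return Offset - FieldEnd;
}

int main() {
  std::uint64_t Off = 0;
  assert(advanceAndPad(Off, 1) == 3 && Off == 4);   // 1-byte field -> 3 pad bytes
  assert(advanceAndPad(Off, 8) == 0 && Off == 12);  // 8-byte field -> no padding
  return 0;
}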
1997 | static bool isArgInAlloca(const ABIArgInfo &Info) { |
1998 | |
1999 | switch (Info.getKind()) { |
2000 | case ABIArgInfo::InAlloca: |
2001 | return true; |
2002 | case ABIArgInfo::Ignore: |
2003 | case ABIArgInfo::IndirectAliased: |
2004 | return false; |
2005 | case ABIArgInfo::Indirect: |
2006 | case ABIArgInfo::Direct: |
2007 | case ABIArgInfo::Extend: |
2008 | return !Info.getInReg(); |
2009 | case ABIArgInfo::Expand: |
2010 | case ABIArgInfo::CoerceAndExpand: |
2011 | |
2012 | |
2013 | return true; |
2014 | } |
2015 | llvm_unreachable("invalid enum"); |
2016 | } |
2017 | |
2018 | void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const { |
2019 | assert(IsWin32StructABI && "inalloca only supported on win32"); |
2020 | |
2021 | |
2022 | SmallVector<llvm::Type *, 6> FrameFields; |
2023 | |
2024 | |
2025 | CharUnits StackAlign = CharUnits::fromQuantity(4); |
2026 | |
2027 | CharUnits StackOffset; |
2028 | CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end(); |
2029 | |
2030 | |
2031 | bool IsThisCall = |
2032 | FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall; |
2033 | ABIArgInfo &Ret = FI.getReturnInfo(); |
2034 | if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall && |
2035 | isArgInAlloca(I->info)) { |
2036 | addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); |
2037 | ++I; |
2038 | } |
2039 | |
2040 | |
2041 | if (Ret.isIndirect() && !Ret.getInReg()) { |
2042 | addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType()); |
2043 | |
2044 | Ret.setInAllocaSRet(IsWin32StructABI); |
2045 | } |
2046 | |
2047 | |
2048 | if (IsThisCall) |
2049 | ++I; |
2050 | |
2051 | |
2052 | for (; I != E; ++I) { |
2053 | if (isArgInAlloca(I->info)) |
2054 | addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); |
2055 | } |
2056 | |
2057 | FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields, |
2058 | true), |
2059 | StackAlign); |
2060 | } |
2061 | |
2062 | Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF, |
2063 | Address VAListAddr, QualType Ty) const { |
2064 | |
2065 | auto TypeInfo = getContext().getTypeInfoInChars(Ty); |
2066 | |
2067 | |
2068 | |
2069 | |
2070 | |
2071 | TypeInfo.Align = CharUnits::fromQuantity( |
2072 | getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity())); |
2073 | |
2074 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, false, |
2075 | TypeInfo, CharUnits::fromQuantity(4), |
2076 | true); |
2077 | } |
2078 | |
2079 | bool X86_32TargetCodeGenInfo::isStructReturnInRegABI( |
2080 | const llvm::Triple &Triple, const CodeGenOptions &Opts) { |
2081 | assert(Triple.getArch() == llvm::Triple::x86); |
2082 | |
2083 | switch (Opts.getStructReturnConvention()) { |
2084 | case CodeGenOptions::SRCK_Default: |
2085 | break; |
2086 | case CodeGenOptions::SRCK_OnStack: |
2087 | return false; |
2088 | case CodeGenOptions::SRCK_InRegs: |
2089 | return true; |
2090 | } |
2091 | |
2092 | if (Triple.isOSDarwin() || Triple.isOSIAMCU()) |
2093 | return true; |
2094 | |
2095 | switch (Triple.getOS()) { |
2096 | case llvm::Triple::DragonFly: |
2097 | case llvm::Triple::FreeBSD: |
2098 | case llvm::Triple::OpenBSD: |
2099 | case llvm::Triple::Win32: |
2100 | return true; |
2101 | default: |
2102 | return false; |
2103 | } |
2104 | } |
2105 | |
2106 | static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV, |
2107 | CodeGen::CodeGenModule &CGM) { |
2108 | if (!FD->hasAttr<AnyX86InterruptAttr>()) |
2109 | return; |
2110 | |
2111 | llvm::Function *Fn = cast<llvm::Function>(GV); |
2112 | Fn->setCallingConv(llvm::CallingConv::X86_INTR); |
2113 | if (FD->getNumParams() == 0) |
2114 | return; |
2115 | |
2116 | auto PtrTy = cast<PointerType>(FD->getParamDecl(0)->getType()); |
2117 | llvm::Type *ByValTy = CGM.getTypes().ConvertType(PtrTy->getPointeeType()); |
2118 | llvm::Attribute NewAttr = llvm::Attribute::getWithByValType( |
2119 | Fn->getContext(), ByValTy); |
2120 | Fn->addParamAttr(0, NewAttr); |
2121 | } |
2122 | |
2123 | void X86_32TargetCodeGenInfo::setTargetAttributes( |
2124 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { |
2125 | if (GV->isDeclaration()) |
2126 | return; |
2127 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { |
2128 | if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { |
2129 | llvm::Function *Fn = cast<llvm::Function>(GV); |
2130 | Fn->addFnAttr("stackrealign"); |
2131 | } |
2132 | |
2133 | addX86InterruptAttrs(FD, GV, CGM); |
2134 | } |
2135 | } |
2136 | |
2137 | bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( |
2138 | CodeGen::CodeGenFunction &CGF, |
2139 | llvm::Value *Address) const { |
2140 | CodeGen::CGBuilderTy &Builder = CGF.Builder; |
2141 | |
2142 | llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); |
2143 | |
2144 | |
2145 | |
2146 | |
2147 | AssignToArrayRange(Builder, Address, Four8, 0, 8); |
2148 | |
2149 | if (CGF.CGM.getTarget().getTriple().isOSDarwin()) { |
2150 | |
2151 | |
2152 | |
2153 | llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16); |
2154 | AssignToArrayRange(Builder, Address, Sixteen8, 12, 16); |
2155 | |
2156 | } else { |
2157 | |
2158 | |
2159 | Builder.CreateAlignedStore( |
2160 | Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9), |
2161 | CharUnits::One()); |
2162 | |
2163 | |
2164 | |
2165 | |
2166 | llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12); |
2167 | AssignToArrayRange(Builder, Address, Twelve8, 11, 16); |
2168 | } |
2169 | |
2170 | return false; |
2171 | } |
2172 | |
2173 | |
2174 | |
2175 | |
2176 | |
2177 | |
2178 | namespace { |
2179 | |
2180 | enum class X86AVXABILevel { |
2181 | None, |
2182 | AVX, |
2183 | AVX512 |
2184 | }; |
2185 | |
2186 | |
2187 | static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) { |
2188 | switch (AVXLevel) { |
2189 | case X86AVXABILevel::AVX512: |
2190 | return 512; |
2191 | case X86AVXABILevel::AVX: |
2192 | return 256; |
2193 | case X86AVXABILevel::None: |
2194 | return 128; |
2195 | } |
2196 | llvm_unreachable("Unknown AVXLevel"); |
2197 | } |
2198 | |
2199 | |
2200 | class X86_64ABIInfo : public SwiftABIInfo { |
2201 | enum Class { |
2202 | Integer = 0, |
2203 | SSE, |
2204 | SSEUp, |
2205 | X87, |
2206 | X87Up, |
2207 | ComplexX87, |
2208 | NoClass, |
2209 | Memory |
2210 | }; |
2211 | |
2212 | |
2213 | |
2214 | |
2215 | |
2216 | |
2217 | |
2218 | |
2219 | |
2220 | |
2221 | static Class merge(Class Accum, Class Field); |
2222 | |
2223 | |
2224 | |
2225 | |
2226 | |
2227 | |
2228 | |
2229 | |
2230 | |
2231 | |
2232 | |
2233 | |
2234 | |
2235 | |
2236 | |
2237 | void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const; |
2238 | |
2239 | |
2240 | |
2241 | |
2242 | |
2243 | |
2244 | |
2245 | |
2246 | |
2247 | |
2248 | |
2249 | |
2250 | |
2251 | |
2252 | |
2253 | |
2254 | |
2255 | |
2256 | |
2257 | |
2258 | |
2259 | |
2260 | |
2261 | |
2262 | |
2263 | void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi, |
2264 | bool isNamedArg) const; |
2265 | |
2266 | llvm::Type *GetByteVectorType(QualType Ty) const; |
2267 | llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType, |
2268 | unsigned IROffset, QualType SourceTy, |
2269 | unsigned SourceOffset) const; |
2270 | llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType, |
2271 | unsigned IROffset, QualType SourceTy, |
2272 | unsigned SourceOffset) const; |
2273 | |
2274 | |
2275 | |
2276 | ABIArgInfo getIndirectReturnResult(QualType Ty) const; |
2277 | |
2278 | |
2279 | |
2280 | |
2281 | |
2282 | |
2283 | ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const; |
2284 | |
2285 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
2286 | |
2287 | ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs, |
2288 | unsigned &neededInt, unsigned &neededSSE, |
2289 | bool isNamedArg) const; |
2290 | |
2291 | ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt, |
2292 | unsigned &NeededSSE) const; |
2293 | |
2294 | ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt, |
2295 | unsigned &NeededSSE) const; |
2296 | |
2297 | bool IsIllegalVectorType(QualType Ty) const; |
2298 | |
2299 | |
2300 | |
2301 | |
2302 | |
2303 | |
2304 | bool honorsRevision0_98() const { |
2305 | return !getTarget().getTriple().isOSDarwin(); |
2306 | } |
2307 | |
2308 | |
2309 | |
2310 | bool classifyIntegerMMXAsSSE() const { |
2311 | |
2312 | if (getContext().getLangOpts().getClangABICompat() <= |
2313 | LangOptions::ClangABI::Ver3_8) |
2314 | return false; |
2315 | |
2316 | const llvm::Triple &Triple = getTarget().getTriple(); |
2317 | if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4) |
2318 | return false; |
2319 | if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10) |
2320 | return false; |
2321 | return true; |
2322 | } |
2323 | |
2324 | |
2325 | bool passInt128VectorsInMem() const { |
2326 | |
2327 | if (getContext().getLangOpts().getClangABICompat() <= |
2328 | LangOptions::ClangABI::Ver9) |
2329 | return false; |
2330 | |
2331 | const llvm::Triple &T = getTarget().getTriple(); |
2332 | return T.isOSLinux() || T.isOSNetBSD(); |
2333 | } |
2334 | |
2335 | X86AVXABILevel AVXLevel; |
2336 | |
2337 | |
2338 | bool Has64BitPointers; |
2339 | |
2340 | public: |
2341 | X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) : |
2342 | SwiftABIInfo(CGT), AVXLevel(AVXLevel), |
2343 | Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) { |
2344 | } |
2345 | |
2346 | bool isPassedUsingAVXType(QualType type) const { |
2347 | unsigned neededInt, neededSSE; |
2348 | |
2349 | ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE, |
2350 | true); |
2351 | if (info.isDirect()) { |
2352 | llvm::Type *ty = info.getCoerceToType(); |
2353 | if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty)) |
2354 | return vectorTy->getPrimitiveSizeInBits().getFixedSize() > 128; |
2355 | } |
2356 | return false; |
2357 | } |
2358 | |
2359 | void computeInfo(CGFunctionInfo &FI) const override; |
2360 | |
2361 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
2362 | QualType Ty) const override; |
2363 | Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, |
2364 | QualType Ty) const override; |
2365 | |
2366 | bool has64BitPointers() const { |
2367 | return Has64BitPointers; |
2368 | } |
2369 | |
2370 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, |
2371 | bool asReturnValue) const override { |
2372 | return occupiesMoreThan(CGT, scalars, 4); |
2373 | } |
2374 | bool isSwiftErrorInRegister() const override { |
2375 | return true; |
2376 | } |
2377 | }; |
2378 | |
2379 | |
2380 | class WinX86_64ABIInfo : public SwiftABIInfo { |
2381 | public: |
2382 | WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) |
2383 | : SwiftABIInfo(CGT), AVXLevel(AVXLevel), |
2384 | IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {} |
2385 | |
2386 | void computeInfo(CGFunctionInfo &FI) const override; |
2387 | |
2388 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
2389 | QualType Ty) const override; |
2390 | |
2391 | bool isHomogeneousAggregateBaseType(QualType Ty) const override { |
2392 | |
2393 | return isX86VectorTypeForVectorCall(getContext(), Ty); |
2394 | } |
2395 | |
2396 | bool isHomogeneousAggregateSmallEnough(const Type *Ty, |
2397 | uint64_t NumMembers) const override { |
2398 | |
2399 | return isX86VectorCallAggregateSmallEnough(NumMembers); |
2400 | } |
2401 | |
2402 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars, |
2403 | bool asReturnValue) const override { |
2404 | return occupiesMoreThan(CGT, scalars, 4); |
2405 | } |
2406 | |
2407 | bool isSwiftErrorInRegister() const override { |
2408 | return true; |
2409 | } |
2410 | |
2411 | private: |
2412 | ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType, |
2413 | bool IsVectorCall, bool IsRegCall) const; |
2414 | ABIArgInfo reclassifyHvaArgForVectorCall(QualType Ty, unsigned &FreeSSERegs, |
2415 | const ABIArgInfo ¤t) const; |
2416 | |
2417 | X86AVXABILevel AVXLevel; |
2418 | |
2419 | bool IsMingw64; |
2420 | }; |
2421 | |
2422 | class X86_64TargetCodeGenInfo : public TargetCodeGenInfo { |
2423 | public: |
2424 | X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) |
2425 | : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {} |
2426 | |
2427 | const X86_64ABIInfo &getABIInfo() const { |
2428 | return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo()); |
2429 | } |
2430 | |
2431 | |
2432 | |
2433 | bool markARCOptimizedReturnCallsAsNoTail() const override { return true; } |
2434 | |
2435 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { |
2436 | return 7; |
2437 | } |
2438 | |
2439 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
2440 | llvm::Value *Address) const override { |
2441 | llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); |
2442 | |
2443 | |
2444 | |
2445 | AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); |
2446 | return false; |
2447 | } |
2448 | |
2449 | llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, |
2450 | StringRef Constraint, |
2451 | llvm::Type* Ty) const override { |
2452 | return X86AdjustInlineAsmType(CGF, Constraint, Ty); |
2453 | } |
2454 | |
2455 | bool isNoProtoCallVariadic(const CallArgList &args, |
2456 | const FunctionNoProtoType *fnType) const override { |
2457 | |
2458 | |
2459 | |
2460 | |
2461 | |
2462 | |
2463 | if (fnType->getCallConv() == CC_C) { |
2464 | bool HasAVXType = false; |
2465 | for (CallArgList::const_iterator |
2466 | it = args.begin(), ie = args.end(); it != ie; ++it) { |
2467 | if (getABIInfo().isPassedUsingAVXType(it->Ty)) { |
2468 | HasAVXType = true; |
2469 | break; |
2470 | } |
2471 | } |
2472 | |
2473 | if (!HasAVXType) |
2474 | return true; |
2475 | } |
2476 | |
2477 | return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType); |
2478 | } |
2479 | |
2480 | llvm::Constant * |
2481 | getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override { |
2482 | unsigned Sig = (0xeb << 0) | |
2483 | (0x06 << 8) | |
2484 | ('v' << 16) | |
2485 | ('2' << 24); |
2486 | return llvm::ConstantInt::get(CGM.Int32Ty, Sig); |
2487 | } |
2488 | |
2489 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
2490 | CodeGen::CodeGenModule &CGM) const override { |
2491 | if (GV->isDeclaration()) |
2492 | return; |
2493 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { |
2494 | if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { |
2495 | llvm::Function *Fn = cast<llvm::Function>(GV); |
2496 | Fn->addFnAttr("stackrealign"); |
2497 | } |
2498 | |
2499 | addX86InterruptAttrs(FD, GV, CGM); |
2500 | } |
2501 | } |
2502 | |
2503 | void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc, |
2504 | const FunctionDecl *Caller, |
2505 | const FunctionDecl *Callee, |
2506 | const CallArgList &Args) const override; |
2507 | }; |
2508 | |
2509 | static void initFeatureMaps(const ASTContext &Ctx, |
2510 | llvm::StringMap<bool> &CallerMap, |
2511 | const FunctionDecl *Caller, |
2512 | llvm::StringMap<bool> &CalleeMap, |
2513 | const FunctionDecl *Callee) { |
2514 | if (CalleeMap.empty() && CallerMap.empty()) { |
2515 | |
2516 | |
2517 | |
2518 | Ctx.getFunctionFeatureMap(CallerMap, Caller); |
2519 | Ctx.getFunctionFeatureMap(CalleeMap, Callee); |
2520 | } |
2521 | } |
2522 | |
2523 | static bool checkAVXParamFeature(DiagnosticsEngine &Diag, |
2524 | SourceLocation CallLoc, |
2525 | const llvm::StringMap<bool> &CallerMap, |
2526 | const llvm::StringMap<bool> &CalleeMap, |
2527 | QualType Ty, StringRef Feature, |
2528 | bool IsArgument) { |
2529 | bool CallerHasFeat = CallerMap.lookup(Feature); |
2530 | bool CalleeHasFeat = CalleeMap.lookup(Feature); |
2531 | if (!CallerHasFeat && !CalleeHasFeat) |
2532 | return Diag.Report(CallLoc, diag::warn_avx_calling_convention) |
2533 | << IsArgument << Ty << Feature; |
2534 | |
2535 | |
2536 | if (!CallerHasFeat || !CalleeHasFeat) |
2537 | return Diag.Report(CallLoc, diag::err_avx_calling_convention) |
2538 | << IsArgument << Ty << Feature; |
2539 | |
2540 | |
2541 | |
2542 | return false; |
2543 | } |
2544 | |
2545 | static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx, |
2546 | SourceLocation CallLoc, |
2547 | const llvm::StringMap<bool> &CallerMap, |
2548 | const llvm::StringMap<bool> &CalleeMap, QualType Ty, |
2549 | bool IsArgument) { |
2550 | uint64_t Size = Ctx.getTypeSize(Ty); |
2551 | if (Size > 256) |
2552 | return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, |
2553 | "avx512f", IsArgument); |
2554 | |
2555 | if (Size > 128) |
2556 | return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx", |
2557 | IsArgument); |
2558 | |
2559 | return false; |
2560 | } |
2561 | |
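// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of TargetInfo.cpp.
// checkAVXParam() above keys only off the bit-width of the vector crossing
// the call boundary: wider than 256 bits requires the avx512f feature, wider
// than 128 bits requires avx, otherwise no extra feature is needed.
// requiredFeatureFor is a hypothetical name.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstring>

static const char *requiredFeatureFor(unsigned VectorBits) {
  if (VectorBits > 256) return "avx512f";
  if (VectorBits > 128) return "avx";
  return nullptr;                 // <= 128 bits: no extra feature required
}

int main() {
  assert(std::strcmp(requiredFeatureFor(512), "avx512f") == 0);
  assert(std::strcmp(requiredFeatureFor(256), "avx") == 0);
  assert(requiredFeatureFor(128) == nullptr);
  return 0;
}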
2562 | void X86_64TargetCodeGenInfo::checkFunctionCallABI( |
2563 | CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller, |
2564 | const FunctionDecl *Callee, const CallArgList &Args) const { |
2565 | llvm::StringMap<bool> CallerMap; |
2566 | llvm::StringMap<bool> CalleeMap; |
2567 | unsigned ArgIndex = 0; |
2568 | |
2569 | |
2570 | |
2571 | for (const CallArg &Arg : Args) { |
2572 | |
2573 | |
2574 | |
2575 | |
2576 | |
2577 | |
2578 | |
2579 | if (Arg.getType()->isVectorType() && |
2580 | CGM.getContext().getTypeSize(Arg.getType()) > 128) { |
2581 | initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee); |
2582 | QualType Ty = Arg.getType(); |
2583 | |
2584 | |
2585 | if (ArgIndex < Callee->getNumParams()) |
2586 | Ty = Callee->getParamDecl(ArgIndex)->getType(); |
2587 | |
2588 | if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap, |
2589 | CalleeMap, Ty, true)) |
2590 | return; |
2591 | } |
2592 | ++ArgIndex; |
2593 | } |
2594 | |
2595 | |
2596 | |
2597 | if (Callee->getReturnType()->isVectorType() && |
2598 | CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) { |
2599 | initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee); |
2600 | checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap, |
2601 | CalleeMap, Callee->getReturnType(), |
2602 | false); |
2603 | } |
2604 | } |
2605 | |
2606 | static std::string qualifyWindowsLibrary(llvm::StringRef Lib) { |
2607 | |
2608 | |
2609 | |
2610 | bool Quote = (Lib.find(' ') != StringRef::npos); |
2611 | std::string ArgStr = Quote ? "\"" : ""; |
2612 | ArgStr += Lib; |
2613 | if (!Lib.endswith_insensitive(".lib") && !Lib.endswith_insensitive(".a")) |
2614 | ArgStr += ".lib"; |
2615 | ArgStr += Quote ? "\"" : ""; |
2616 | return ArgStr; |
2617 | } |
2618 | |
2619 | class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo { |
2620 | public: |
2621 | WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, |
2622 | bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI, |
2623 | unsigned NumRegisterParameters) |
2624 | : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI, |
2625 | Win32StructABI, NumRegisterParameters, false) {} |
2626 | |
2627 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
2628 | CodeGen::CodeGenModule &CGM) const override; |
2629 | |
2630 | void getDependentLibraryOption(llvm::StringRef Lib, |
2631 | llvm::SmallString<24> &Opt) const override { |
2632 | Opt = "/DEFAULTLIB:"; |
2633 | Opt += qualifyWindowsLibrary(Lib); |
2634 | } |
2635 | |
2636 | void getDetectMismatchOption(llvm::StringRef Name, |
2637 | llvm::StringRef Value, |
2638 | llvm::SmallString<32> &Opt) const override { |
2639 | Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; |
2640 | } |
2641 | }; |
2642 | |
2643 | static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
2644 | CodeGen::CodeGenModule &CGM) { |
2645 | if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) { |
2646 | |
2647 | if (CGM.getCodeGenOpts().StackProbeSize != 4096) |
2648 | Fn->addFnAttr("stack-probe-size", |
2649 | llvm::utostr(CGM.getCodeGenOpts().StackProbeSize)); |
2650 | if (CGM.getCodeGenOpts().NoStackArgProbe) |
2651 | Fn->addFnAttr("no-stack-arg-probe"); |
2652 | } |
2653 | } |
2654 | |
2655 | void WinX86_32TargetCodeGenInfo::setTargetAttributes( |
2656 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { |
2657 | X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); |
2658 | if (GV->isDeclaration()) |
2659 | return; |
2660 | addStackProbeTargetAttributes(D, GV, CGM); |
2661 | } |
2662 | |
2663 | class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo { |
2664 | public: |
2665 | WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, |
2666 | X86AVXABILevel AVXLevel) |
2667 | : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {} |
2668 | |
2669 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
2670 | CodeGen::CodeGenModule &CGM) const override; |
2671 | |
2672 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { |
2673 | return 7; |
2674 | } |
2675 | |
2676 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
2677 | llvm::Value *Address) const override { |
2678 | llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); |
2679 | |
2680 | |
2681 | |
2682 | AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); |
2683 | return false; |
2684 | } |
2685 | |
2686 | void getDependentLibraryOption(llvm::StringRef Lib, |
2687 | llvm::SmallString<24> &Opt) const override { |
2688 | Opt = "/DEFAULTLIB:"; |
2689 | Opt += qualifyWindowsLibrary(Lib); |
2690 | } |
2691 | |
2692 | void getDetectMismatchOption(llvm::StringRef Name, |
2693 | llvm::StringRef Value, |
2694 | llvm::SmallString<32> &Opt) const override { |
2695 | Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; |
2696 | } |
2697 | }; |
2698 | |
2699 | void WinX86_64TargetCodeGenInfo::setTargetAttributes( |
2700 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { |
2701 | TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); |
2702 | if (GV->isDeclaration()) |
2703 | return; |
2704 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { |
2705 | if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { |
2706 | llvm::Function *Fn = cast<llvm::Function>(GV); |
2707 | Fn->addFnAttr("stackrealign"); |
2708 | } |
2709 | |
2710 | addX86InterruptAttrs(FD, GV, CGM); |
2711 | } |
2712 | |
2713 | addStackProbeTargetAttributes(D, GV, CGM); |
2714 | } |
2715 | } |
2716 | |
2717 | void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, |
2718 | Class &Hi) const { |
2719 | |
2720 | |
2721 | |
2722 | |
2723 | |
2724 | |
2725 | |
2726 | |
2727 | |
2728 | |
2729 | |
2730 | |
2731 | |
2732 | |
2733 | |
2734 | |
2735 | |
2736 | |
2737 | |
2738 | |
2739 | |
2740 | if (Hi == Memory) |
2741 | Lo = Memory; |
2742 | if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) |
2743 | Lo = Memory; |
2744 | if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) |
2745 | Lo = Memory; |
2746 | if (Hi == SSEUp && Lo != SSE) |
2747 | Hi = SSE; |
2748 | } |
2749 | |
2750 | X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { |
2751 | |
2752 | |
2753 | |
2754 | |
2755 | |
2756 | |
2757 | |
2758 | |
2759 | |
2760 | |
2761 | |
2762 | |
2763 | |
2764 | |
2765 | |
2766 | |
2767 | |
2768 | |
2769 | |
2770 | |
2771 | |
2772 | |
2773 | |
2774 | assert((Accum != Memory && Accum != ComplexX87) && |
2775 | "Invalid accumulated classification during merge."); |
2776 | if (Accum == Field || Field == NoClass) |
2777 | return Accum; |
2778 | if (Field == Memory) |
2779 | return Memory; |
2780 | if (Accum == NoClass) |
2781 | return Field; |
2782 | if (Accum == Integer || Field == Integer) |
2783 | return Integer; |
2784 | if (Field == X87 || Field == X87Up || Field == ComplexX87 || |
2785 | Accum == X87 || Accum == X87Up) |
2786 | return Memory; |
2787 | return SSE; |
2788 | } |
2789 | |
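// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of TargetInfo.cpp. A
// stand-alone restatement of the eightbyte merge step of the SysV x86-64
// classification implemented by X86_64ABIInfo::merge() above, handy for
// experimenting with the rules in isolation. Cls and mergeCls are
// hypothetical names.
// ---------------------------------------------------------------------------
#include <cassert>

enum Cls { Integer, SSE, SSEUp, X87, X87Up, ComplexX87, NoCls, Memory };

static Cls mergeCls(Cls Accum, Cls Field) {
  if (Accum == Field || Field == NoCls) return Accum;
  if (Field == Memory)                  return Memory;
  if (Accum == NoCls)                   return Field;
  if (Accum == Integer || Field == Integer) return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)   return Memory;
  return SSE;
}

int main() {
  assert(mergeCls(NoCls, SSE) == SSE);        // first field decides
  assert(mergeCls(SSE, Integer) == Integer);  // INTEGER dominates SSE
  assert(mergeCls(SSE, X87) == Memory);       // x87 classes force memory
  assert(mergeCls(SSE, SSE) == SSE);
  return 0;
}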
2790 | void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, |
2791 | Class &Lo, Class &Hi, bool isNamedArg) const { |
2792 | |
2793 | |
2794 | |
2795 | |
2796 | |
2797 | |
2798 | |
2799 | |
2800 | Lo = Hi = NoClass; |
2801 | |
2802 | Class &Current = OffsetBase < 64 ? Lo : Hi; |
2803 | Current = Memory; |
2804 | |
2805 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { |
2806 | BuiltinType::Kind k = BT->getKind(); |
2807 | |
2808 | if (k == BuiltinType::Void) { |
2809 | Current = NoClass; |
2810 | } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { |
2811 | Lo = Integer; |
2812 | Hi = Integer; |
2813 | } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { |
2814 | Current = Integer; |
2815 | } else if (k == BuiltinType::Float || k == BuiltinType::Double) { |
2816 | Current = SSE; |
2817 | } else if (k == BuiltinType::LongDouble) { |
2818 | const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); |
2819 | if (LDF == &llvm::APFloat::IEEEquad()) { |
2820 | Lo = SSE; |
2821 | Hi = SSEUp; |
2822 | } else if (LDF == &llvm::APFloat::x87DoubleExtended()) { |
2823 | Lo = X87; |
2824 | Hi = X87Up; |
2825 | } else if (LDF == &llvm::APFloat::IEEEdouble()) { |
2826 | Current = SSE; |
2827 | } else |
2828 | llvm_unreachable("unexpected long double representation!"); |
2829 | } |
2830 | |
2831 | |
2832 | return; |
2833 | } |
2834 | |
2835 | if (const EnumType *ET = Ty->getAs<EnumType>()) { |
2836 | |
2837 | classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg); |
2838 | return; |
2839 | } |
2840 | |
2841 | if (Ty->hasPointerRepresentation()) { |
2842 | Current = Integer; |
2843 | return; |
2844 | } |
2845 | |
2846 | if (Ty->isMemberPointerType()) { |
2847 | if (Ty->isMemberFunctionPointerType()) { |
2848 | if (Has64BitPointers) { |
2849 | |
2850 | |
2851 | Lo = Hi = Integer; |
2852 | } else { |
2853 | |
2854 | |
2855 | uint64_t EB_FuncPtr = (OffsetBase) / 64; |
2856 | uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64; |
2857 | if (EB_FuncPtr != EB_ThisAdj) { |
2858 | Lo = Hi = Integer; |
2859 | } else { |
2860 | Current = Integer; |
2861 | } |
2862 | } |
2863 | } else { |
2864 | Current = Integer; |
2865 | } |
2866 | return; |
2867 | } |
2868 | |
2869 | if (const VectorType *VT = Ty->getAs<VectorType>()) { |
2870 | uint64_t Size = getContext().getTypeSize(VT); |
2871 | if (Size == 1 || Size == 8 || Size == 16 || Size == 32) { |
2872 | |
2873 | |
2874 | |
2875 | |
2876 | Current = Integer; |
2877 | |
2878 | |
2879 | |
2880 | uint64_t EB_Lo = (OffsetBase) / 64; |
2881 | uint64_t EB_Hi = (OffsetBase + Size - 1) / 64; |
2882 | if (EB_Lo != EB_Hi) |
2883 | Hi = Lo; |
2884 | } else if (Size == 64) { |
2885 | QualType ElementType = VT->getElementType(); |
2886 | |
2887 | |
2888 | if (ElementType->isSpecificBuiltinType(BuiltinType::Double)) |
2889 | return; |
2890 | |
2891 | |
2892 | |
2893 | |
2894 | if (!classifyIntegerMMXAsSSE() && |
2895 | (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) || |
2896 | ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) || |
2897 | ElementType->isSpecificBuiltinType(BuiltinType::Long) || |
2898 | ElementType->isSpecificBuiltinType(BuiltinType::ULong))) |
2899 | Current = Integer; |
2900 | else |
2901 | Current = SSE; |
2902 | |
2903 | |
2904 | |
2905 | if (OffsetBase && OffsetBase != 64) |
2906 | Hi = Lo; |
2907 | } else if (Size == 128 || |
2908 | (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) { |
2909 | QualType ElementType = VT->getElementType(); |
2910 | |
2911 | |
2912 | if (passInt128VectorsInMem() && Size != 128 && |
2913 | (ElementType->isSpecificBuiltinType(BuiltinType::Int128) || |
2914 | ElementType->isSpecificBuiltinType(BuiltinType::UInt128))) |
2915 | return; |
2916 | |
2917 | |
2918 | |
2919 | |
2920 | |
2921 | |
2922 | |
2923 | |
2924 | |
2925 | |
2926 | |
2927 | |
2928 | |
2929 | |
2930 | |
2931 | Lo = SSE; |
2932 | Hi = SSEUp; |
2933 | } |
2934 | return; |
2935 | } |
2936 | |
2937 | if (const ComplexType *CT = Ty->getAs<ComplexType>()) { |
2938 | QualType ET = getContext().getCanonicalType(CT->getElementType()); |
2939 | |
2940 | uint64_t Size = getContext().getTypeSize(Ty); |
2941 | if (ET->isIntegralOrEnumerationType()) { |
2942 | if (Size <= 64) |
2943 | Current = Integer; |
2944 | else if (Size <= 128) |
2945 | Lo = Hi = Integer; |
2946 | } else if (ET == getContext().FloatTy) { |
2947 | Current = SSE; |
2948 | } else if (ET == getContext().DoubleTy) { |
2949 | Lo = Hi = SSE; |
2950 | } else if (ET == getContext().LongDoubleTy) { |
2951 | const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); |
2952 | if (LDF == &llvm::APFloat::IEEEquad()) |
2953 | Current = Memory; |
2954 | else if (LDF == &llvm::APFloat::x87DoubleExtended()) |
2955 | Current = ComplexX87; |
2956 | else if (LDF == &llvm::APFloat::IEEEdouble()) |
2957 | Lo = Hi = SSE; |
2958 | else |
2959 | llvm_unreachable("unexpected long double representation!"); |
2960 | } |
2961 | |
2962 | |
2963 | |
2964 | uint64_t EB_Real = (OffsetBase) / 64; |
2965 | uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64; |
2966 | if (Hi == NoClass && EB_Real != EB_Imag) |
2967 | Hi = Lo; |
2968 | |
2969 | return; |
2970 | } |
2971 | |
2972 | if (const auto *EITy = Ty->getAs<ExtIntType>()) { |
2973 | if (EITy->getNumBits() <= 64) |
2974 | Current = Integer; |
2975 | else if (EITy->getNumBits() <= 128) |
2976 | Lo = Hi = Integer; |
2977 | |
2978 | return; |
2979 | } |
2980 | |
2981 | if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { |
2982 | |
2983 | |
2984 | uint64_t Size = getContext().getTypeSize(Ty); |
2985 | |
2986 | |
2987 | |
2988 | if (Size > 512) |
2989 | return; |
2990 | |
2991 | |
2992 | |
2993 | |
2994 | |
2995 | if (OffsetBase % getContext().getTypeAlign(AT->getElementType())) |
2996 | return; |
2997 | |
2998 | |
2999 | |
3000 | Current = NoClass; |
3001 | uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); |
3002 | uint64_t ArraySize = AT->getSize().getZExtValue(); |
3003 | |
3004 | |
3005 | |
3006 | |
3007 | |
3008 | if (Size > 128 && |
3009 | (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel))) |
3010 | return; |
3011 | |
3012 | for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { |
3013 | Class FieldLo, FieldHi; |
3014 | classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg); |
3015 | Lo = merge(Lo, FieldLo); |
3016 | Hi = merge(Hi, FieldHi); |
3017 | if (Lo == Memory || Hi == Memory) |
3018 | break; |
3019 | } |
3020 | |
3021 | postMerge(Size, Lo, Hi); |
3022 | assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); |
3023 | return; |
3024 | } |
3025 | |
3026 | if (const RecordType *RT = Ty->getAs<RecordType>()) { |
3027 | uint64_t Size = getContext().getTypeSize(Ty); |
3028 | |
3029 | |
3030 | |
3031 | if (Size > 512) |
3032 | return; |
3033 | |
3034 | |
3035 | |
3036 | |
3037 | if (getRecordArgABI(RT, getCXXABI())) |
3038 | return; |
3039 | |
3040 | const RecordDecl *RD = RT->getDecl(); |
3041 | |
3042 | |
3043 | if (RD->hasFlexibleArrayMember()) |
3044 | return; |
3045 | |
3046 | const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); |
3047 | |
3048 | |
3049 | Current = NoClass; |
3050 | |
3051 | |
3052 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { |
3053 | for (const auto &I : CXXRD->bases()) { |
3054 | assert(!I.isVirtual() && !I.getType()->isDependentType() && |
3055 | "Unexpected base class!"); |
3056 | const auto *Base = |
3057 | cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); |
3058 | |
3059 | |
3060 | |
3061 | |
3062 | |
3063 | |
3064 | Class FieldLo, FieldHi; |
3065 | uint64_t Offset = |
3066 | OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base)); |
3067 | classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg); |
3068 | Lo = merge(Lo, FieldLo); |
3069 | Hi = merge(Hi, FieldHi); |
3070 | if (Lo == Memory || Hi == Memory) { |
3071 | postMerge(Size, Lo, Hi); |
3072 | return; |
3073 | } |
3074 | } |
3075 | } |
3076 | |
3077 | |
3078 | unsigned idx = 0; |
3079 | bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <= |
3080 | LangOptions::ClangABI::Ver11 || |
3081 | getContext().getTargetInfo().getTriple().isPS4(); |
3082 | bool IsUnion = RT->isUnionType() && !UseClang11Compat; |
3083 | |
3084 | for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); |
3085 | i != e; ++i, ++idx) { |
3086 | uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); |
3087 | bool BitField = i->isBitField(); |
3088 | |
3089 | |
3090 | if (BitField && i->isUnnamedBitfield()) |
3091 | continue; |
3092 | |
3093 | |
3094 | |
3095 | |
3096 | |
3097 | |
3098 | |
3099 | |
3100 | |
3101 | |
3102 | if (Size > 128 && |
3103 | ((!IsUnion && Size != getContext().getTypeSize(i->getType())) || |
3104 | Size > getNativeVectorSizeForAVXABI(AVXLevel))) { |
3105 | Lo = Memory; |
3106 | postMerge(Size, Lo, Hi); |
3107 | return; |
3108 | } |
3109 | |
3110 | if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { |
3111 | Lo = Memory; |
3112 | postMerge(Size, Lo, Hi); |
3113 | return; |
3114 | } |
3115 | |
3116 | |
3117 | |
3118 | |
3119 | |
3120 | |
3121 | |
3122 | Class FieldLo, FieldHi; |
3123 | |
3124 | |
3125 | |
3126 | |
3127 | if (BitField) { |
3128 | assert(!i->isUnnamedBitfield()); |
3129 | uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); |
3130 | uint64_t Size = i->getBitWidthValue(getContext()); |
3131 | |
3132 | uint64_t EB_Lo = Offset / 64; |
3133 | uint64_t EB_Hi = (Offset + Size - 1) / 64; |
3134 | |
3135 | if (EB_Lo) { |
3136 | assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); |
3137 | FieldLo = NoClass; |
3138 | FieldHi = Integer; |
3139 | } else { |
3140 | FieldLo = Integer; |
3141 | FieldHi = EB_Hi ? Integer : NoClass; |
3142 | } |
3143 | } else |
3144 | classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg); |
3145 | Lo = merge(Lo, FieldLo); |
3146 | Hi = merge(Hi, FieldHi); |
3147 | if (Lo == Memory || Hi == Memory) |
3148 | break; |
3149 | } |
3150 | |
3151 | postMerge(Size, Lo, Hi); |
3152 | } |
3153 | } |
3154 | |
3155 | ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { |
3156 | |
3157 | |
3158 | if (!isAggregateTypeForABI(Ty)) { |
3159 | |
3160 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
3161 | Ty = EnumTy->getDecl()->getIntegerType(); |
3162 | |
3163 | if (Ty->isExtIntType()) |
3164 | return getNaturalAlignIndirect(Ty); |
3165 | |
3166 | return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) |
3167 | : ABIArgInfo::getDirect()); |
3168 | } |
3169 | |
3170 | return getNaturalAlignIndirect(Ty); |
3171 | } |
3172 | |
3173 | bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const { |
3174 | if (const VectorType *VecTy = Ty->getAs<VectorType>()) { |
3175 | uint64_t Size = getContext().getTypeSize(VecTy); |
3176 | unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel); |
3177 | if (Size <= 64 || Size > LargestVector) |
3178 | return true; |
3179 | QualType EltTy = VecTy->getElementType(); |
3180 | if (passInt128VectorsInMem() && |
3181 | (EltTy->isSpecificBuiltinType(BuiltinType::Int128) || |
3182 | EltTy->isSpecificBuiltinType(BuiltinType::UInt128))) |
3183 | return true; |
3184 | } |
3185 | |
3186 | return false; |
3187 | } |
3188 | |
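// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of TargetInfo.cpp. Ignoring
// the __int128-element special case, IsIllegalVectorType() above accepts a
// vector for SSE-register passing only when it is strictly wider than 64 bits
// and no wider than the native vector width for the active AVX level
// (128/256/512). isLegalVectorSize is a hypothetical name.
// ---------------------------------------------------------------------------
#include <cassert>

static bool isLegalVectorSize(unsigned Bits, unsigned NativeBits) {
  return Bits > 64 && Bits <= NativeBits;
}

int main() {
  assert(isLegalVectorSize(128, 128));
  assert(!isLegalVectorSize(256, 128));  // would need a wider native width (AVX)
  assert(!isLegalVectorSize(64, 512));   // 64-bit (MMX-sized) vectors count as illegal here
  return 0;
}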
3189 | ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, |
3190 | unsigned freeIntRegs) const { |
3191 | |
3192 | |
3193 | |
3194 | |
3195 | |
3196 | |
3197 | |
3198 | |
3199 | if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) && |
3200 | !Ty->isExtIntType()) { |
3201 | |
3202 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
3203 | Ty = EnumTy->getDecl()->getIntegerType(); |
3204 | |
3205 | return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) |
3206 | : ABIArgInfo::getDirect()); |
3207 | } |
3208 | |
3209 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) |
3210 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
3211 | |
3212 | |
3213 | |
3214 | unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U); |
3215 | |
3216 | |
3217 | |
3218 | |
3219 | |
3220 | |
3221 | |
3222 | |
3223 | |
3224 | |
3225 | |
3226 | |
3227 | |
3228 | |
3229 | |
3230 | |
3231 | |
3232 | |
3233 | |
3234 | |
3235 | |
3236 | |
3237 | if (freeIntRegs == 0) { |
3238 | uint64_t Size = getContext().getTypeSize(Ty); |
3239 | |
3240 | |
3241 | |
3242 | if (Align == 8 && Size <= 64) |
3243 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), |
3244 | Size)); |
3245 | } |
3246 | |
3247 | return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align)); |
3248 | } |
3249 | |
3250 | |
3251 | |
3252 | llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const { |
3253 | |
3254 | |
3255 | if (const Type *InnerTy = isSingleElementStruct(Ty, getContext())) |
3256 | Ty = QualType(InnerTy, 0); |
3257 | |
3258 | llvm::Type *IRType = CGT.ConvertType(Ty); |
3259 | if (isa<llvm::VectorType>(IRType)) { |
3260 | |
3261 | |
3262 | if (passInt128VectorsInMem() && |
3263 | cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) { |
3264 | |
3265 | uint64_t Size = getContext().getTypeSize(Ty); |
3266 | return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()), |
3267 | Size / 64); |
3268 | } |
3269 | |
3270 | return IRType; |
3271 | } |
3272 | |
3273 | if (IRType->getTypeID() == llvm::Type::FP128TyID) |
3274 | return IRType; |
3275 | |
3276 | |
3277 | uint64_t Size = getContext().getTypeSize(Ty); |
3278 | assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!"); |
3279 | |
3280 | |
3281 | |
3282 | return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()), |
3283 | Size / 64); |
3284 | } |
3285 | |
3286 | |
3287 | |
3288 | |
3289 | |
3290 | |
3291 | |
3292 | |
3293 | static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, |
3294 | unsigned EndBit, ASTContext &Context) { |
3295 | |
3296 | |
3297 | |
3298 | unsigned TySize = (unsigned)Context.getTypeSize(Ty); |
3299 | if (TySize <= StartBit) |
3300 | return true; |
3301 | |
3302 | if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { |
3303 | unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); |
3304 | unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); |
3305 | |
3306 | |
3307 | for (unsigned i = 0; i != NumElts; ++i) { |
3308 | |
3309 | unsigned EltOffset = i*EltSize; |
3310 | if (EltOffset >= EndBit) break; |
3311 | |
3312 | unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0; |
3313 | if (!BitsContainNoUserData(AT->getElementType(), EltStart, |
3314 | EndBit-EltOffset, Context)) |
3315 | return false; |
3316 | } |
3317 | |
3318 | return true; |
3319 | } |
3320 | |
3321 | if (const RecordType *RT = Ty->getAs<RecordType>()) { |
3322 | const RecordDecl *RD = RT->getDecl(); |
3323 | const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); |
3324 | |
3325 | |
3326 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { |
3327 | for (const auto &I : CXXRD->bases()) { |
3328 | assert(!I.isVirtual() && !I.getType()->isDependentType() && |
3329 | "Unexpected base class!"); |
3330 | const auto *Base = |
3331 | cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); |
3332 | |
3333 | |
3334 | unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); |
3335 | if (BaseOffset >= EndBit) continue; |
3336 | |
3337 | unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; |
3338 | if (!BitsContainNoUserData(I.getType(), BaseStart, |
3339 | EndBit-BaseOffset, Context)) |
3340 | return false; |
3341 | } |
3342 | } |
3343 | |
3344 | |
3345 | |
3346 | |
3347 | |
3348 | unsigned idx = 0; |
3349 | for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); |
3350 | i != e; ++i, ++idx) { |
3351 | unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); |
3352 | |
3353 | |
3354 | if (FieldOffset >= EndBit) break; |
3355 | |
3356 | unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; |
3357 | if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, |
3358 | Context)) |
3359 | return false; |
3360 | } |
3361 | |
3362 | |
3363 | |
3364 | return true; |
3365 | } |
3366 | |
3367 | return false; |
3368 | } |
3369 | |
3370 | |
3371 | |
3372 | |
3373 | |
3374 | static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, |
3375 | const llvm::DataLayout &TD) { |
3376 | |
3377 | if (IROffset == 0 && IRType->isFloatTy()) |
3378 | return true; |
3379 | |
3380 | |
3381 | if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { |
3382 | const llvm::StructLayout *SL = TD.getStructLayout(STy); |
3383 | unsigned Elt = SL->getElementContainingOffset(IROffset); |
3384 | IROffset -= SL->getElementOffset(Elt); |
3385 | return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); |
3386 | } |
3387 | |
3388 | |
3389 | if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { |
3390 | llvm::Type *EltTy = ATy->getElementType(); |
3391 | unsigned EltSize = TD.getTypeAllocSize(EltTy); |
3392 | IROffset -= IROffset/EltSize*EltSize; |
3393 | return ContainsFloatAtOffset(EltTy, IROffset, TD); |
3394 | } |
3395 | |
3396 | return false; |
3397 | } |
3398 | |
3399 | |
3400 | |
3401 | |
3402 | llvm::Type *X86_64ABIInfo:: |
3403 | GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, |
3404 | QualType SourceTy, unsigned SourceOffset) const { |
3405 | |
3406 | |
3407 | |
3408 | if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, |
3409 | SourceOffset*8+64, getContext())) |
3410 | return llvm::Type::getFloatTy(getVMContext()); |
3411 | |
3412 | |
3413 | |
3414 | |
3415 | if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) && |
3416 | ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout())) |
3417 | return llvm::FixedVectorType::get(llvm::Type::getFloatTy(getVMContext()), |
3418 | 2); |
3419 | |
3420 | return llvm::Type::getDoubleTy(getVMContext()); |
3421 | } |
3422 | |
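// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of TargetInfo.cpp.
// GetSSETypeAtOffset() above lowers an SSE-classified eightbyte to one of
// three IR types. Reduced to two questions about the eightbyte's 4-byte
// halves, the decision looks like this (SSEKind and pickSSEKind are
// hypothetical names; the real code also consults the source QualType to
// detect unused tail bytes):
// ---------------------------------------------------------------------------
#include <cassert>

enum class SSEKind { Float, FloatPair, Double };

static SSEKind pickSSEKind(bool LowHalfIsFloat, bool HighHalfHasUserData,
                           bool HighHalfIsFloat) {
  if (!HighHalfHasUserData)
    return SSEKind::Float;            // only the low 4 bytes carry data
  if (LowHalfIsFloat && HighHalfIsFloat)
    return SSEKind::FloatPair;        // coerce to <2 x float>
  return SSEKind::Double;             // otherwise pass the eightbyte as double
}

int main() {
  assert(pickSSEKind(true, false, false) == SSEKind::Float);      // { float }
  assert(pickSSEKind(true, true,  true)  == SSEKind::FloatPair);  // { float, float }
  assert(pickSSEKind(false, true, false) == SSEKind::Double);     // { double }
  return 0;
}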
3423 | |
3424 | |
3425 | |
3426 | |
3427 | |
3428 | |
3429 | |
3430 | |
3431 | |
3432 | |
3433 | |
3434 | |
3435 | |
3436 | |
3437 | |
3438 | llvm::Type *X86_64ABIInfo:: |
3439 | GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, |
3440 | QualType SourceTy, unsigned SourceOffset) const { |
3441 | |
3442 | |
3443 | if (IROffset == 0) { |
3444 | |
3445 | if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) || |
3446 | IRType->isIntegerTy(64)) |
3447 | return IRType; |
3448 | |
3449 | |
3450 | |
3451 | |
3452 | |
3453 | |
3454 | |
3455 | if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || |
3456 | IRType->isIntegerTy(32) || |
3457 | (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) { |
3458 | unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 : |
3459 | cast<llvm::IntegerType>(IRType)->getBitWidth(); |
3460 | |
3461 | if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, |
3462 | SourceOffset*8+64, getContext())) |
3463 | return IRType; |
3464 | } |
3465 | } |
3466 | |
3467 | if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { |
3468 | |
3469 | const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy); |
3470 | if (IROffset < SL->getSizeInBytes()) { |
3471 | unsigned FieldIdx = SL->getElementContainingOffset(IROffset); |
3472 | IROffset -= SL->getElementOffset(FieldIdx); |
3473 | |
3474 | return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset, |
3475 | SourceTy, SourceOffset); |
3476 | } |
3477 | } |
3478 | |
3479 | if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { |
3480 | llvm::Type *EltTy = ATy->getElementType(); |
3481 | unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy); |
3482 | unsigned EltOffset = IROffset/EltSize*EltSize; |
3483 | return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy, |
3484 | SourceOffset); |
3485 | } |
3486 | |
3487 | |
3488 | |
3489 | unsigned TySizeInBytes = |
3490 | (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); |
3491 | |
3492 | assert(TySizeInBytes != SourceOffset && "Empty field?"); |
3493 | |
3494 | |
3495 | |
3496 | return llvm::IntegerType::get(getVMContext(), |
3497 | std::min(TySizeInBytes-SourceOffset, 8U)*8); |
3498 | } |
3499 | |
3500 | |
3501 | |
3502 | |
3503 | |
3504 | |
3505 | |
3506 | static llvm::Type * |
3507 | GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, |
3508 | const llvm::DataLayout &TD) { |
3509 | |
3510 | |
3511 | |
3512 | |
3513 | unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo); |
3514 | unsigned HiAlign = TD.getABITypeAlignment(Hi); |
3515 | unsigned HiStart = llvm::alignTo(LoSize, HiAlign); |
3516 | assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!"); |
3517 | |
3518 | |
3519 | |
3520 | |
3521 | |
3522 | if (HiStart != 8) { |
3523 | |
3524 | |
3525 | |
3526 | |
3527 | |
3528 | if (Lo->isFloatTy()) |
3529 | Lo = llvm::Type::getDoubleTy(Lo->getContext()); |
3530 | else { |
3531 | assert((Lo->isIntegerTy() || Lo->isPointerTy()) |
3532 | && "Invalid/unknown lo type"); |
3533 | Lo = llvm::Type::getInt64Ty(Lo->getContext()); |
3534 | } |
3535 | } |
3536 | |
3537 | llvm::StructType *Result = llvm::StructType::get(Lo, Hi); |
3538 | |
3539 | |
3540 | assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 && |
3541 | "Invalid x86-64 argument pair!"); |
3542 | return Result; |
3543 | } |
3544 | |
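// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of TargetInfo.cpp.
// GetX86_64ByValArgumentPair() above widens the low element whenever the high
// element's natural alignment would otherwise place it before byte 8 of the
// two-eightbyte pair. The offset arithmetic alone (mustWidenLowPart is a
// hypothetical name):
// ---------------------------------------------------------------------------
#include <cassert>

// True if a low part of LoSize bytes must be widened to 8 bytes so that a
// high part with ABI alignment HiAlign lands exactly at offset 8.
static bool mustWidenLowPart(unsigned LoSize, unsigned HiAlign) {
  unsigned HiStart = (LoSize + HiAlign - 1) / HiAlign * HiAlign;  // align up
  return HiStart != 8;
}

int main() {
  assert(mustWidenLowPart(4, 4));   // a 4-byte-aligned high part would start at 4
  assert(!mustWidenLowPart(8, 8));  // low part already fills the first eightbyte
  assert(!mustWidenLowPart(4, 8));  // 8-byte alignment pushes the high part to 8
  return 0;
}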
3545 | ABIArgInfo X86_64ABIInfo:: |
3546 | classifyReturnType(QualType RetTy) const { |
3547 | |
3548 | |
3549 | X86_64ABIInfo::Class Lo, Hi; |
3550 | classify(RetTy, 0, Lo, Hi, true); |
3551 | |
3552 | |
3553 | assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); |
3554 | assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); |
3555 | |
3556 | llvm::Type *ResType = nullptr; |
3557 | switch (Lo) { |
3558 | case NoClass: |
3559 | if (Hi == NoClass) |
3560 | return ABIArgInfo::getIgnore(); |
3561 | |
3562 | |
3563 | assert((Hi == SSE || Hi == Integer || Hi == X87Up) && |
3564 | "Unknown missing lo part"); |
3565 | break; |
3566 | |
3567 | case SSEUp: |
3568 | case X87Up: |
3569 | llvm_unreachable("Invalid classification for lo word."); |
3570 | |
3571 | |
3572 | |
3573 | case Memory: |
3574 | return getIndirectReturnResult(RetTy); |
3575 | |
3576 | |
3577 | |
3578 | case Integer: |
3579 | ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); |
3580 | |
3581 | |
3582 | |
3583 | if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { |
3584 | |
3585 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) |
3586 | RetTy = EnumTy->getDecl()->getIntegerType(); |
3587 | |
3588 | if (RetTy->isIntegralOrEnumerationType() && |
3589 | isPromotableIntegerTypeForABI(RetTy)) |
3590 | return ABIArgInfo::getExtend(RetTy); |
3591 | } |
3592 | break; |
3593 | |
3594 | |
3595 | |
3596 | case SSE: |
3597 | ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); |
3598 | break; |
3599 | |
3600 | |
3601 | |
3602 | case X87: |
3603 | ResType = llvm::Type::getX86_FP80Ty(getVMContext()); |
3604 | break; |
3605 | |
3606 | |
3607 | |
3608 | |
3609 | case ComplexX87: |
3610 | assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); |
3611 | ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()), |
3612 | llvm::Type::getX86_FP80Ty(getVMContext())); |
3613 | break; |
3614 | } |
3615 | |
3616 | llvm::Type *HighPart = nullptr; |
3617 | switch (Hi) { |
3618 | |
3619 | |
3620 | case Memory: |
3621 | case X87: |
3622 | llvm_unreachable("Invalid classification for hi word."); |
3623 | |
3624 | case ComplexX87: |
3625 | case NoClass: |
3626 | break; |
3627 | |
3628 | case Integer: |
3629 | HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); |
3630 | if (Lo == NoClass) |
3631 | return ABIArgInfo::getDirect(HighPart, 8); |
3632 | break; |
3633 | case SSE: |
3634 | HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); |
3635 | if (Lo == NoClass) |
3636 | return ABIArgInfo::getDirect(HighPart, 8); |
3637 | break; |
3638 | |
3639 | |
3640 | |
3641 | |
3642 | |
3643 | |
3644 | case SSEUp: |
3645 | assert(Lo == SSE && "Unexpected SSEUp classification."); |
3646 | ResType = GetByteVectorType(RetTy); |
3647 | break; |
3648 | |
3649 | |
3650 | |
3651 | case X87Up: |
3652 | |
3653 | |
3654 | |
3655 | |
3656 | if (Lo != X87) { |
3657 | HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); |
3658 | if (Lo == NoClass) |
3659 | return ABIArgInfo::getDirect(HighPart, 8); |
3660 | } |
3661 | break; |
3662 | } |
3663 | |
3664 | |
3665 | |
3666 | |
3667 | if (HighPart) |
3668 | ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); |
3669 | |
3670 | return ABIArgInfo::getDirect(ResType); |
3671 | } |
3672 | |
3673 | ABIArgInfo X86_64ABIInfo::classifyArgumentType( |
3674 | QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE, |
3675 | bool isNamedArg) |
3676 | const |
3677 | { |
3678 | Ty = useFirstFieldIfTransparentUnion(Ty); |
3679 | |
3680 | X86_64ABIInfo::Class Lo, Hi; |
3681 | classify(Ty, 0, Lo, Hi, isNamedArg); |
3682 | |
3683 | |
3684 | |
3685 | assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); |
3686 | assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); |
3687 | |
3688 | neededInt = 0; |
3689 | neededSSE = 0; |
3690 | llvm::Type *ResType = nullptr; |
3691 | switch (Lo) { |
3692 | case NoClass: |
3693 | if (Hi == NoClass) |
3694 | return ABIArgInfo::getIgnore(); |
3695 | |
3696 | |
3697 | assert((Hi == SSE || Hi == Integer || Hi == X87Up) && |
3698 | "Unknown missing lo part"); |
3699 | break; |
3700 | |
3701 | |
3702 | |
3703 | case Memory: |
3704 | |
3705 | |
3706 | |
3707 | case X87: |
3708 | case ComplexX87: |
3709 | if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect) |
3710 | ++neededInt; |
3711 | return getIndirectResult(Ty, freeIntRegs); |
3712 | |
3713 | case SSEUp: |
3714 | case X87Up: |
3715 | llvm_unreachable("Invalid classification for lo word."); |
3716 | |
3717 | |
3718 | |
3719 | |
3720 | case Integer: |
3721 | ++neededInt; |
3722 | |
3723 | |
3724 | ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); |
3725 | |
3726 | |
3727 | |
3728 | if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { |
3729 | |
3730 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
3731 | Ty = EnumTy->getDecl()->getIntegerType(); |
3732 | |
3733 | if (Ty->isIntegralOrEnumerationType() && |
3734 | isPromotableIntegerTypeForABI(Ty)) |
3735 | return ABIArgInfo::getExtend(Ty); |
3736 | } |
3737 | |
3738 | break; |
3739 | |
3740 | |
3741 | |
3742 | |
3743 | case SSE: { |
3744 | llvm::Type *IRType = CGT.ConvertType(Ty); |
3745 | ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); |
3746 | ++neededSSE; |
3747 | break; |
3748 | } |
3749 | } |
3750 | |
3751 | llvm::Type *HighPart = nullptr; |
3752 | switch (Hi) { |
3753 | |
3754 | |
3755 | |
3756 | case Memory: |
3757 | case X87: |
3758 | case ComplexX87: |
3759 | llvm_unreachable("Invalid classification for hi word."); |
3760 | |
3761 | case NoClass: break; |
3762 | |
3763 | case Integer: |
3764 | ++neededInt; |
3765 | |
3766 | HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); |
3767 | |
3768 | if (Lo == NoClass) |
3769 | return ABIArgInfo::getDirect(HighPart, 8); |
3770 | break; |
3771 | |
3772 | |
3773 | |
3774 | case X87Up: |
3775 | case SSE: |
3776 | HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); |
3777 | |
3778 | if (Lo == NoClass) |
3779 | return ABIArgInfo::getDirect(HighPart, 8); |
3780 | |
3781 | ++neededSSE; |
3782 | break; |
3783 | |
3784 | |
3785 | |
3786 | |
3787 | case SSEUp: |
3788 | assert(Lo == SSE && "Unexpected SSEUp classification"); |
3789 | ResType = GetByteVectorType(Ty); |
3790 | break; |
3791 | } |
3792 | |
3793 | |
3794 | |
3795 | |
3796 | if (HighPart) |
3797 | ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); |
3798 | |
3799 | return ABIArgInfo::getDirect(ResType); |
3800 | } |
3801 | |
3802 | ABIArgInfo |
3803 | X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt, |
3804 | unsigned &NeededSSE) const { |
3805 | auto RT = Ty->getAs<RecordType>(); |
| 12 | Assuming the object is not a 'RecordType' |
| 13 | 'RT' initialized to a null pointer value |
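// Analyzer path: getAs<RecordType>() is assumed to return null (steps 12-13
// above), and because the assert below does not rule that out for the
// analyzer, the RT->getDecl() call that follows is reported as a call on a
// null object pointer (step 14 below).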
3806 | assert(RT && "classifyRegCallStructType only valid with struct types"); |
3807 | |
3808 | if (RT->getDecl()->hasFlexibleArrayMember()) |
| 14 | Called C++ object pointer is null |
3809 | return getIndirectReturnResult(Ty); |
3810 | |
3811 | |
3812 | if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) { |
3813 | if (CXXRD->isDynamicClass()) { |
3814 | NeededInt = NeededSSE = 0; |
3815 | return getIndirectReturnResult(Ty); |
3816 | } |
3817 | |
3818 | for (const auto &I : CXXRD->bases()) |
3819 | if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE) |
3820 | .isIndirect()) { |
3821 | NeededInt = NeededSSE = 0; |
3822 | return getIndirectReturnResult(Ty); |
3823 | } |
3824 | } |
3825 | |
3826 | |
3827 | for (const auto *FD : RT->getDecl()->fields()) { |
3828 | if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) { |
3829 | if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE) |
3830 | .isIndirect()) { |
3831 | NeededInt = NeededSSE = 0; |
3832 | return getIndirectReturnResult(Ty); |
3833 | } |
3834 | } else { |
3835 | unsigned LocalNeededInt, LocalNeededSSE; |
3836 | if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt, |
3837 | LocalNeededSSE, true) |
3838 | .isIndirect()) { |
3839 | NeededInt = NeededSSE = 0; |
3840 | return getIndirectReturnResult(Ty); |
3841 | } |
3842 | NeededInt += LocalNeededInt; |
3843 | NeededSSE += LocalNeededSSE; |
3844 | } |
3845 | } |
3846 | |
3847 | return ABIArgInfo::getDirect(); |
3848 | } |
3849 | |
3850 | ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty, |
3851 | unsigned &NeededInt, |
3852 | unsigned &NeededSSE) const { |
3853 | |
3854 | NeededInt = 0; |
3855 | NeededSSE = 0; |
3856 | |
3857 | return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE); |
| 11 | Calling 'X86_64ABIInfo::classifyRegCallStructTypeImpl' |
3858 | } |
3859 | |
3860 | void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { |
3861 | |
3862 | const unsigned CallingConv = FI.getCallingConvention(); |
3863 | |
3864 | |
3865 | |
3866 | if (CallingConv == llvm::CallingConv::Win64) { |
| 1 | Assuming 'CallingConv' is not equal to Win64 |
3867 | WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel); |
3868 | Win64ABIInfo.computeInfo(FI); |
3869 | return; |
3870 | } |
3871 | |
3872 | bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall; |
| 3 | Assuming 'CallingConv' is equal to X86_RegCall |
3873 | |
3874 | |
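// Free register budget: the SysV x86-64 convention passes arguments in six
// integer registers (RDI, RSI, RDX, RCX, R8, R9) and eight SSE registers
// (XMM0-XMM7); __regcall raises the budget to 11 integer and 16 SSE
// registers, which is what the two initializers below encode.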
3875 | unsigned FreeIntRegs = IsRegCall ? 11 : 6; |
3876 | unsigned FreeSSERegs = IsRegCall ? 16 : 8; |
3877 | unsigned NeededInt, NeededSSE; |
3878 | |
3879 | if (!::classifyReturnType(getCXXABI(), FI, *this)) { |
| 6 | Assuming the condition is true |
3880 | if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() && |
3881 | !FI.getReturnType()->getTypePtr()->isUnionType()) { |
| 8 | Assuming the condition is true |
3882 | FI.getReturnInfo() = |
3883 | classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE); |
| 10 | Calling 'X86_64ABIInfo::classifyRegCallStructType' |
3884 | if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) { |
3885 | FreeIntRegs -= NeededInt; |
3886 | FreeSSERegs -= NeededSSE; |
3887 | } else { |
3888 | FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType()); |
3889 | } |
3890 | } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() && |
3891 | getContext().getCanonicalType(FI.getReturnType() |
3892 | ->getAs<ComplexType>() |
3893 | ->getElementType()) == |
3894 | getContext().LongDoubleTy) |
3895 | |
3896 | |
3897 | FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType()); |
3898 | else |
3899 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
3900 | } |
3901 | |
3902 | |
3903 | |
3904 | if (FI.getReturnInfo().isIndirect()) |
3905 | --FreeIntRegs; |
3906 | |
3907 | |
3908 | if (FI.isChainCall()) |
3909 | ++FreeIntRegs; |
3910 | |
3911 | unsigned NumRequiredArgs = FI.getNumRequiredArgs(); |
3912 | |
3913 | |
3914 | unsigned ArgNo = 0; |
3915 | for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); |
3916 | it != ie; ++it, ++ArgNo) { |
3917 | bool IsNamedArg = ArgNo < NumRequiredArgs; |
3918 | |
3919 | if (IsRegCall && it->type->isStructureOrClassType()) |
3920 | it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE); |
3921 | else |
3922 | it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt, |
3923 | NeededSSE, IsNamedArg); |
3924 | |
3925 | |
3926 | |
3927 | |
3928 | |
3929 | if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) { |
3930 | FreeIntRegs -= NeededInt; |
3931 | FreeSSERegs -= NeededSSE; |
3932 | } else { |
3933 | it->info = getIndirectResult(it->type, FreeIntRegs); |
3934 | } |
3935 | } |
3936 | } |
3937 | |
3938 | static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, |
3939 | Address VAListAddr, QualType Ty) { |
3940 | Address overflow_arg_area_p = |
3941 | CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); |
3942 | llvm::Value *overflow_arg_area = |
3943 | CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); |
3944 | |
3945 | |
3946 | |
3947 | |
3948 | |
3949 | CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty); |
3950 | if (Align > CharUnits::fromQuantity(8)) { |
3951 | overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area, |
3952 | Align); |
3953 | } |
3954 | |
3955 | |
3956 | llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); |
3957 | llvm::Value *Res = |
3958 | CGF.Builder.CreateBitCast(overflow_arg_area, |
3959 | llvm::PointerType::getUnqual(LTy)); |
3960 | |
3961 | |
3962 | |
3963 | |
3964 | |
3965 | |
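// Each argument taken from the overflow (stack) area occupies a whole number
// of eightbytes, so the byte size is rounded up to a multiple of 8 before
// overflow_arg_area is advanced.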
3966 | uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; |
3967 | llvm::Value *Offset = |
3968 | llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); |
3969 | overflow_arg_area = CGF.Builder.CreateGEP(CGF.Int8Ty, overflow_arg_area, |
3970 | Offset, "overflow_arg_area.next"); |
3971 | CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); |
3972 | |
3973 | |
3974 | return Address(Res, Align); |
3975 | } |
3976 | |
3977 | Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
3978 | QualType Ty) const { |
3979 | |
3980 | |
3981 | |
3982 | |
3983 | |
3984 | |
3985 | |
3986 | unsigned neededInt, neededSSE; |
3987 | |
3988 | Ty = getContext().getCanonicalType(Ty); |
3989 | ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE, |
3990 | false); |
3991 | |
3992 | |
3993 | |
3994 | if (!neededInt && !neededSSE) |
3995 | return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty); |
3996 | |
3997 | |
3998 | |
3999 | |
4000 | |
4001 | |
4002 | |
4003 | |
4004 | |
4005 | |
4006 | |
4007 | |
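// The register save area written by the prologue holds 48 bytes of
// general-purpose registers (6 x 8) followed by 128 bytes of XMM registers
// (8 x 16, ending at offset 176). The "fits_in_gp"/"fits_in_fp" comparisons
// below check that gp_offset + 8*neededInt <= 48 and
// fp_offset + 16*neededSSE <= 176 before taking the in-register path.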
4008 | llvm::Value *InRegs = nullptr; |
4009 | Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid(); |
4010 | llvm::Value *gp_offset = nullptr, *fp_offset = nullptr; |
4011 | if (neededInt) { |
4012 | gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); |
4013 | gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); |
4014 | InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); |
4015 | InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); |
4016 | } |
4017 | |
4018 | if (neededSSE) { |
4019 | fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); |
4020 | fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); |
4021 | llvm::Value *FitsInFP = |
4022 | llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); |
4023 | FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); |
4024 | InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; |
4025 | } |
4026 | |
4027 | llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); |
4028 | llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); |
4029 | llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); |
4030 | CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); |
4031 | |
4032 | |
4033 | |
4034 | CGF.EmitBlock(InRegBlock); |
4035 | |
4036 | |
4037 | |
4038 | |
4039 | |
4040 | |
4041 | |
4042 | |
4043 | |
4044 | |
4045 | |
4046 | llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); |
4047 | llvm::Value *RegSaveArea = CGF.Builder.CreateLoad( |
4048 | CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area"); |
4049 | |
4050 | Address RegAddr = Address::invalid(); |
4051 | if (neededInt && neededSSE) { |
4052 | |
4053 | assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); |
4054 | llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); |
4055 | Address Tmp = CGF.CreateMemTemp(Ty); |
4056 | Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST); |
4057 | assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); |
4058 | llvm::Type *TyLo = ST->getElementType(0); |
4059 | llvm::Type *TyHi = ST->getElementType(1); |
4060 | assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && |
4061 | "Unexpected ABI info for mixed regs"); |
4062 | llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); |
4063 | llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); |
4064 | llvm::Value *GPAddr = |
4065 | CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset); |
4066 | llvm::Value *FPAddr = |
4067 | CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset); |
4068 | llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr; |
4069 | llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr; |
4070 | |
4071 | |
4072 | |
4073 | llvm::Value *V = CGF.Builder.CreateAlignedLoad( |
4074 | TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo), |
4075 | CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo))); |
4076 | CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); |
4077 | |
4078 | |
4079 | V = CGF.Builder.CreateAlignedLoad( |
4080 | TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi), |
4081 | CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi))); |
4082 | CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); |
4083 | |
4084 | RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy); |
4085 | } else if (neededInt) { |
4086 | RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset), |
4087 | CharUnits::fromQuantity(8)); |
4088 | RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); |
4089 | |
4090 | |
4091 | auto TInfo = getContext().getTypeInfoInChars(Ty); |
4092 | uint64_t TySize = TInfo.Width.getQuantity(); |
4093 | CharUnits TyAlign = TInfo.Align; |
4094 | |
4095 | |
4096 | |
4097 | if (TyAlign.getQuantity() > 8) { |
4098 | Address Tmp = CGF.CreateMemTemp(Ty); |
4099 | CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false); |
4100 | RegAddr = Tmp; |
4101 | } |
4102 | |
4103 | } else if (neededSSE == 1) { |
4104 | RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset), |
4105 | CharUnits::fromQuantity(16)); |
4106 | RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); |
4107 | } else { |
4108 | assert(neededSSE == 2 && "Invalid number of needed registers!"); |
4109 | |
4110 | |
4111 | |
4112 | |
4113 | |
4114 | |
4115 | Address RegAddrLo = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, |
4116 | fp_offset), |
4117 | CharUnits::fromQuantity(16)); |
4118 | Address RegAddrHi = |
4119 | CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo, |
4120 | CharUnits::fromQuantity(16)); |
4121 | llvm::Type *ST = AI.canHaveCoerceToType() |
4122 | ? AI.getCoerceToType() |
4123 | : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy); |
4124 | llvm::Value *V; |
4125 | Address Tmp = CGF.CreateMemTemp(Ty); |
4126 | Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST); |
4127 | V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast( |
4128 | RegAddrLo, ST->getStructElementType(0))); |
4129 | CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); |
4130 | V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast( |
4131 | RegAddrHi, ST->getStructElementType(1))); |
4132 | CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); |
4133 | |
4134 | RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy); |
4135 | } |
4136 | |
4137 | |
4138 | |
4139 | |
4140 | if (neededInt) { |
4141 | llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); |
4142 | CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), |
4143 | gp_offset_p); |
4144 | } |
4145 | if (neededSSE) { |
4146 | llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); |
4147 | CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), |
4148 | fp_offset_p); |
4149 | } |
4150 | CGF.EmitBranch(ContBlock); |
4151 | |
4152 | |
4153 | |
4154 | CGF.EmitBlock(InMemBlock); |
4155 | Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty); |
4156 | |
4157 | |
4158 | |
4159 | CGF.EmitBlock(ContBlock); |
4160 | Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock, |
4161 | "vaarg.addr"); |
4162 | return ResAddr; |
4163 | } |
4164 | |
4165 | Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, |
4166 | QualType Ty) const { |
4167 | |
4168 | |
4169 | uint64_t Width = getContext().getTypeSize(Ty); |
4170 | bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width); |
4171 | |
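// Under the Microsoft x64 varargs convention, anything wider than 64 bits or
// with a non-power-of-two size is passed by pointer, hence the IsIndirect
// flag computed above.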
4172 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, |
4173 | CGF.getContext().getTypeInfoInChars(Ty), |
4174 | CharUnits::fromQuantity(8), |
4175 | false); |
4176 | } |
4177 | |
4178 | ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall( |
4179 | QualType Ty, unsigned &FreeSSERegs, const ABIArgInfo ¤t) const { |
4180 | const Type *Base = nullptr; |
4181 | uint64_t NumElts = 0; |
4182 | |
4183 | if (!Ty->isBuiltinType() && !Ty->isVectorType() && |
4184 | isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) { |
4185 | FreeSSERegs -= NumElts; |
4186 | return getDirectX86Hva(); |
4187 | } |
4188 | return current; |
4189 | } |
4190 | |
4191 | ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs, |
4192 | bool IsReturnType, bool IsVectorCall, |
4193 | bool IsRegCall) const { |
4194 | |
4195 | if (Ty->isVoidType()) |
4196 | return ABIArgInfo::getIgnore(); |
4197 | |
4198 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
4199 | Ty = EnumTy->getDecl()->getIntegerType(); |
4200 | |
4201 | TypeInfo Info = getContext().getTypeInfo(Ty); |
4202 | uint64_t Width = Info.Width; |
4203 | CharUnits Align = getContext().toCharUnitsFromBits(Info.Align); |
4204 | |
4205 | const RecordType *RT = Ty->getAs<RecordType>(); |
4206 | if (RT) { |
4207 | if (!IsReturnType) { |
4208 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI())) |
4209 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
4210 | } |
4211 | |
4212 | if (RT->getDecl()->hasFlexibleArrayMember()) |
4213 | return getNaturalAlignIndirect(Ty, false); |
4214 | |
4215 | } |
4216 | |
4217 | const Type *Base = nullptr; |
4218 | uint64_t NumElts = 0; |
4219 | |
4220 | |
4221 | if ((IsVectorCall || IsRegCall) && |
4222 | isHomogeneousAggregate(Ty, Base, NumElts)) { |
4223 | if (IsRegCall) { |
4224 | if (FreeSSERegs >= NumElts) { |
4225 | FreeSSERegs -= NumElts; |
4226 | if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType()) |
4227 | return ABIArgInfo::getDirect(); |
4228 | return ABIArgInfo::getExpand(); |
4229 | } |
4230 | return ABIArgInfo::getIndirect(Align, false); |
4231 | } else if (IsVectorCall) { |
4232 | if (FreeSSERegs >= NumElts && |
4233 | (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) { |
4234 | FreeSSERegs -= NumElts; |
4235 | return ABIArgInfo::getDirect(); |
4236 | } else if (IsReturnType) { |
4237 | return ABIArgInfo::getExpand(); |
4238 | } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) { |
4239 | |
4240 | return ABIArgInfo::getIndirect(Align, false); |
4241 | } |
4242 | } |
4243 | } |
4244 | |
4245 | if (Ty->isMemberPointerType()) { |
4246 | |
4247 | |
4248 | llvm::Type *LLTy = CGT.ConvertType(Ty); |
4249 | if (LLTy->isPointerTy() || LLTy->isIntegerTy()) |
4250 | return ABIArgInfo::getDirect(); |
4251 | } |
4252 | |
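// Record types, complex types, and member pointers not already handled above
// follow the same Win64 rule: sizes that fit in 64 bits and are a power of
// two are coerced to an integer of that width; everything else is passed
// indirectly.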
4253 | if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) { |
4254 | |
4255 | |
4256 | if (Width > 64 || !llvm::isPowerOf2_64(Width)) |
4257 | return getNaturalAlignIndirect(Ty, false); |
4258 | |
4259 | |
4260 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width)); |
4261 | } |
4262 | |
4263 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { |
4264 | switch (BT->getKind()) { |
4265 | case BuiltinType::Bool: |
4266 | |
4267 | |
4268 | return ABIArgInfo::getExtend(Ty); |
4269 | |
4270 | case BuiltinType::LongDouble: |
4271 | |
4272 | |
4273 | if (IsMingw64) { |
4274 | const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); |
4275 | if (LDF == &llvm::APFloat::x87DoubleExtended()) |
4276 | return ABIArgInfo::getIndirect(Align, false); |
4277 | } |
4278 | break; |
4279 | |
4280 | case BuiltinType::Int128: |
4281 | case BuiltinType::UInt128: |
4282 | |
4283 | |
4284 | |
4285 | if (!IsReturnType) |
4286 | return ABIArgInfo::getIndirect(Align, false); |
4287 | |
4288 | |
4289 | |
4290 | return ABIArgInfo::getDirect(llvm::FixedVectorType::get( |
4291 | llvm::Type::getInt64Ty(getVMContext()), 2)); |
4292 | |
4293 | default: |
4294 | break; |
4295 | } |
4296 | } |
4297 | |
4298 | if (Ty->isExtIntType()) { |
4299 | |
4300 | |
4301 | |
4302 | |
4303 | |
4304 | if (Width <= 64) |
4305 | return ABIArgInfo::getDirect(); |
4306 | return ABIArgInfo::getIndirect(Align, false); |
4307 | } |
4308 | |
4309 | return ABIArgInfo::getDirect(); |
4310 | } |
4311 | |
4312 | void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { |
4313 | const unsigned CC = FI.getCallingConvention(); |
4314 | bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall; |
4315 | bool IsRegCall = CC == llvm::CallingConv::X86_RegCall; |
4316 | |
4317 | |
4318 | |
4319 | if (CC == llvm::CallingConv::X86_64_SysV) { |
4320 | X86_64ABIInfo SysVABIInfo(CGT, AVXLevel); |
4321 | SysVABIInfo.computeInfo(FI); |
4322 | return; |
4323 | } |
4324 | |
4325 | unsigned FreeSSERegs = 0; |
4326 | if (IsVectorCall) { |
4327 | |
4328 | FreeSSERegs = 4; |
4329 | } else if (IsRegCall) { |
4330 | |
4331 | FreeSSERegs = 16; |
4332 | } |
4333 | |
4334 | if (!getCXXABI().classifyReturnType(FI)) |
4335 | FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true, |
4336 | IsVectorCall, IsRegCall); |
4337 | |
4338 | if (IsVectorCall) { |
4339 | |
4340 | FreeSSERegs = 6; |
4341 | } else if (IsRegCall) { |
4342 | |
4343 | FreeSSERegs = 16; |
4344 | } |
4345 | |
4346 | unsigned ArgNum = 0; |
4347 | unsigned ZeroSSERegs = 0; |
4348 | for (auto &I : FI.arguments()) { |
4349 | |
4350 | |
4351 | |
4352 | unsigned *MaybeFreeSSERegs = |
4353 | (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs; |
4354 | I.info = |
4355 | classify(I.type, *MaybeFreeSSERegs, false, IsVectorCall, IsRegCall); |
4356 | ++ArgNum; |
4357 | } |
4358 | |
4359 | if (IsVectorCall) { |
4360 | |
4361 | |
4362 | for (auto &I : FI.arguments()) |
4363 | I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info); |
4364 | } |
4365 | } |
4366 | |
4367 | Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
4368 | QualType Ty) const { |
4369 | |
4370 | |
4371 | uint64_t Width = getContext().getTypeSize(Ty); |
4372 | bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width); |
4373 | |
4374 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, |
4375 | CGF.getContext().getTypeInfoInChars(Ty), |
4376 | CharUnits::fromQuantity(8), |
4377 | false); |
4378 | } |
4379 | |
4380 | static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
4381 | llvm::Value *Address, bool Is64Bit, |
4382 | bool IsAIX) { |
4383 | |
4384 | |
4385 | |
4386 | CodeGen::CGBuilderTy &Builder = CGF.Builder; |
4387 | |
4388 | llvm::IntegerType *i8 = CGF.Int8Ty; |
4389 | llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); |
4390 | llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); |
4391 | llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); |
4392 | |
4393 | |
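// The ranges below appear to follow the PowerPC DWARF register numbering:
// 0-31 for the general-purpose registers, 32-63 for the 8-byte floating-point
// registers, and 77-108 for the 16-byte vector registers, with the remaining
// entries covering condition-register fields and other special-purpose
// registers (mapping assumed from the ranges and sizes used here).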
4394 | AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31); |
4395 | |
4396 | |
4397 | AssignToArrayRange(Builder, Address, Eight8, 32, 63); |
4398 | |
4399 | |
4400 | |
4401 | |
4402 | |
4403 | |
4404 | AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67); |
4405 | |
4406 | |
4407 | |
4408 | |
4409 | AssignToArrayRange(Builder, Address, Four8, 68, 76); |
4410 | |
4411 | |
4412 | AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); |
4413 | |
4414 | |
4415 | |
4416 | AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110); |
4417 | |
4418 | |
4419 | if (IsAIX) |
4420 | return false; |
4421 | |
4422 | |
4423 | |
4424 | |
4425 | AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113); |
4426 | |
4427 | if (!Is64Bit) |
4428 | return false; |
4429 | |
4430 | |
4431 | |
4432 | |
4433 | |
4434 | |
4435 | |
4436 | AssignToArrayRange(Builder, Address, Eight8, 114, 116); |
4437 | |
4438 | return false; |
4439 | } |
4440 | |
4441 | |
4442 | namespace { |
4443 | |
4444 | class AIXABIInfo : public ABIInfo { |
4445 | const bool Is64Bit; |
4446 | const unsigned PtrByteSize; |
4447 | CharUnits getParamTypeAlignment(QualType Ty) const; |
4448 | |
4449 | public: |
4450 | AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit) |
4451 | : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {} |
4452 | |
4453 | bool isPromotableTypeForABI(QualType Ty) const; |
4454 | |
4455 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
4456 | ABIArgInfo classifyArgumentType(QualType Ty) const; |
4457 | |
4458 | void computeInfo(CGFunctionInfo &FI) const override { |
4459 | if (!getCXXABI().classifyReturnType(FI)) |
4460 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
4461 | |
4462 | for (auto &I : FI.arguments()) |
4463 | I.info = classifyArgumentType(I.type); |
4464 | } |
4465 | |
4466 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
4467 | QualType Ty) const override; |
4468 | }; |
4469 | |
4470 | class AIXTargetCodeGenInfo : public TargetCodeGenInfo { |
4471 | const bool Is64Bit; |
4472 | |
4473 | public: |
4474 | AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit) |
4475 | : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)), |
4476 | Is64Bit(Is64Bit) {} |
4477 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { |
4478 | return 1; |
4479 | } |
4480 | |
4481 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
4482 | llvm::Value *Address) const override; |
4483 | }; |
4484 | } |
4485 | |
4486 | |
4487 | |
4488 | bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const { |
4489 | |
4490 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
4491 | Ty = EnumTy->getDecl()->getIntegerType(); |
4492 | |
4493 | |
4494 | if (Ty->isPromotableIntegerType()) |
4495 | return true; |
4496 | |
4497 | if (!Is64Bit) |
4498 | return false; |
4499 | |
4500 | |
4501 | |
4502 | |
4503 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) |
4504 | switch (BT->getKind()) { |
4505 | case BuiltinType::Int: |
4506 | case BuiltinType::UInt: |
4507 | return true; |
4508 | default: |
4509 | break; |
4510 | } |
4511 | |
4512 | return false; |
4513 | } |
4514 | |
4515 | ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const { |
4516 | if (RetTy->isAnyComplexType()) |
4517 | return ABIArgInfo::getDirect(); |
4518 | |
4519 | if (RetTy->isVectorType()) |
4520 | return ABIArgInfo::getDirect(); |
4521 | |
4522 | if (RetTy->isVoidType()) |
4523 | return ABIArgInfo::getIgnore(); |
4524 | |
4525 | if (isAggregateTypeForABI(RetTy)) |
4526 | return getNaturalAlignIndirect(RetTy); |
4527 | |
4528 | return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) |
4529 | : ABIArgInfo::getDirect()); |
4530 | } |
4531 | |
4532 | ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const { |
4533 | Ty = useFirstFieldIfTransparentUnion(Ty); |
4534 | |
4535 | if (Ty->isAnyComplexType()) |
4536 | return ABIArgInfo::getDirect(); |
4537 | |
4538 | if (Ty->isVectorType()) |
4539 | return ABIArgInfo::getDirect(); |
4540 | |
4541 | if (isAggregateTypeForABI(Ty)) { |
4542 | |
4543 | |
4544 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) |
4545 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
4546 | |
4547 | CharUnits CCAlign = getParamTypeAlignment(Ty); |
4548 | CharUnits TyAlign = getContext().getTypeAlignInChars(Ty); |
4549 | |
4550 | return ABIArgInfo::getIndirect(CCAlign, true, |
4551 | TyAlign > CCAlign); |
4552 | } |
4553 | |
4554 | return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) |
4555 | : ABIArgInfo::getDirect()); |
4556 | } |
4557 | |
4558 | CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const { |
4559 | |
4560 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) |
4561 | Ty = CTy->getElementType(); |
4562 | |
4563 | if (Ty->isVectorType()) |
4564 | return CharUnits::fromQuantity(16); |
4565 | |
4566 | |
4567 | if (isRecordWithSIMDVectorType(getContext(), Ty)) |
4568 | return CharUnits::fromQuantity(16); |
4569 | |
4570 | return CharUnits::fromQuantity(PtrByteSize); |
4571 | } |
4572 | |
4573 | Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
4574 | QualType Ty) const { |
4575 | if (Ty->isAnyComplexType()) |
4576 | llvm::report_fatal_error("complex type is not supported on AIX yet"); |
4577 | |
4578 | auto TypeInfo = getContext().getTypeInfoInChars(Ty); |
4579 | TypeInfo.Align = getParamTypeAlignment(Ty); |
4580 | |
4581 | CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize); |
4582 | |
4583 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, false, TypeInfo, |
4584 | SlotSize, true); |
4585 | } |
4586 | |
4587 | bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable( |
4588 | CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const { |
4589 | return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, true); |
4590 | } |
4591 | |
4592 | |
4593 | namespace { |
4594 | |
4595 | class PPC32_SVR4_ABIInfo : public DefaultABIInfo { |
4596 | bool IsSoftFloatABI; |
4597 | bool IsRetSmallStructInRegABI; |
4598 | |
4599 | CharUnits getParamTypeAlignment(QualType Ty) const; |
4600 | |
4601 | public: |
4602 | PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI, |
4603 | bool RetSmallStructInRegABI) |
4604 | : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI), |
4605 | IsRetSmallStructInRegABI(RetSmallStructInRegABI) {} |
4606 | |
4607 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
4608 | |
4609 | void computeInfo(CGFunctionInfo &FI) const override { |
4610 | if (!getCXXABI().classifyReturnType(FI)) |
4611 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
4612 | for (auto &I : FI.arguments()) |
4613 | I.info = classifyArgumentType(I.type); |
4614 | } |
4615 | |
4616 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
4617 | QualType Ty) const override; |
4618 | }; |
4619 | |
4620 | class PPC32TargetCodeGenInfo : public TargetCodeGenInfo { |
4621 | public: |
4622 | PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI, |
4623 | bool RetSmallStructInRegABI) |
4624 | : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>( |
4625 | CGT, SoftFloatABI, RetSmallStructInRegABI)) {} |
4626 | |
4627 | static bool isStructReturnInRegABI(const llvm::Triple &Triple, |
4628 | const CodeGenOptions &Opts); |
4629 | |
4630 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { |
4631 | |
4632 | return 1; |
4633 | } |
4634 | |
4635 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
4636 | llvm::Value *Address) const override; |
4637 | }; |
4638 | } |
4639 | |
4640 | CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const { |
4641 | |
4642 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) |
4643 | Ty = CTy->getElementType(); |
4644 | |
4645 | if (Ty->isVectorType()) |
4646 | return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 |
4647 | : 4); |
4648 | |
4649 | |
4650 | |
4651 | const Type *AlignTy = nullptr; |
4652 | if (const Type *EltType = isSingleElementStruct(Ty, getContext())) { |
4653 | const BuiltinType *BT = EltType->getAs<BuiltinType>(); |
4654 | if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) || |
4655 | (BT && BT->isFloatingPoint())) |
4656 | AlignTy = EltType; |
4657 | } |
4658 | |
4659 | if (AlignTy) |
4660 | return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4); |
4661 | return CharUnits::fromQuantity(4); |
4662 | } |
4663 | |
4664 | ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { |
4665 | uint64_t Size; |
4666 | |
4667 | |
4668 | if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI && |
4669 | (Size = getContext().getTypeSize(RetTy)) <= 64) { |
4670 | |
4671 | |
4672 | |
4673 | |
4674 | |
4675 | |
4676 | |
4677 | |
4678 | |
4679 | |
4680 | |
4681 | if (Size == 0) |
4682 | return ABIArgInfo::getIgnore(); |
4683 | else { |
4684 | llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size); |
4685 | return ABIArgInfo::getDirect(CoerceTy); |
4686 | } |
4687 | } |
4688 | |
4689 | return DefaultABIInfo::classifyReturnType(RetTy); |
4690 | } |
4691 | |
4692 | |
4693 | |
4694 | Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList, |
4695 | QualType Ty) const { |
4696 | if (getTarget().getTriple().isOSDarwin()) { |
4697 | auto TI = getContext().getTypeInfoInChars(Ty); |
4698 | TI.Align = getParamTypeAlignment(Ty); |
4699 | |
4700 | CharUnits SlotSize = CharUnits::fromQuantity(4); |
4701 | return emitVoidPtrVAArg(CGF, VAList, Ty, |
4702 | classifyArgumentType(Ty).isIndirect(), TI, SlotSize, |
4703 | true); |
4704 | } |
4705 | |
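// On 32-bit SVR4 the va_list carries separate counters ("gpr" and "fpr") for
// the argument registers already consumed; once eight of a kind have been
// used (OverflowLimit below), further arguments of that kind are read from
// the overflow area on the stack.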
4706 | const unsigned OverflowLimit = 8; |
4707 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { |
4708 | |
4709 | (void)CTy; |
4710 | return Address::invalid(); |
4711 | } |
4712 | |
4713 | |
4714 | |
4715 | |
4716 | |
4717 | |
4718 | |
4719 | |
4720 | |
4721 | bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64; |
4722 | bool isInt = !Ty->isFloatingType(); |
4723 | bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64; |
4724 | |
4725 | |
4726 | |
4727 | bool isIndirect = isAggregateTypeForABI(Ty); |
4728 | |
4729 | CGBuilderTy &Builder = CGF.Builder; |
4730 | |
4731 | |
4732 | Address NumRegsAddr = Address::invalid(); |
4733 | if (isInt || IsSoftFloatABI) { |
4734 | NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr"); |
4735 | } else { |
4736 | NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr"); |
4737 | } |
4738 | |
4739 | llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs"); |
4740 | |
4741 | |
4742 | if (isI64 || (isF64 && IsSoftFloatABI)) { |
4743 | NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1)); |
4744 | NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U)); |
4745 | } |
4746 | |
4747 | llvm::Value *CC = |
4748 | Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond"); |
4749 | |
4750 | llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs"); |
4751 | llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow"); |
4752 | llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); |
4753 | |
4754 | Builder.CreateCondBr(CC, UsingRegs, UsingOverflow); |
4755 | |
4756 | llvm::Type *DirectTy = CGF.ConvertType(Ty); |
4757 | if (isIndirect) DirectTy = DirectTy->getPointerTo(0); |
4758 | |
4759 | |
4760 | Address RegAddr = Address::invalid(); |
4761 | { |
4762 | CGF.EmitBlock(UsingRegs); |
4763 | |
4764 | Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4); |
4765 | RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr), |
4766 | CharUnits::fromQuantity(8)); |
4767 | assert(RegAddr.getElementType() == CGF.Int8Ty); |
4768 | |
4769 | |
4770 | if (!(isInt || IsSoftFloatABI)) { |
4771 | RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr, |
4772 | CharUnits::fromQuantity(32)); |
4773 | } |
4774 | |
4775 | |
4776 | |
4777 | CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8); |
4778 | llvm::Value *RegOffset = |
4779 | Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity())); |
4780 | RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty, |
4781 | RegAddr.getPointer(), RegOffset), |
4782 | RegAddr.getAlignment().alignmentOfArrayElement(RegSize)); |
4783 | RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy); |
4784 | |
4785 | |
4786 | NumRegs = |
4787 | Builder.CreateAdd(NumRegs, |
4788 | Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1)); |
4789 | Builder.CreateStore(NumRegs, NumRegsAddr); |
4790 | |
4791 | CGF.EmitBranch(Cont); |
4792 | } |
4793 | |
4794 | |
4795 | Address MemAddr = Address::invalid(); |
4796 | { |
4797 | CGF.EmitBlock(UsingOverflow); |
4798 | |
4799 | Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr); |
4800 | |
4801 | |
4802 | CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4); |
4803 | |
4804 | CharUnits Size; |
4805 | if (!isIndirect) { |
4806 | auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty); |
4807 | Size = TypeInfo.Width.alignTo(OverflowAreaAlign); |
4808 | } else { |
4809 | Size = CGF.getPointerSize(); |
4810 | } |
4811 | |
4812 | Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3); |
4813 | Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"), |
4814 | OverflowAreaAlign); |
4815 | |
4816 | CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty); |
4817 | if (Align > OverflowAreaAlign) { |
4818 | llvm::Value *Ptr = OverflowArea.getPointer(); |
4819 | OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align), |
4820 | Align); |
4821 | } |
4822 | |
4823 | MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy); |
4824 | |
4825 | |
4826 | OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size); |
4827 | Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr); |
4828 | CGF.EmitBranch(Cont); |
4829 | } |
4830 | |
4831 | CGF.EmitBlock(Cont); |
4832 | |
4833 | |
4834 | Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow, |
4835 | "vaarg.addr"); |
4836 | |
4837 | |
4838 | if (isIndirect) { |
4839 | Result = Address(Builder.CreateLoad(Result, "aggr"), |
4840 | getContext().getTypeAlignInChars(Ty)); |
4841 | } |
4842 | |
4843 | return Result; |
4844 | } |
4845 | |
4846 | bool PPC32TargetCodeGenInfo::isStructReturnInRegABI( |
4847 | const llvm::Triple &Triple, const CodeGenOptions &Opts) { |
4848 | assert(Triple.isPPC32()); |
4849 | |
4850 | switch (Opts.getStructReturnConvention()) { |
4851 | case CodeGenOptions::SRCK_Default: |
4852 | break; |
4853 | case CodeGenOptions::SRCK_OnStack: |
4854 | return false; |
4855 | case CodeGenOptions::SRCK_InRegs: |
4856 | return true; |
4857 | } |
4858 | |
4859 | if (Triple.isOSBinFormatELF() && !Triple.isOSLinux()) |
4860 | return true; |
4861 | |
4862 | return false; |
4863 | } |
4864 | |
4865 | bool |
4866 | PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
4867 | llvm::Value *Address) const { |
4868 | return PPC_initDwarfEHRegSizeTable(CGF, Address, false, |
4869 | false); |
4870 | } |
4871 | |
4872 | |
4873 | |
4874 | namespace { |
4875 | |
4876 | class PPC64_SVR4_ABIInfo : public SwiftABIInfo { |
4877 | public: |
4878 | enum ABIKind { |
4879 | ELFv1 = 0, |
4880 | ELFv2 |
4881 | }; |
4882 | |
4883 | private: |
4884 | static const unsigned GPRBits = 64; |
4885 | ABIKind Kind; |
4886 | bool IsSoftFloatABI; |
4887 | |
4888 | public: |
4889 | PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, |
4890 | bool SoftFloatABI) |
4891 | : SwiftABIInfo(CGT), Kind(Kind), IsSoftFloatABI(SoftFloatABI) {} |
4892 | |
4893 | bool isPromotableTypeForABI(QualType Ty) const; |
4894 | CharUnits getParamTypeAlignment(QualType Ty) const; |
4895 | |
4896 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
4897 | ABIArgInfo classifyArgumentType(QualType Ty) const; |
4898 | |
4899 | bool isHomogeneousAggregateBaseType(QualType Ty) const override; |
4900 | bool isHomogeneousAggregateSmallEnough(const Type *Ty, |
4901 | uint64_t Members) const override; |
4902 | |
4903 | |
4904 | |
4905 | |
4906 | |
4907 | |
4908 | |
4909 | void computeInfo(CGFunctionInfo &FI) const override { |
4910 | if (!getCXXABI().classifyReturnType(FI)) |
4911 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
4912 | for (auto &I : FI.arguments()) { |
4913 | |
4914 | |
4915 | |
4916 | const Type *T = isSingleElementStruct(I.type, getContext()); |
4917 | if (T) { |
4918 | const BuiltinType *BT = T->getAs<BuiltinType>(); |
4919 | if ((T->isVectorType() && getContext().getTypeSize(T) == 128) || |
4920 | (BT && BT->isFloatingPoint())) { |
4921 | QualType QT(T, 0); |
4922 | I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); |
4923 | continue; |
4924 | } |
4925 | } |
4926 | I.info = classifyArgumentType(I.type); |
4927 | } |
4928 | } |
4929 | |
4930 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
4931 | QualType Ty) const override; |
4932 | |
4933 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, |
4934 | bool asReturnValue) const override { |
4935 | return occupiesMoreThan(CGT, scalars, 4); |
4936 | } |
4937 | |
4938 | bool isSwiftErrorInRegister() const override { |
4939 | return false; |
4940 | } |
4941 | }; |
4942 | |
4943 | class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { |
4944 | |
4945 | public: |
4946 | PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, |
4947 | PPC64_SVR4_ABIInfo::ABIKind Kind, |
4948 | bool SoftFloatABI) |
4949 | : TargetCodeGenInfo( |
4950 | std::make_unique<PPC64_SVR4_ABIInfo>(CGT, Kind, SoftFloatABI)) {} |
4951 | |
4952 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { |
4953 | |
4954 | return 1; |
4955 | } |
4956 | |
4957 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
4958 | llvm::Value *Address) const override; |
4959 | }; |
4960 | |
4961 | class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { |
4962 | public: |
4963 | PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} |
4964 | |
4965 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { |
4966 | |
4967 | return 1; |
4968 | } |
4969 | |
4970 | bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
4971 | llvm::Value *Address) const override; |
4972 | }; |
4973 | |
4974 | } |
4975 | |
4976 | |
4977 | |
4978 | bool |
4979 | PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const { |
4980 | |
4981 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
4982 | Ty = EnumTy->getDecl()->getIntegerType(); |
4983 | |
4984 | |
4985 | if (isPromotableIntegerTypeForABI(Ty)) |
4986 | return true; |
4987 | |
4988 | |
4989 | |
4990 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) |
4991 | switch (BT->getKind()) { |
4992 | case BuiltinType::Int: |
4993 | case BuiltinType::UInt: |
4994 | return true; |
4995 | default: |
4996 | break; |
4997 | } |
4998 | |
4999 | if (const auto *EIT = Ty->getAs<ExtIntType>()) |
5000 | if (EIT->getNumBits() < 64) |
5001 | return true; |
5002 | |
5003 | return false; |
5004 | } |
5005 | |
5006 | |
5007 | |
5008 | CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const { |
5009 | |
5010 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) |
5011 | Ty = CTy->getElementType(); |
5012 | |
5013 | |
5014 | |
5015 | if (Ty->isVectorType()) { |
5016 | return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8); |
5017 | } else if (Ty->isRealFloatingType() && |
5018 | &getContext().getFloatTypeSemantics(Ty) == |
5019 | &llvm::APFloat::IEEEquad()) { |
5020 | |
5021 | |
5022 | |
5023 | return CharUnits::fromQuantity(16); |
5024 | } |
5025 | |
5026 | |
5027 | |
5028 | const Type *AlignAsType = nullptr; |
5029 | const Type *EltType = isSingleElementStruct(Ty, getContext()); |
5030 | if (EltType) { |
5031 | const BuiltinType *BT = EltType->getAs<BuiltinType>(); |
5032 | if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) || |
5033 | (BT && BT->isFloatingPoint())) |
5034 | AlignAsType = EltType; |
5035 | } |
5036 | |
5037 | |
5038 | const Type *Base = nullptr; |
5039 | uint64_t Members = 0; |
5040 | if (!AlignAsType && Kind == ELFv2 && |
5041 | isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members)) |
5042 | AlignAsType = Base; |
5043 | |
5044 | |
5045 | if (AlignAsType) { |
5046 | return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8); |
5047 | } |
5048 | |
5049 | |
5050 | |
5051 | if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) { |
5052 | return CharUnits::fromQuantity(16); |
5053 | } |
5054 | |
5055 | return CharUnits::fromQuantity(8); |
5056 | } |
5057 | |
5058 | |
5059 | |
5060 | |
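// isHomogeneousAggregate walks the type recursively: constant arrays multiply
// the member count by the number of elements, records accumulate the counts
// of their bases and fields (taking the maximum for unions), and the leaf case
// records the common base type, which every element must match in vector-ness
// and size. The target-specific SmallEnough hook then bounds the total
// register count.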
5061 | bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base, |
5062 | uint64_t &Members) const { |
5063 | if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { |
5064 | uint64_t NElements = AT->getSize().getZExtValue(); |
5065 | if (NElements == 0) |
5066 | return false; |
5067 | if (!isHomogeneousAggregate(AT->getElementType(), Base, Members)) |
5068 | return false; |
5069 | Members *= NElements; |
5070 | } else if (const RecordType *RT = Ty->getAs<RecordType>()) { |
5071 | const RecordDecl *RD = RT->getDecl(); |
5072 | if (RD->hasFlexibleArrayMember()) |
5073 | return false; |
5074 | |
5075 | Members = 0; |
5076 | |
5077 | |
5078 | |
5079 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { |
5080 | if (!getCXXABI().isPermittedToBeHomogeneousAggregate(CXXRD)) |
5081 | return false; |
5082 | |
5083 | for (const auto &I : CXXRD->bases()) { |
5084 | |
5085 | if (isEmptyRecord(getContext(), I.getType(), true)) |
5086 | continue; |
5087 | |
5088 | uint64_t FldMembers; |
5089 | if (!isHomogeneousAggregate(I.getType(), Base, FldMembers)) |
5090 | return false; |
5091 | |
5092 | Members += FldMembers; |
5093 | } |
5094 | } |
5095 | |
5096 | for (const auto *FD : RD->fields()) { |
5097 | |
5098 | QualType FT = FD->getType(); |
5099 | while (const ConstantArrayType *AT = |
5100 | getContext().getAsConstantArrayType(FT)) { |
5101 | if (AT->getSize().getZExtValue() == 0) |
5102 | return false; |
5103 | FT = AT->getElementType(); |
5104 | } |
5105 | if (isEmptyRecord(getContext(), FT, true)) |
5106 | continue; |
5107 | |
5108 | |
5109 | if (getContext().getLangOpts().CPlusPlus && |
5110 | FD->isZeroLengthBitField(getContext())) |
5111 | continue; |
5112 | |
5113 | uint64_t FldMembers; |
5114 | if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers)) |
5115 | return false; |
5116 | |
5117 | Members = (RD->isUnion() ? |
5118 | std::max(Members, FldMembers) : Members + FldMembers); |
5119 | } |
5120 | |
5121 | if (!Base) |
5122 | return false; |
5123 | |
5124 | |
5125 | if (getContext().getTypeSize(Base) * Members != |
5126 | getContext().getTypeSize(Ty)) |
5127 | return false; |
5128 | } else { |
5129 | Members = 1; |
5130 | if (const ComplexType *CT = Ty->getAs<ComplexType>()) { |
5131 | Members = 2; |
5132 | Ty = CT->getElementType(); |
5133 | } |
5134 | |
5135 | |
5136 | if (!isHomogeneousAggregateBaseType(Ty)) |
5137 | return false; |
5138 | |
5139 | |
5140 | |
5141 | |
5142 | const Type *TyPtr = Ty.getTypePtr(); |
5143 | if (!Base) { |
5144 | Base = TyPtr; |
5145 | |
5146 | |
5147 | if (const VectorType *VT = Base->getAs<VectorType>()) { |
5148 | QualType EltTy = VT->getElementType(); |
5149 | unsigned NumElements = |
5150 | getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy); |
5151 | Base = getContext() |
5152 | .getVectorType(EltTy, NumElements, VT->getVectorKind()) |
5153 | .getTypePtr(); |
5154 | } |
5155 | } |
5156 | |
5157 | if (Base->isVectorType() != TyPtr->isVectorType() || |
5158 | getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr)) |
5159 | return false; |
5160 | } |
5161 | return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members); |
5162 | } |
5163 | |
5164 | bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { |
5165 | |
5166 | |
5167 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { |
5168 | if (BT->getKind() == BuiltinType::Float || |
5169 | BT->getKind() == BuiltinType::Double || |
5170 | BT->getKind() == BuiltinType::LongDouble || |
5171 | (getContext().getTargetInfo().hasFloat128Type() && |
5172 | (BT->getKind() == BuiltinType::Float128))) { |
5173 | if (IsSoftFloatABI) |
5174 | return false; |
5175 | return true; |
5176 | } |
5177 | } |
5178 | if (const VectorType *VT = Ty->getAs<VectorType>()) { |
5179 | if (getContext().getTypeSize(VT) == 128) |
5180 | return true; |
5181 | } |
5182 | return false; |
5183 | } |
5184 | |
5185 | bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough( |
5186 | const Type *Base, uint64_t Members) const { |
5187 | |
5188 | |
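// ELFv2 keeps homogeneous float/vector aggregates in registers only while
// they fit in eight parameter registers; vectors and 128-bit floats count as
// one register each, other base types as one register per 64 bits, which is
// what the computation below encodes.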
5189 | uint32_t NumRegs = |
5190 | ((getContext().getTargetInfo().hasFloat128Type() && |
5191 | Base->isFloat128Type()) || |
5192 | Base->isVectorType()) ? 1 |
5193 | : (getContext().getTypeSize(Base) + 63) / 64; |
5194 | |
5195 | |
5196 | return Members * NumRegs <= 8; |
5197 | } |
5198 | |
5199 | ABIArgInfo |
5200 | PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const { |
5201 | Ty = useFirstFieldIfTransparentUnion(Ty); |
5202 | |
5203 | if (Ty->isAnyComplexType()) |
5204 | return ABIArgInfo::getDirect(); |
5205 | |
5206 | |
5207 | |
5208 | if (Ty->isVectorType()) { |
5209 | uint64_t Size = getContext().getTypeSize(Ty); |
5210 | if (Size > 128) |
5211 | return getNaturalAlignIndirect(Ty, false); |
5212 | else if (Size < 128) { |
5213 | llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); |
5214 | return ABIArgInfo::getDirect(CoerceTy); |
5215 | } |
5216 | } |
5217 | |
5218 | if (const auto *EIT = Ty->getAs<ExtIntType>()) |
5219 | if (EIT->getNumBits() > 128) |
5220 | return getNaturalAlignIndirect(Ty, true); |
5221 | |
5222 | if (isAggregateTypeForABI(Ty)) { |
5223 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) |
5224 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
5225 | |
5226 | uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity(); |
5227 | uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity(); |
5228 | |
5229 | |
5230 | const Type *Base = nullptr; |
5231 | uint64_t Members = 0; |
5232 | if (Kind == ELFv2 && |
5233 | isHomogeneousAggregate(Ty, Base, Members)) { |
5234 | llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); |
5235 | llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); |
5236 | return ABIArgInfo::getDirect(CoerceTy); |
5237 | } |
5238 | |
5239 | |
5240 | |
5241 | |
5242 | |
5243 | uint64_t Bits = getContext().getTypeSize(Ty); |
5244 | if (Bits > 0 && Bits <= 8 * GPRBits) { |
5245 | llvm::Type *CoerceTy; |
5246 | |
5247 | |
5248 | |
5249 | if (Bits <= GPRBits) |
5250 | CoerceTy = |
5251 | llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8)); |
5252 | |
5253 | |
5254 | else { |
5255 | uint64_t RegBits = ABIAlign * 8; |
5256 | uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits; |
5257 | llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits); |
5258 | CoerceTy = llvm::ArrayType::get(RegTy, NumRegs); |
5259 | } |
5260 | |
5261 | return ABIArgInfo::getDirect(CoerceTy); |
5262 | } |
5263 | |
5264 | |
5265 | return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign), |
5266 | true, |
5267 | TyAlign > ABIAlign); |
5268 | } |
5269 | |
5270 | return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) |
5271 | : ABIArgInfo::getDirect()); |
5272 | } |
5273 | |
5274 | ABIArgInfo |
5275 | PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { |
5276 | if (RetTy->isVoidType()) |
5277 | return ABIArgInfo::getIgnore(); |
5278 | |
5279 | if (RetTy->isAnyComplexType()) |
5280 | return ABIArgInfo::getDirect(); |
5281 | |
5282 | |
5283 | |
5284 | if (RetTy->isVectorType()) { |
5285 | uint64_t Size = getContext().getTypeSize(RetTy); |
5286 | if (Size > 128) |
5287 | return getNaturalAlignIndirect(RetTy); |
5288 | else if (Size < 128) { |
5289 | llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); |
5290 | return ABIArgInfo::getDirect(CoerceTy); |
5291 | } |
5292 | } |
5293 | |
5294 | if (const auto *EIT = RetTy->getAs<ExtIntType>()) |
5295 | if (EIT->getNumBits() > 128) |
5296 | return getNaturalAlignIndirect(RetTy, false); |
5297 | |
5298 | if (isAggregateTypeForABI(RetTy)) { |
5299 | |
5300 | const Type *Base = nullptr; |
5301 | uint64_t Members = 0; |
5302 | if (Kind == ELFv2 && |
5303 | isHomogeneousAggregate(RetTy, Base, Members)) { |
5304 | llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); |
5305 | llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); |
5306 | return ABIArgInfo::getDirect(CoerceTy); |
5307 | } |
5308 | |
5309 | |
5310 | uint64_t Bits = getContext().getTypeSize(RetTy); |
5311 | if (Kind == ELFv2 && Bits <= 2 * GPRBits) { |
5312 | if (Bits == 0) |
5313 | return ABIArgInfo::getIgnore(); |
5314 | |
5315 | llvm::Type *CoerceTy; |
5316 | if (Bits > GPRBits) { |
5317 | CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits); |
5318 | CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy); |
5319 | } else |
5320 | CoerceTy = |
5321 | llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8)); |
5322 | return ABIArgInfo::getDirect(CoerceTy); |
5323 | } |
5324 | |
5325 | |
5326 | return getNaturalAlignIndirect(RetTy); |
5327 | } |
5328 | |
5329 | return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) |
5330 | : ABIArgInfo::getDirect()); |
5331 | } |
5332 | |
5333 | |
5334 | Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
5335 | QualType Ty) const { |
5336 | auto TypeInfo = getContext().getTypeInfoInChars(Ty); |
5337 | TypeInfo.Align = getParamTypeAlignment(Ty); |
5338 | |
5339 | CharUnits SlotSize = CharUnits::fromQuantity(8); |
5340 | |
5341 | |
5342 | |
5343 | |
5344 | |
5345 | |
5346 | |
5347 | if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { |
5348 | CharUnits EltSize = TypeInfo.Width / 2; |
5349 | if (EltSize < SlotSize) { |
5350 | Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty, |
5351 | SlotSize * 2, SlotSize, |
5352 | SlotSize, true); |
5353 | |
5354 | Address RealAddr = Addr; |
5355 | Address ImagAddr = RealAddr; |
5356 | if (CGF.CGM.getDataLayout().isBigEndian()) { |
5357 | RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, |
5358 | SlotSize - EltSize); |
5359 | ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr, |
5360 | 2 * SlotSize - EltSize); |
5361 | } else { |
5362 | ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize); |
5363 | } |
5364 | |
5365 | llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType()); |
5366 | RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy); |
5367 | ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy); |
5368 | llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal"); |
5369 | llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag"); |
5370 | |
5371 | Address Temp = CGF.CreateMemTemp(Ty, "vacplx"); |
5372 | CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty), |
5373 | true); |
5374 | return Temp; |
5375 | } |
5376 | } |
5377 | |
5378 | |
5379 | return emitVoidPtrVAArg(CGF, VAListAddr, Ty, false, |
5380 | TypeInfo, SlotSize, true); |
5381 | } |
5382 | |
5383 | bool |
5384 | PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( |
5385 | CodeGen::CodeGenFunction &CGF, |
5386 | llvm::Value *Address) const { |
5387 | return PPC_initDwarfEHRegSizeTable(CGF, Address, true, |
5388 | false); |
5389 | } |
5390 | |
5391 | bool |
5392 | PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, |
5393 | llvm::Value *Address) const { |
5394 | return PPC_initDwarfEHRegSizeTable(CGF, Address, true, |
5395 | false); |
5396 | } |
5397 | |
5398 | |
5399 | |
5400 | |
5401 | |
5402 | namespace { |
5403 | |
5404 | class AArch64ABIInfo : public SwiftABIInfo { |
5405 | public: |
5406 | enum ABIKind { |
5407 | AAPCS = 0, |
5408 | DarwinPCS, |
5409 | Win64 |
5410 | }; |
5411 | |
5412 | private: |
5413 | ABIKind Kind; |
5414 | |
5415 | public: |
5416 | AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) |
5417 | : SwiftABIInfo(CGT), Kind(Kind) {} |
5418 | |
5419 | private: |
5420 | ABIKind getABIKind() const { return Kind; } |
5421 | bool isDarwinPCS() const { return Kind == DarwinPCS; } |
5422 | |
5423 | ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const; |
5424 | ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic, |
5425 | unsigned CallingConvention) const; |
5426 | ABIArgInfo coerceIllegalVector(QualType Ty) const; |
5427 | bool isHomogeneousAggregateBaseType(QualType Ty) const override; |
5428 | bool isHomogeneousAggregateSmallEnough(const Type *Ty, |
5429 | uint64_t Members) const override; |
5430 | |
5431 | bool isIllegalVectorType(QualType Ty) const; |
5432 | |
5433 | void computeInfo(CGFunctionInfo &FI) const override { |
5434 | if (!::classifyReturnType(getCXXABI(), FI, *this)) |
5435 | FI.getReturnInfo() = |
5436 | classifyReturnType(FI.getReturnType(), FI.isVariadic()); |
5437 | |
5438 | for (auto &it : FI.arguments()) |
5439 | it.info = classifyArgumentType(it.type, FI.isVariadic(), |
5440 | FI.getCallingConvention()); |
5441 | } |
5442 | |
5443 | Address EmitDarwinVAArg(Address VAListAddr, QualType Ty, |
5444 | CodeGenFunction &CGF) const; |
5445 | |
5446 | Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty, |
5447 | CodeGenFunction &CGF) const; |
5448 | |
5449 | Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
5450 | QualType Ty) const override { |
5451 | llvm::Type *BaseTy = CGF.ConvertType(Ty); |
5452 | if (isa<llvm::ScalableVectorType>(BaseTy)) |
5453 | llvm::report_fatal_error("Passing SVE types to variadic functions is " |
5454 | "currently not supported"); |
5455 | |
5456 | return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty) |
5457 | : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF) |
5458 | : EmitAAPCSVAArg(VAListAddr, Ty, CGF); |
5459 | } |
5460 | |
5461 | Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, |
5462 | QualType Ty) const override; |
5463 | |
5464 | bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, |
5465 | bool asReturnValue) const override { |
5466 | return occupiesMoreThan(CGT, scalars, 4); |
5467 | } |
5468 | bool isSwiftErrorInRegister() const override { |
5469 | return true; |
5470 | } |
5471 | |
5472 | bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, |
5473 | unsigned elts) const override; |
5474 | |
5475 | bool allowBFloatArgsAndRet() const override { |
5476 | return getTarget().hasBFloat16Type(); |
5477 | } |
5478 | }; |
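     | // Editorial note: Kind selects between the three calling conventions
     | // handled here -- AAPCS (the standard ELF PCS), DarwinPCS (Apple
     | // platforms) and Win64 -- and EmitVAArg above dispatches accordingly:
     | //   Win64     -> EmitMSVAArg      (char*-based va_list)
     | //   DarwinPCS -> EmitDarwinVAArg  (char*-based va_list)
     | //   AAPCS     -> EmitAAPCSVAArg   (five-field va_list structure)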
5479 | |
5480 | class AArch64TargetCodeGenInfo : public TargetCodeGenInfo { |
5481 | public: |
5482 | AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind) |
5483 | : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {} |
5484 | |
5485 | StringRef getARCRetainAutoreleasedReturnValueMarker() const override { |
5486 | return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue"; |
5487 | } |
5488 | |
5489 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { |
5490 | return 31; |
5491 | } |
5492 | |
5493 | bool doesReturnSlotInterfereWithArgs() const override { return false; } |
5494 | |
5495 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
5496 | CodeGen::CodeGenModule &CGM) const override { |
5497 | const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); |
5498 | if (!FD) |
5499 | return; |
5500 | |
5501 | const auto *TA = FD->getAttr<TargetAttr>(); |
5502 | if (TA == nullptr) |
5503 | return; |
5504 | |
5505 | ParsedTargetAttr Attr = TA->parse(); |
5506 | if (Attr.BranchProtection.empty()) |
5507 | return; |
5508 | |
5509 | TargetInfo::BranchProtectionInfo BPI; |
5510 | StringRef Error; |
5511 | (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection, |
5512 | BPI, Error); |
5513 | assert(Error.empty()); |
5514 | |
5515 | auto *Fn = cast<llvm::Function>(GV); |
5516 | static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"}; |
5517 | Fn->addFnAttr("sign-return-address", SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]); |
5518 | |
5519 | if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) { |
5520 | Fn->addFnAttr("sign-return-address-key", |
5521 | BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey |
5522 | ? "a_key" |
5523 | : "b_key"); |
5524 | } |
5525 | |
5526 | Fn->addFnAttr("branch-target-enforcement", |
5527 | BPI.BranchTargetEnforcement ? "true" : "false"); |
5528 | } |
5529 | |
5530 | bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF, |
5531 | llvm::Type *Ty) const override { |
5532 | if (CGF.getTarget().hasFeature("ls64")) { |
5533 | auto *ST = dyn_cast<llvm::StructType>(Ty); |
5534 | if (ST && ST->getNumElements() == 1) { |
5535 | auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0)); |
5536 | if (AT && AT->getNumElements() == 8 && |
5537 | AT->getElementType()->isIntegerTy(64)) |
5538 | return true; |
5539 | } |
5540 | } |
5541 | return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty); |
5542 | } |
5543 | }; |
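     | // Editorial note: setTargetAttributes above translates a per-function
     | // branch-protection target attribute into IR function attributes. A
     | // hedged illustration (declaration assumed, not from this file):
     | //   __attribute__((target("branch-protection=pac-ret+leaf+bti")))
     | //   void f(void);
     | // would yield roughly
     | //   "sign-return-address"="all" "sign-return-address-key"="a_key"
     | //   "branch-target-enforcement"="true"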
5544 | |
5545 | class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo { |
5546 | public: |
5547 | WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K) |
5548 | : AArch64TargetCodeGenInfo(CGT, K) {} |
5549 | |
5550 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
5551 | CodeGen::CodeGenModule &CGM) const override; |
5552 | |
5553 | void getDependentLibraryOption(llvm::StringRef Lib, |
5554 | llvm::SmallString<24> &Opt) const override { |
5555 | Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib); |
5556 | } |
5557 | |
5558 | void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value, |
5559 | llvm::SmallString<32> &Opt) const override { |
5560 | Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; |
5561 | } |
5562 | }; |
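     | // Editorial note: these two hooks back the MSVC-style pragmas, e.g.
     | //   #pragma comment(lib, "kernel32")
     | // becomes the embedded linker directive /DEFAULTLIB:kernel32.lib, and
     | //   #pragma detect_mismatch("name", "value")
     | // becomes /FAILIFMISMATCH:"name=value".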
5563 | |
5564 | void WindowsAArch64TargetCodeGenInfo::setTargetAttributes( |
5565 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { |
5566 | AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); |
5567 | if (GV->isDeclaration()) |
5568 | return; |
5569 | addStackProbeTargetAttributes(D, GV, CGM); |
5570 | } |
5571 | } |
5572 | |
5573 | ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const { |
5574 | assert(Ty->isVectorType() && "expected vector type!"); |
5575 | |
5576 | const auto *VT = Ty->castAs<VectorType>(); |
5577 | if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) { |
5578 | assert(VT->getElementType()->isBuiltinType() && "expected builtin type!"); |
5579 | assert(VT->getElementType()->castAs<BuiltinType>()->getKind() == |
5580 | BuiltinType::UChar && |
5581 | "unexpected builtin type for SVE predicate!"); |
5582 | return ABIArgInfo::getDirect(llvm::ScalableVectorType::get( |
5583 | llvm::Type::getInt1Ty(getVMContext()), 16)); |
5584 | } |
5585 | |
5586 | if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) { |
5587 | assert(VT->getElementType()->isBuiltinType() && "expected builtin type!"); |
5588 | |
5589 | const auto *BT = VT->getElementType()->castAs<BuiltinType>(); |
5590 | llvm::ScalableVectorType *ResType = nullptr; |
5591 | switch (BT->getKind()) { |
5592 | default: |
5593 | llvm_unreachable("unexpected builtin type for SVE vector!"); |
5594 | case BuiltinType::SChar: |
5595 | case BuiltinType::UChar: |
5596 | ResType = llvm::ScalableVectorType::get( |
5597 | llvm::Type::getInt8Ty(getVMContext()), 16); |
5598 | break; |
5599 | case BuiltinType::Short: |
5600 | case BuiltinType::UShort: |
5601 | ResType = llvm::ScalableVectorType::get( |
5602 | llvm::Type::getInt16Ty(getVMContext()), 8); |
5603 | break; |
5604 | case BuiltinType::Int: |
5605 | case BuiltinType::UInt: |
5606 | ResType = llvm::ScalableVectorType::get( |
5607 | llvm::Type::getInt32Ty(getVMContext()), 4); |
5608 | break; |
5609 | case BuiltinType::Long: |
5610 | case BuiltinType::ULong: |
5611 | ResType = llvm::ScalableVectorType::get( |
5612 | llvm::Type::getInt64Ty(getVMContext()), 2); |
5613 | break; |
5614 | case BuiltinType::Half: |
5615 | ResType = llvm::ScalableVectorType::get( |
5616 | llvm::Type::getHalfTy(getVMContext()), 8); |
5617 | break; |
5618 | case BuiltinType::Float: |
5619 | ResType = llvm::ScalableVectorType::get( |
5620 | llvm::Type::getFloatTy(getVMContext()), 4); |
5621 | break; |
5622 | case BuiltinType::Double: |
5623 | ResType = llvm::ScalableVectorType::get( |
5624 | llvm::Type::getDoubleTy(getVMContext()), 2); |
5625 | break; |
5626 | case BuiltinType::BFloat16: |
5627 | ResType = llvm::ScalableVectorType::get( |
5628 | llvm::Type::getBFloatTy(getVMContext()), 8); |
5629 | break; |
5630 | } |
5631 | return ABIArgInfo::getDirect(ResType); |
5632 | } |
5633 | |
5634 | uint64_t Size = getContext().getTypeSize(Ty); |
5635 | |
5636 | if (isAndroid() && (Size <= 16)) { |
5637 | llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext()); |
5638 | return ABIArgInfo::getDirect(ResType); |
5639 | } |
5640 | if (Size <= 32) { |
5641 | llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext()); |
5642 | return ABIArgInfo::getDirect(ResType); |
5643 | } |
5644 | if (Size == 64) { |
5645 | auto *ResType = |
5646 | llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2); |
5647 | return ABIArgInfo::getDirect(ResType); |
5648 | } |
5649 | if (Size == 128) { |
5650 | auto *ResType = |
5651 | llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4); |
5652 | return ABIArgInfo::getDirect(ResType); |
5653 | } |
5654 | return getNaturalAlignIndirect(Ty, /*ByVal=*/ false);
5655 | } |
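     | // Editorial note: illustrative coercions implied by the code above
     | // (the concrete source types are assumptions for the example):
     | //   fixed-length SVE predicate            -> <vscale x 16 x i1>
     | //   fixed-length SVE vector of 32-bit int -> <vscale x 4 x i32>
     | //   other vector, total size <= 32 bits   -> i32 (i16 on Android
     | //                                            when <= 16 bits)
     | //   other vector, exactly 64 bits         -> <2 x i32>
     | //   other vector, exactly 128 bits        -> <4 x i32>
     | //   anything larger                       -> passed indirectly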
5656 | |
5657 | ABIArgInfo |
5658 | AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadic, |
5659 | unsigned CallingConvention) const { |
5660 | Ty = useFirstFieldIfTransparentUnion(Ty); |
5661 | |
5662 | // Coerce illegal vector types into a legal representation first.
5663 | if (isIllegalVectorType(Ty)) |
5664 | return coerceIllegalVector(Ty); |
5665 | |
5666 | if (!isAggregateTypeForABI(Ty)) { |
5667 | // Treat an enum type as its underlying integer type.
5668 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
5669 | Ty = EnumTy->getDecl()->getIntegerType(); |
5670 | |
5671 | if (const auto *EIT = Ty->getAs<ExtIntType>()) |
5672 | if (EIT->getNumBits() > 128) |
5673 | return getNaturalAlignIndirect(Ty); |
5674 | |
5675 | return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS() |
5676 | ? ABIArgInfo::getExtend(Ty) |
5677 | : ABIArgInfo::getDirect()); |
5678 | } |
5679 | |
5680 | // Records with a non-trivial destructor or copy constructor are always
5681 | // passed indirectly.
5682 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { |
5683 | return getNaturalAlignIndirect(Ty, RAA == |
5684 | CGCXXABI::RAA_DirectInMemory); |
5685 | } |
5686 | |
5687 | // Empty records are ignored on Darwin; elsewhere, in C++ mode, they are
5688 | // still passed (as an i8) for GNU compatibility.
5689 | uint64_t Size = getContext().getTypeSize(Ty); |
5690 | bool IsEmpty = isEmptyRecord(getContext(), Ty, /*AllowArrays=*/ true);
5691 | if (IsEmpty || Size == 0) { |
5692 | if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS()) |
5693 | return ABIArgInfo::getIgnore(); |
5694 | |
5695 | // In (non-Darwin) C++ mode only a record that is both empty and of size
5696 | // zero is ignored; any other empty or zero-sized case is passed as an i8.
5697 | if (IsEmpty && Size == 0) |
5698 | return ABIArgInfo::getIgnore(); |
5699 | return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); |
5700 | } |
5701 | |
5702 | // Check for homogeneous floating-point/short-vector aggregates (HFAs/HVAs).
5703 | const Type *Base = nullptr; |
5704 | uint64_t Members = 0; |
5705 | bool IsWin64 = Kind == Win64 || CallingConvention == llvm::CallingConv::Win64; |
5706 | bool IsWinVariadic = IsWin64 && IsVariadic; |
5707 | // In variadic functions on Windows, composite types are all treated
5708 | // alike; HFAs/HVAs get no special handling.
5709 | if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) { |
5710 | if (Kind != AArch64ABIInfo::AAPCS) |
5711 | return ABIArgInfo::getDirect( |
5712 | llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members)); |
5713 | |
5714 | // Under AAPCS, over-aligned HFAs have their argument alignment capped
5715 | // at 16 bytes; otherwise the default alignment is kept (Align == 0).
5716 | unsigned Align = |
5717 | getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity(); |
5718 | unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity(); |
5719 | Align = (Align > BaseAlign && Align >= 16) ? 16 : 0; |
5720 | return ABIArgInfo::getDirect( |
5721 | llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0, |
5722 | nullptr, /*CanBeFlattened=*/ true, Align);
5723 | } |
5724 | |
5725 | // Aggregates of 16 bytes or less are passed directly, in registers or on the stack.
5726 | if (Size <= 128) { |
5727 | // On RenderScript, coerce small aggregates to an integer array of the
5728 | // same size and alignment.
5729 | if (getTarget().isRenderScriptTarget()) { |
5730 | return coerceToIntArray(Ty, getContext(), getVMContext()); |
5731 | } |
5732 | unsigned Alignment; |
5733 | if (Kind == AArch64ABIInfo::AAPCS) { |
5734 | Alignment = getContext().getTypeUnadjustedAlign(Ty); |
5735 | Alignment = Alignment < 128 ? 64 : 128; |
5736 | } else { |
5737 | Alignment = std::max(getContext().getTypeAlign(Ty), |
5738 | (unsigned)getTarget().getPointerWidth(0)); |
5739 | } |
5740 | Size = llvm::alignTo(Size, Alignment); |
5741 | |
5742 | // A 16-byte aggregate with 8-byte alignment is passed as [2 x i64];
5743 | // with 16-byte alignment it is passed as a single i128.
5744 | llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment); |
5745 | return ABIArgInfo::getDirect( |
5746 | Size == Alignment ? BaseTy |
5747 | : llvm::ArrayType::get(BaseTy, Size / Alignment)); |
5748 | } |
5749 | |
5750 | return getNaturalAlignIndirect(Ty, /*ByVal=*/ false);
5751 | } |
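     | // Editorial note: worked examples under the rules above (the struct
     | // definitions are assumptions for illustration):
     | //   struct HFA3 { float x, y, z; };   // homogeneous, 3 members
     | //                                     //   -> direct as [3 x float]
     | //   struct P    { int a; char b; };   // 8 bytes, 4-byte aligned
     | //                                     //   -> direct as i64 (AAPCS)
     | //   struct Big  { char buf[24]; };    // > 16 bytes, not an HFA
     | //                                     //   -> indirect, natural align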
5752 | |
5753 | ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy, |
5754 | bool IsVariadic) const { |
5755 | if (RetTy->isVoidType()) |
5756 | return ABIArgInfo::getIgnore(); |
5757 | |
5758 | if (const auto *VT = RetTy->getAs<VectorType>()) { |
5759 | if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector || |
5760 | VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) |
5761 | return coerceIllegalVector(RetTy); |
5762 | } |
5763 | |
5764 | // Vectors wider than 128 bits are returned indirectly (in memory).
5765 | if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) |
5766 | return getNaturalAlignIndirect(RetTy); |
5767 | |
5768 | if (!isAggregateTypeForABI(RetTy)) { |
5769 | // Treat an enum type as its underlying integer type.
5770 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) |
5771 | RetTy = EnumTy->getDecl()->getIntegerType(); |
5772 | |
5773 | if (const auto *EIT = RetTy->getAs<ExtIntType>()) |
5774 | if (EIT->getNumBits() > 128) |
5775 | return getNaturalAlignIndirect(RetTy); |
5776 | |
5777 | return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS() |
5778 | ? ABIArgInfo::getExtend(RetTy) |
5779 | : ABIArgInfo::getDirect()); |
5780 | } |
5781 | |
5782 | uint64_t Size = getContext().getTypeSize(RetTy); |
5783 | if (isEmptyRecord(getContext(), RetTy, /*AllowArrays=*/ true) || Size == 0)
5784 | return ABIArgInfo::getIgnore(); |
5785 | |
5786 | const Type *Base = nullptr; |
5787 | uint64_t Members = 0; |
5788 | if (isHomogeneousAggregate(RetTy, Base, Members) && |
5789 | !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 && |
5790 | IsVariadic)) |
5791 | // Homogeneous floating-point/vector aggregates are returned directly.
5792 | return ABIArgInfo::getDirect(); |
5793 | |
5794 | // Aggregates of 16 bytes or less are returned directly in registers.
5795 | if (Size <= 128) { |
5796 | // On RenderScript, coerce small aggregates to an integer array of the
5797 | // same size and alignment.
5798 | if (getTarget().isRenderScriptTarget()) { |
5799 | return coerceToIntArray(RetTy, getContext(), getVMContext()); |
5800 | } |
5801 | |
5802 | if (Size <= 64 && getDataLayout().isLittleEndian()) { |
5803 | // Composite types are returned in the lower bits of a 64-bit register
5804 | // on little-endian targets but in the upper bits on big-endian ones,
5805 | // whereas integers always use the lower bits and are never rounded up
5806 | // to 64 bits. Skipping the round-up is therefore only safe on
5807 | // little-endian targets, where a composite result stays
5808 | // indistinguishable from an integer of the same width.
5809 | return ABIArgInfo::getDirect( |
5810 | llvm::IntegerType::get(getVMContext(), Size)); |
5811 | } |
5812 | |
5813 | unsigned Alignment = getContext().getTypeAlign(RetTy); |
5814 | Size = llvm::alignTo(Size, 64); // round up to a multiple of 8 bytes
5815 | 
5816 | // A 16-byte result with less than 16-byte alignment is returned as
5817 | // [2 x i64]; otherwise it is returned as one integer of the rounded size.
5818 | if (Alignment < 128 && Size == 128) { |
5819 | llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); |
5820 | return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); |
5821 | } |
5822 | return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); |
5823 | } |
5824 | |
5825 | return getNaturalAlignIndirect(RetTy); |
5826 | } |
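     | // Editorial note: under the rules above, returning
     | //   struct { int a, b; }  (8 bytes)        -> i64 on little-endian,
     | //   a 16-byte struct with 8-byte alignment -> [2 x i64],
     | //   anything larger than 16 bytes          -> indirectly via sret.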
5827 | |
5828 | |
5829 | bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const { |
5830 | if (const VectorType *VT = Ty->getAs<VectorType>()) { |
5831 | // Fixed-length SVE vectors are passed and returned as scalable vectors,
5832 | // so they must be coerced away from their fixed-length representation
5833 | // (see coerceIllegalVector above).
5834 | if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector || |
5835 | VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) |
5836 | return true; |
5837 | |
5838 | // Check whether the vector's element count and total size are legal.
5839 | unsigned NumElements = VT->getNumElements(); |
5840 | uint64_t Size = getContext().getTypeSize(VT); |
5841 | // The number of elements must be a power of two.
5842 | if (!llvm::isPowerOf2_32(NumElements)) |
5843 | return true; |
5844 | 
5845 | // arm64_32 (Mach-O) follows the 32-bit ARM logic here, which treats any
5846 | // vector wider than 32 bits as legal.
5847 | llvm::Triple Triple = getTarget().getTriple(); |
5848 | if (Triple.getArch() == llvm::Triple::aarch64_32 && |
5849 | Triple.isOSBinFormatMachO()) |
5850 | return Size <= 32; |
5851 | |
5852 | return Size != 64 && (Size != 128 || NumElements == 1); |
5853 | } |
5854 | return false; |
5855 | } |
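     | // Editorial note: for example, a 256-bit generic vector (say one
     | // declared with vector_size(32), an assumption for illustration) is
     | // neither 64 nor 128 bits wide, so it is "illegal" here and ends up
     | // passed indirectly by coerceIllegalVector.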
5856 | |
5857 | bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize, |
5858 | llvm::Type *eltTy, |
5859 | unsigned elts) const { |
5860 | if (!llvm::isPowerOf2_32(elts)) |
5861 | return false; |
5862 | if (totalSize.getQuantity() != 8 && |
5863 | (totalSize.getQuantity() != 16 || elts == 1)) |
5864 | return false; |
5865 | return true; |
5866 | } |
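     | // Editorial note: Swift lowering therefore only keeps vectors whose
     | // total size is exactly 8 or 16 bytes, whose element count is a power
     | // of two, and where a 16-byte vector has more than one element.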
5867 | |
5868 | bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { |
5869 | // The AAPCS64 base types for a homogeneous aggregate are floating-point
5870 | // types and 64-bit or 128-bit short vectors; anything else disqualifies
5871 | // the aggregate.
5872 | 
5873 | if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { |
5874 | if (BT->isFloatingPoint()) |
5875 | return true; |
5876 | } else if (const VectorType *VT = Ty->getAs<VectorType>()) { |
5877 | unsigned VecSize = getContext().getTypeSize(VT); |
5878 | if (VecSize == 64 || VecSize == 128) |
5879 | return true; |
5880 | } |
5881 | return false; |
5882 | } |
5883 | |
5884 | bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, |
5885 | uint64_t Members) const { |
5886 | return Members <= 4; |
5887 | } |
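     | // Editorial note: together these two predicates implement the AAPCS64
     | // HFA/HVA rule: every member must share one base type that is either
     | // floating-point or a 64/128-bit short vector, and there may be at
     | // most four members. For example (assumed types, for illustration):
     | //   struct V4D { double d[4]; };   // qualifies (4 doubles)
     | //   struct V5D { double d[5]; };   // does not (5 members)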
5888 | |
5889 | Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty, |
5890 | CodeGenFunction &CGF) const { |
5891 | ABIArgInfo AI = classifyArgumentType(Ty, true, |
5892 | CGF.CurFnInfo->getCallingConvention()); |
5893 | bool IsIndirect = AI.isIndirect(); |
5894 | |
5895 | llvm::Type *BaseTy = CGF.ConvertType(Ty); |
5896 | if (IsIndirect) |
5897 | BaseTy = llvm::PointerType::getUnqual(BaseTy); |
5898 | else if (AI.getCoerceToType()) |
5899 | BaseTy = AI.getCoerceToType(); |
5900 | |
5901 | unsigned NumRegs = 1; |
5902 | if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) { |
5903 | BaseTy = ArrTy->getElementType(); |
5904 | NumRegs = ArrTy->getNumElements(); |
5905 | } |
5906 | bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy(); |
5907 | |
5908 | // The AArch64 AAPCS va_list is a five-field structure:
5909 | //
5910 | //   struct {
5911 | //     void *__stack;   // next stacked argument
5912 | //     void *__gr_top;  // end of the general-purpose register save area
5913 | //     void *__vr_top;  // end of the FP/SIMD register save area
5914 | //     int __gr_offs;   // negative offset from __gr_top, or >= 0
5915 | //     int __vr_offs;   // negative offset from __vr_top, or >= 0
5916 | //   };
5917 | //
5918 | // The struct GEP indices used below (0..4) refer to these fields.
5919 | llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg"); |
5920 | llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); |
5921 | llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack"); |
5922 | llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); |
5923 | |
5924 | CharUnits TySize = getContext().getTypeSizeInChars(Ty); |
5925 | CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty); |
5926 | |
5927 | Address reg_offs_p = Address::invalid(); |
5928 | llvm::Value *reg_offs = nullptr; |
5929 | int reg_top_index; |
5930 | int RegSize = IsIndirect ? 8 : TySize.getQuantity(); |
5931 | if (!IsFPR) { |
5932 | // __gr_offs is field 3 of the va_list.
5933 | reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p"); |
5934 | reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs"); |
5935 | reg_top_index = 1; |
5936 | RegSize = llvm::alignTo(RegSize, 8); |
5937 | } else { |
5938 | // __vr_offs is field 4 of the va_list.
5939 | reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p"); |
5940 | reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs"); |
5941 | reg_top_index = 2; |
5942 | RegSize = 16 * NumRegs; |
5943 | } |
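     | // Editorial note: general-purpose arguments are saved in 8-byte slots
     | // (x0-x7, so __gr_offs typically runs from -64 up to 0), while FP/SIMD
     | // arguments are saved in 16-byte slots (q0-q7, so __vr_offs runs from
     | // -128 up to 0); that is why RegSize is rounded to 8 above for integer
     | // arguments but is 16 * NumRegs for floating-point/vector ones.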
5944 | |
5945 | //=======================================
5946 | // Find out where the argument was passed
5947 | //=======================================
5948 | 
5949 | // If reg_offs is already >= 0, the register save area for this class of
5950 | // argument has been exhausted and the value lives on the stack; in that
5951 | // case reg_offs is not updated any further.
5952 | 
5953 | llvm::Value *UsingStack = nullptr; |
5954 | UsingStack = CGF.Builder.CreateICmpSGE( |
5955 | reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0)); |
5956 | |
5957 | CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock); |
5958 | |
5959 | // Otherwise some arguments of this class may still fit in registers;
5960 | // the question is whether this particular one does.
5961 | CGF.EmitBlock(MaybeRegBlock); |
5962 | |
5963 | // Over-aligned integer arguments (for example a struct containing an
5964 | // __int128) are passed in an aligned register pair, so round reg_offs
5965 | // up to the required alignment first.
5966 | if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) { |
5967 | int Align = TyAlign.getQuantity(); |
5968 | |
5969 | reg_offs = CGF.Builder.CreateAdd( |
5970 | reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1), |
5971 | "align_regoffs"); |
5972 | reg_offs = CGF.Builder.CreateAnd( |
5973 | reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align), |
5974 | "aligned_regoffs"); |
5975 | } |
5976 | 
5977 | // Record the updated offset so the next va_arg on this va_list starts
5978 | // after the registers consumed here (whether or not this argument
5979 | // actually fits).
5980 | 
5981 | llvm::Value *NewOffset = nullptr; |
5982 | NewOffset = CGF.Builder.CreateAdd( |
5983 | reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs"); |
5984 | CGF.Builder.CreateStore(NewOffset, reg_offs_p); |
5985 | |
5986 | // Now decide whether the argument was really passed in registers: it
5987 | // was if the updated offset is still <= 0.
5988 | llvm::Value *InRegs = nullptr; |
5989 | InRegs = CGF.Builder.CreateICmpSLE( |
5990 | NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg"); |
5991 | |
5992 | CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock); |
5993 | |
5994 | //=======================================
5995 | // Argument was in registers
5996 | //=======================================
5997 | 
5998 | // Compute the address of the saved value from reg_top and the negative
5999 | // reg_offs established above.
6000 | CGF.EmitBlock(InRegBlock); |
6001 | |
6002 | llvm::Value *reg_top = nullptr; |
6003 | Address reg_top_p = |
6004 | CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p"); |
6005 | reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top"); |
6006 | Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs), |
6007 | CharUnits::fromQuantity(IsFPR ? 16 : 8)); |
6008 | Address RegAddr = Address::invalid(); |
6009 | llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty); |
6010 | |
6011 | if (IsIndirect) { |
6012 | // For indirectly-passed values the register slot holds a pointer to
6013 | // the actual object, so the in-memory type is a pointer to MemTy.
6014 | MemTy = llvm::PointerType::getUnqual(MemTy); |
6015 | } |
6016 | |
6017 | const Type *Base = nullptr; |
6018 | uint64_t NumMembers = 0; |
6019 | bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers); |
6020 | if (IsHFA && NumMembers > 1) { |
6021 | // Members of an HFA/HVA passed in registers are saved 16 bytes apart
6022 | // (one q-register slot each) regardless of their size, so copy them
6023 | // into a temporary where they are laid out contiguously.
6024 | 
6025 | assert(!IsIndirect && "Homogeneous aggregates should be passed directly"); |
6026 | auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0)); |
6027 | llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0)); |
6028 | llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers); |
6029 | Address Tmp = CGF.CreateTempAlloca(HFATy, |
6030 | std::max(TyAlign, BaseTyInfo.Align)); |
6031 | |
6032 | // On big-endian targets a member smaller than 16 bytes sits at the
6033 | // high-address end of its register slot, so offset each load.
6034 | if (CGF.CGM.getDataLayout().isBigEndian() && |
6035 | BaseTyInfo.Width.getQuantity() < 16) |
6036 | Offset = 16 - BaseTyInfo.Width.getQuantity(); |
6037 | |
6038 | for (unsigned i = 0; i < NumMembers; ++i) { |
6039 | CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset); |
6040 | Address LoadAddr = |
6041 | CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset); |
6042 | LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy); |
6043 | |
6044 | Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i); |
6045 | |
6046 | llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr); |
6047 | CGF.Builder.CreateStore(Elem, StoreAddr); |
6048 | } |
6049 | |
6050 | RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy); |
6051 | } else { |
6052 | // Otherwise the value is contiguous in memory. On big-endian targets a
6053 | // value smaller than its slot is stored at the high end of the slot, so
6054 | // adjust the address before the bitcast below.
6055 | CharUnits SlotSize = BaseAddr.getAlignment(); |
6056 | if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect && |
6057 | (IsHFA || !isAggregateTypeForABI(Ty)) && |
6058 | TySize < SlotSize) { |
6059 | CharUnits Offset = SlotSize - TySize; |
6060 | BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset); |
6061 | } |
6062 | |
6063 | RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy); |
6064 | } |
6065 | |
6066 | CGF.EmitBranch(ContBlock); |
6067 | |
6068 | //=======================================
6069 | // Argument was on the stack
6070 | //=======================================
6071 | CGF.EmitBlock(OnStackBlock); |
6072 | |
6073 | Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p"); |
6074 | llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack"); |
6075 | |
6076 | // The stack pointer in the va_list only guarantees 8-byte alignment,
6077 | // so over-aligned types need the pointer rounded up first.
6078 | if (!IsIndirect && TyAlign.getQuantity() > 8) { |
6079 | int Align = TyAlign.getQuantity(); |
6080 | |
6081 | OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty); |
6082 | |
6083 | OnStackPtr = CGF.Builder.CreateAdd( |
6084 | OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1), |
6085 | "align_stack"); |
6086 | OnStackPtr = CGF.Builder.CreateAnd( |
6087 | OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align), |
6088 | "align_stack"); |
6089 | |
6090 | OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy); |
6091 | } |
6092 | Address OnStackAddr(OnStackPtr, |
6093 | std::max(CharUnits::fromQuantity(8), TyAlign)); |
6094 | |
6095 | // Stack slots are always a multiple of 8 bytes.
6096 | CharUnits StackSlotSize = CharUnits::fromQuantity(8); |
6097 | CharUnits StackSize; |
6098 | if (IsIndirect) |
6099 | StackSize = StackSlotSize; |
6100 | else |
6101 | StackSize = TySize.alignTo(StackSlotSize); |
6102 | |
6103 | llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize); |
6104 | llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP( |
6105 | CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack"); |
6106 | 
6107 | // Store the advanced stack pointer back for the next va_arg call.
6108 | CGF.Builder.CreateStore(NewStack, stack_p); |
6109 | |
6110 | if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) && |
6111 | TySize < StackSlotSize) { |
6112 | CharUnits Offset = StackSlotSize - TySize; |
6113 | OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset); |
6114 | } |
6115 | |
6116 | OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy); |
6117 | |
6118 | CGF.EmitBranch(ContBlock); |
6119 | |
6120 | //=======================================
6121 | // Tidy up: merge the register and stack addresses
6122 | //=======================================
6123 | CGF.EmitBlock(ContBlock); |
6124 | |
6125 | Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, |
6126 | On |