Bug Summary

File: src/gnu/usr.bin/clang/libclangSema/../../../llvm/clang/lib/Sema/SemaChecking.cpp
Warning: line 16425, column 15
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name SemaChecking.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangSema/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libclangSema/obj/../include/clang/Sema -I /usr/src/gnu/usr.bin/clang/libclangSema/../../../llvm/clang/include -I /usr/src/gnu/usr.bin/clang/libclangSema/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libclangSema/../include -I /usr/src/gnu/usr.bin/clang/libclangSema/obj -I /usr/src/gnu/usr.bin/clang/libclangSema/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangSema/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops 
-vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libclangSema/../../../llvm/clang/lib/Sema/SemaChecking.cpp

/usr/src/gnu/usr.bin/clang/libclangSema/../../../llvm/clang/lib/Sema/SemaChecking.cpp

<
1//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements extra semantic analysis beyond what is enforced
10// by the C type system.
11//
12//===----------------------------------------------------------------------===//
13
14#include "clang/AST/APValue.h"
15#include "clang/AST/ASTContext.h"
16#include "clang/AST/Attr.h"
17#include "clang/AST/AttrIterator.h"
18#include "clang/AST/CharUnits.h"
19#include "clang/AST/Decl.h"
20#include "clang/AST/DeclBase.h"
21#include "clang/AST/DeclCXX.h"
22#include "clang/AST/DeclObjC.h"
23#include "clang/AST/DeclarationName.h"
24#include "clang/AST/EvaluatedExprVisitor.h"
25#include "clang/AST/Expr.h"
26#include "clang/AST/ExprCXX.h"
27#include "clang/AST/ExprObjC.h"
28#include "clang/AST/ExprOpenMP.h"
29#include "clang/AST/FormatString.h"
30#include "clang/AST/NSAPI.h"
31#include "clang/AST/NonTrivialTypeVisitor.h"
32#include "clang/AST/OperationKinds.h"
33#include "clang/AST/RecordLayout.h"
34#include "clang/AST/Stmt.h"
35#include "clang/AST/TemplateBase.h"
36#include "clang/AST/Type.h"
37#include "clang/AST/TypeLoc.h"
38#include "clang/AST/UnresolvedSet.h"
39#include "clang/Basic/AddressSpaces.h"
40#include "clang/Basic/CharInfo.h"
41#include "clang/Basic/Diagnostic.h"
42#include "clang/Basic/IdentifierTable.h"
43#include "clang/Basic/LLVM.h"
44#include "clang/Basic/LangOptions.h"
45#include "clang/Basic/OpenCLOptions.h"
46#include "clang/Basic/OperatorKinds.h"
47#include "clang/Basic/PartialDiagnostic.h"
48#include "clang/Basic/SourceLocation.h"
49#include "clang/Basic/SourceManager.h"
50#include "clang/Basic/Specifiers.h"
51#include "clang/Basic/SyncScope.h"
52#include "clang/Basic/TargetBuiltins.h"
53#include "clang/Basic/TargetCXXABI.h"
54#include "clang/Basic/TargetInfo.h"
55#include "clang/Basic/TypeTraits.h"
56#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
57#include "clang/Sema/Initialization.h"
58#include "clang/Sema/Lookup.h"
59#include "clang/Sema/Ownership.h"
60#include "clang/Sema/Scope.h"
61#include "clang/Sema/ScopeInfo.h"
62#include "clang/Sema/Sema.h"
63#include "clang/Sema/SemaInternal.h"
64#include "llvm/ADT/APFloat.h"
65#include "llvm/ADT/APInt.h"
66#include "llvm/ADT/APSInt.h"
67#include "llvm/ADT/ArrayRef.h"
68#include "llvm/ADT/DenseMap.h"
69#include "llvm/ADT/FoldingSet.h"
70#include "llvm/ADT/None.h"
71#include "llvm/ADT/Optional.h"
72#include "llvm/ADT/STLExtras.h"
73#include "llvm/ADT/SmallBitVector.h"
74#include "llvm/ADT/SmallPtrSet.h"
75#include "llvm/ADT/SmallString.h"
76#include "llvm/ADT/SmallVector.h"
77#include "llvm/ADT/StringRef.h"
78#include "llvm/ADT/StringSet.h"
79#include "llvm/ADT/StringSwitch.h"
80#include "llvm/ADT/Triple.h"
81#include "llvm/Support/AtomicOrdering.h"
82#include "llvm/Support/Casting.h"
83#include "llvm/Support/Compiler.h"
84#include "llvm/Support/ConvertUTF.h"
85#include "llvm/Support/ErrorHandling.h"
86#include "llvm/Support/Format.h"
87#include "llvm/Support/Locale.h"
88#include "llvm/Support/MathExtras.h"
89#include "llvm/Support/SaveAndRestore.h"
90#include "llvm/Support/raw_ostream.h"
91#include <algorithm>
92#include <bitset>
93#include <cassert>
94#include <cctype>
95#include <cstddef>
96#include <cstdint>
97#include <functional>
98#include <limits>
99#include <string>
100#include <tuple>
101#include <utility>
102
103using namespace clang;
104using namespace sema;
105
/// Map a byte offset within a string literal back to the precise source
/// location of that byte (delegates to StringLiteral::getLocationOfByte,
/// which accounts for escape sequences and literal concatenation).
SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}
111
112/// Checks that a call expression's argument count is the desired number.
113/// This is useful when doing custom type-checking. Returns true on error.
114static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
115 unsigned argCount = call->getNumArgs();
116 if (argCount == desiredArgCount) return false;
117
118 if (argCount < desiredArgCount)
119 return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
120 << 0 /*function call*/ << desiredArgCount << argCount
121 << call->getSourceRange();
122
123 // Highlight all the excess arguments.
124 SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
125 call->getArg(argCount - 1)->getEndLoc());
126
127 return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
128 << 0 /*function call*/ << desiredArgCount << argCount
129 << call->getArg(1)->getSourceRange();
130}
131
132/// Check that the first argument to __builtin_annotation is an integer
133/// and the second argument is a non-wide string literal.
134static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
135 if (checkArgCount(S, TheCall, 2))
136 return true;
137
138 // First argument should be an integer.
139 Expr *ValArg = TheCall->getArg(0);
140 QualType Ty = ValArg->getType();
141 if (!Ty->isIntegerType()) {
142 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
143 << ValArg->getSourceRange();
144 return true;
145 }
146
147 // Second argument should be a constant string.
148 Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
149 StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
150 if (!Literal || !Literal->isAscii()) {
151 S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
152 << StrArg->getSourceRange();
153 return true;
154 }
155
156 TheCall->setType(Ty);
157 return false;
158}
159
160static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
161 // We need at least one argument.
162 if (TheCall->getNumArgs() < 1) {
163 S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
164 << 0 << 1 << TheCall->getNumArgs()
165 << TheCall->getCallee()->getSourceRange();
166 return true;
167 }
168
169 // All arguments should be wide string literals.
170 for (Expr *Arg : TheCall->arguments()) {
171 auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
172 if (!Literal || !Literal->isWide()) {
173 S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
174 << Arg->getSourceRange();
175 return true;
176 }
177 }
178
179 return false;
180}
181
182/// Check that the argument to __builtin_addressof is a glvalue, and set the
183/// result type to the corresponding pointer type.
184static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
185 if (checkArgCount(S, TheCall, 1))
186 return true;
187
188 ExprResult Arg(TheCall->getArg(0));
189 QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
190 if (ResultType.isNull())
191 return true;
192
193 TheCall->setArg(0, Arg.get());
194 TheCall->setType(ResultType);
195 return false;
196}
197
198/// Check the number of arguments and set the result type to
199/// the argument type.
200static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
201 if (checkArgCount(S, TheCall, 1))
202 return true;
203
204 TheCall->setType(TheCall->getArg(0)->getType());
205 return false;
206}
207
/// Check that the value argument for __builtin_is_aligned(value, alignment) and
/// __builtin_aligned_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  // __builtin_is_aligned yields bool; the up/down variants yield the value.
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  // Only plain integers qualify: enums and bool are rejected explicitly.
  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  // The largest representable alignment in SrcTy is 2^(width-1).
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << toString(MaxValue, 10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      // Alignment of 1 is always satisfied: warn, but do not reject.
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  // Re-initialize both operands as if they were passed to parameters of
  // their own (possibly decayed) types so the usual conversions apply.
  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}
288
/// Type-check a call to __builtin_{add,sub,mul}_overflow: the first two
/// arguments must have integer type and the third must be a pointer to a
/// modifiable integer that receives the result. Returns true on error.
static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed ExtIntType args larger than 128 bits to mul function until
  // we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isExtIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_ext_int_max_size)
               << 128;
    }
  }

  return false;
}
345
/// Type-check __builtin_call_with_static_chain(call, chain): the first
/// argument must be a plain call expression (not through a block pointer,
/// not to a builtin, not a pseudo-destructor) and the second must convert to
/// a pointer. On success the builtin call node is rewritten in place to take
/// on the wrapped call's type and value category. Returns true on error.
static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  // First argument must be a direct call expression...
  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  // ...but not a call through a block pointer...
  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  // ...nor a call to another builtin...
  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  // ...nor a pseudo-destructor call.
  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  // The chain argument must convert to some pointer type.
  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  // Give the builtin callee a function-pointer type matching
  // (return-type)(return-type, chain-type) so the node type-checks.
  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  // Mirror the wrapped call's result type and value category on the builtin.
  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}
408
namespace {

/// Walks a printf-style format string and accumulates a conservative lower
/// bound on the number of bytes a call to sprintf with that format will
/// write (including the trailing NUL). Used by
/// Sema::checkFortifiedBuiltinMemoryFunction to diagnose certain overflows.
class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size; // Running lower bound, in bytes.

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  /// Called once per %-specifier. Adds the minimum width of the specifier's
  /// expansion to Size, then subtracts SpecifierLen because the constructor
  /// already counted the specifier's own characters as literal text.
  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    // '+' or ' ' flags force a sign/space character for signed conversions.
    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    // NOTE(review): under NDEBUG this assert compiles away; if SpecifierLen
    // ever exceeded Size, the subtraction below would wrap — the bound is
    // assumed to always cover the specifier's own length.
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  /// Final estimate: minimum bytes written, including the trailing NUL.
  size_t getSizeLowerBound() const { return Size; }

private:
  /// Constant field width if one was written in the format, else 0
  /// (runtime '*' widths cannot be accounted for here).
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  /// Explicit precision if written; otherwise the per-specifier default
  /// (see man 3 printf). Runtime '*' precisions yield 0.
  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

} // namespace
590
591/// Check a call to BuiltinID for buffer overflows. If BuiltinID is a
592/// __builtin_*_chk function, then use the object size argument specified in the
593/// source. Otherwise, infer the object size using __builtin_object_size.
594void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
595 CallExpr *TheCall) {
596 // FIXME: There are some more useful checks we could be doing here:
597 // - Evaluate strlen of strcpy arguments, use as object size.
598
599 if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
600 isConstantEvaluated())
601 return;
602
603 unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
604 if (!BuiltinID)
605 return;
606
607 const TargetInfo &TI = getASTContext().getTargetInfo();
608 unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
609
610 unsigned DiagID = 0;
611 bool IsChkVariant = false;
612 Optional<llvm::APSInt> UsedSize;
613 unsigned SizeIndex, ObjectIndex;
614 switch (BuiltinID) {
615 default:
616 return;
617 case Builtin::BIsprintf:
618 case Builtin::BI__builtin___sprintf_chk: {
619 size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
620 auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();
621
622 if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {
623
624 if (!Format->isAscii() && !Format->isUTF8())
625 return;
626
627 StringRef FormatStrRef = Format->getString();
628 EstimateSizeFormatHandler H(FormatStrRef);
629 const char *FormatBytes = FormatStrRef.data();
630 const ConstantArrayType *T =
631 Context.getAsConstantArrayType(Format->getType());
632 assert(T && "String literal not of constant array type!")((void)0);
633 size_t TypeSize = T->getSize().getZExtValue();
634
635 // In case there's a null byte somewhere.
636 size_t StrLen =
637 std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
638 if (!analyze_format_string::ParsePrintfString(
639 H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
640 Context.getTargetInfo(), false)) {
641 DiagID = diag::warn_fortify_source_format_overflow;
642 UsedSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
643 .extOrTrunc(SizeTypeWidth);
644 if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
645 IsChkVariant = true;
646 ObjectIndex = 2;
647 } else {
648 IsChkVariant = false;
649 ObjectIndex = 0;
650 }
651 break;
652 }
653 }
654 return;
655 }
656 case Builtin::BI__builtin___memcpy_chk:
657 case Builtin::BI__builtin___memmove_chk:
658 case Builtin::BI__builtin___memset_chk:
659 case Builtin::BI__builtin___strlcat_chk:
660 case Builtin::BI__builtin___strlcpy_chk:
661 case Builtin::BI__builtin___strncat_chk:
662 case Builtin::BI__builtin___strncpy_chk:
663 case Builtin::BI__builtin___stpncpy_chk:
664 case Builtin::BI__builtin___memccpy_chk:
665 case Builtin::BI__builtin___mempcpy_chk: {
666 DiagID = diag::warn_builtin_chk_overflow;
667 IsChkVariant = true;
668 SizeIndex = TheCall->getNumArgs() - 2;
669 ObjectIndex = TheCall->getNumArgs() - 1;
670 break;
671 }
672
673 case Builtin::BI__builtin___snprintf_chk:
674 case Builtin::BI__builtin___vsnprintf_chk: {
675 DiagID = diag::warn_builtin_chk_overflow;
676 IsChkVariant = true;
677 SizeIndex = 1;
678 ObjectIndex = 3;
679 break;
680 }
681
682 case Builtin::BIstrncat:
683 case Builtin::BI__builtin_strncat:
684 case Builtin::BIstrncpy:
685 case Builtin::BI__builtin_strncpy:
686 case Builtin::BIstpncpy:
687 case Builtin::BI__builtin_stpncpy: {
688 // Whether these functions overflow depends on the runtime strlen of the
689 // string, not just the buffer size, so emitting the "always overflow"
690 // diagnostic isn't quite right. We should still diagnose passing a buffer
691 // size larger than the destination buffer though; this is a runtime abort
692 // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
693 DiagID = diag::warn_fortify_source_size_mismatch;
694 SizeIndex = TheCall->getNumArgs() - 1;
695 ObjectIndex = 0;
696 break;
697 }
698
699 case Builtin::BImemcpy:
700 case Builtin::BI__builtin_memcpy:
701 case Builtin::BImemmove:
702 case Builtin::BI__builtin_memmove:
703 case Builtin::BImemset:
704 case Builtin::BI__builtin_memset:
705 case Builtin::BImempcpy:
706 case Builtin::BI__builtin_mempcpy: {
707 DiagID = diag::warn_fortify_source_overflow;
708 SizeIndex = TheCall->getNumArgs() - 1;
709 ObjectIndex = 0;
710 break;
711 }
712 case Builtin::BIsnprintf:
713 case Builtin::BI__builtin_snprintf:
714 case Builtin::BIvsnprintf:
715 case Builtin::BI__builtin_vsnprintf: {
716 DiagID = diag::warn_fortify_source_size_mismatch;
717 SizeIndex = 1;
718 ObjectIndex = 0;
719 break;
720 }
721 }
722
723 llvm::APSInt ObjectSize;
724 // For __builtin___*_chk, the object size is explicitly provided by the caller
725 // (usually using __builtin_object_size). Use that value to check this call.
726 if (IsChkVariant) {
727 Expr::EvalResult Result;
728 Expr *SizeArg = TheCall->getArg(ObjectIndex);
729 if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
730 return;
731 ObjectSize = Result.Val.getInt();
732
733 // Otherwise, try to evaluate an imaginary call to __builtin_object_size.
734 } else {
735 // If the parameter has a pass_object_size attribute, then we should use its
736 // (potentially) more strict checking mode. Otherwise, conservatively assume
737 // type 0.
738 int BOSType = 0;
739 if (const auto *POS =
740 FD->getParamDecl(ObjectIndex)->getAttr<PassObjectSizeAttr>())
741 BOSType = POS->getType();
742
743 Expr *ObjArg = TheCall->getArg(ObjectIndex);
744 uint64_t Result;
745 if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
746 return;
747 // Get the object size in the target's size_t width.
748 ObjectSize = llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
749 }
750
751 // Evaluate the number of bytes of the object that this call will use.
752 if (!UsedSize) {
753 Expr::EvalResult Result;
754 Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
755 if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
756 return;
757 UsedSize = Result.Val.getInt().extOrTrunc(SizeTypeWidth);
758 }
759
760 if (UsedSize.getValue().ule(ObjectSize))
761 return;
762
763 StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
764 // Skim off the details of whichever builtin was called to produce a better
765 // diagnostic, as it's unlikley that the user wrote the __builtin explicitly.
766 if (IsChkVariant) {
767 FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
768 FunctionName = FunctionName.drop_back(std::strlen("_chk"));
769 } else if (FunctionName.startswith("__builtin_")) {
770 FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
771 }
772
773 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
774 PDiag(DiagID)
775 << FunctionName << toString(ObjectSize, /*Radix=*/10)
776 << toString(UsedSize.getValue(), /*Radix=*/10));
777}
778
779static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
780 Scope::ScopeFlags NeededScopeFlags,
781 unsigned DiagID) {
782 // Scopes aren't available during instantiation. Fortunately, builtin
783 // functions cannot be template args so they cannot be formed through template
784 // instantiation. Therefore checking once during the parse is sufficient.
785 if (SemaRef.inTemplateInstantiation())
786 return false;
787
788 Scope *S = SemaRef.getCurScope();
789 while (S && !S->isSEHExceptScope())
790 S = S->getParent();
791 if (!S || !(S->getFlags() & NeededScopeFlags)) {
792 auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
793 SemaRef.Diag(TheCall->getExprLoc(), DiagID)
794 << DRE->getDecl()->getIdentifier();
795 return true;
796 }
797
798 return false;
799}
800
/// Returns true if \p Arg has block-pointer type.
static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}
804
/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
/// Diagnoses every offending parameter; returns true if any was illegal.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    // A legal parameter is a pointer, to void, in the __local address space.
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error. If a block literal has been passed
      // (BlockExpr) then we can point straight to the offending argument,
      // else we just point to the variable reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}
839
840static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
841 if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts())) {
842 S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
843 << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
844 return true;
845 }
846 return false;
847}
848
849static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
850 if (checkArgCount(S, TheCall, 2))
851 return true;
852
853 if (checkOpenCLSubgroupExt(S, TheCall))
854 return true;
855
856 // First argument is an ndrange_t type.
857 Expr *NDRangeArg = TheCall->getArg(0);
858 if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
859 S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
860 << TheCall->getDirectCallee() << "'ndrange_t'";
861 return true;
862 }
863
864 Expr *BlockArg = TheCall->getArg(1);
865 if (!isBlockPointer(BlockArg)) {
866 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
867 << TheCall->getDirectCallee() << "block";
868 return true;
869 }
870 return checkOpenCLBlockArgs(S, BlockArg);
871}
872
873/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
874/// get_kernel_work_group_size
875/// and get_kernel_preferred_work_group_size_multiple builtin functions.
876static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
877 if (checkArgCount(S, TheCall, 1))
878 return true;
879
880 Expr *BlockArg = TheCall->getArg(0);
881 if (!isBlockPointer(BlockArg)) {
882 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
883 << TheCall->getDirectCallee() << "block";
884 return true;
885 }
886 return checkOpenCLBlockArgs(S, BlockArg);
887}
888
889/// Diagnose integer type and any valid implicit conversion to it.
890static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
891 const QualType &IntType);
892
893static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
894 unsigned Start, unsigned End) {
895 bool IllegalParams = false;
896 for (unsigned I = Start; I <= End; ++I)
897 IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
898 S.Context.getSizeType());
899 return IllegalParams;
900}
901
902/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
903/// 'local void*' parameter of passed block.
904static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
905 Expr *BlockArg,
906 unsigned NumNonVarArgs) {
907 const BlockPointerType *BPT =
908 cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
909 unsigned NumBlockParams =
910 BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
911 unsigned TotalNumArgs = TheCall->getNumArgs();
912
913 // For each argument passed to the block, a corresponding uint needs to
914 // be passed to describe the size of the local memory.
915 if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
916 S.Diag(TheCall->getBeginLoc(),
917 diag::err_opencl_enqueue_kernel_local_size_args);
918 return true;
919 }
920
921 // Check that the sizes of the local memory are specified by integers.
922 return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
923 TotalNumArgs - 1);
924}
925
/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
///
/// Returns true if a semantic error was diagnosed.
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  // All four overloads take at least queue, flags, ndrange and block.
  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  // (Compared by spelled type name rather than by a dedicated type predicate.)
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    // NOTE(review): castAs<FunctionProtoType> assumes the block's pointee is
    // a prototyped function type — confirm a no-prototype block cannot reach
    // here.
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t
    // (or a null pointer constant).
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t
    // (or a null pointer constant).
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Exactly seven arguments: event form with a parameterless block.
    if (NumArgs == 7)
      return false;

    // More than seven: event form with a variadic block and its size args.
    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific case has been detected, give generic error
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}
1071
1072/// Returns OpenCL access qual.
1073static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
1074 return D->getAttr<OpenCLAccessAttr>();
1075}
1076
1077/// Returns true if pipe element type is different from the pointer.
1078static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
1079 const Expr *Arg0 = Call->getArg(0);
1080 // First argument type should always be pipe.
1081 if (!Arg0->getType()->isPipeType()) {
1082 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1083 << Call->getDirectCallee() << Arg0->getSourceRange();
1084 return true;
1085 }
1086 OpenCLAccessAttr *AccessQual =
1087 getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
1088 // Validates the access qualifier is compatible with the call.
1089 // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
1090 // read_only and write_only, and assumed to be read_only if no qualifier is
1091 // specified.
1092 switch (Call->getDirectCallee()->getBuiltinID()) {
1093 case Builtin::BIread_pipe:
1094 case Builtin::BIreserve_read_pipe:
1095 case Builtin::BIcommit_read_pipe:
1096 case Builtin::BIwork_group_reserve_read_pipe:
1097 case Builtin::BIsub_group_reserve_read_pipe:
1098 case Builtin::BIwork_group_commit_read_pipe:
1099 case Builtin::BIsub_group_commit_read_pipe:
1100 if (!(!AccessQual || AccessQual->isReadOnly())) {
1101 S.Diag(Arg0->getBeginLoc(),
1102 diag::err_opencl_builtin_pipe_invalid_access_modifier)
1103 << "read_only" << Arg0->getSourceRange();
1104 return true;
1105 }
1106 break;
1107 case Builtin::BIwrite_pipe:
1108 case Builtin::BIreserve_write_pipe:
1109 case Builtin::BIcommit_write_pipe:
1110 case Builtin::BIwork_group_reserve_write_pipe:
1111 case Builtin::BIsub_group_reserve_write_pipe:
1112 case Builtin::BIwork_group_commit_write_pipe:
1113 case Builtin::BIsub_group_commit_write_pipe:
1114 if (!(AccessQual && AccessQual->isWriteOnly())) {
1115 S.Diag(Arg0->getBeginLoc(),
1116 diag::err_opencl_builtin_pipe_invalid_access_modifier)
1117 << "write_only" << Arg0->getSourceRange();
1118 return true;
1119 }
1120 break;
1121 default:
1122 break;
1123 }
1124 return false;
1125}
1126
1127/// Returns true if pipe element type is different from the pointer.
1128static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
1129 const Expr *Arg0 = Call->getArg(0);
1130 const Expr *ArgIdx = Call->getArg(Idx);
1131 const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
1132 const QualType EltTy = PipeTy->getElementType();
1133 const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
1134 // The Idx argument should be a pointer and the type of the pointer and
1135 // the type of pipe element should also be the same.
1136 if (!ArgTy ||
1137 !S.Context.hasSameType(
1138 EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
1139 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1140 << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
1141 << ArgIdx->getType() << ArgIdx->getSourceRange();
1142 return true;
1143 }
1144 return false;
1145}
1146
1147// Performs semantic analysis for the read/write_pipe call.
1148// \param S Reference to the semantic analyzer.
1149// \param Call A pointer to the builtin call.
1150// \return True if a semantic error has been found, false otherwise.
1151static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
1152 // OpenCL v2.0 s6.13.16.2 - The built-in read/write
1153 // functions have two forms.
1154 switch (Call->getNumArgs()) {
1155 case 2:
1156 if (checkOpenCLPipeArg(S, Call))
1157 return true;
1158 // The call with 2 arguments should be
1159 // read/write_pipe(pipe T, T*).
1160 // Check packet type T.
1161 if (checkOpenCLPipePacketType(S, Call, 1))
1162 return true;
1163 break;
1164
1165 case 4: {
1166 if (checkOpenCLPipeArg(S, Call))
1167 return true;
1168 // The call with 4 arguments should be
1169 // read/write_pipe(pipe T, reserve_id_t, uint, T*).
1170 // Check reserve_id_t.
1171 if (!Call->getArg(1)->getType()->isReserveIDT()) {
1172 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1173 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1174 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1175 return true;
1176 }
1177
1178 // Check the index.
1179 const Expr *Arg2 = Call->getArg(2);
1180 if (!Arg2->getType()->isIntegerType() &&
1181 !Arg2->getType()->isUnsignedIntegerType()) {
1182 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1183 << Call->getDirectCallee() << S.Context.UnsignedIntTy
1184 << Arg2->getType() << Arg2->getSourceRange();
1185 return true;
1186 }
1187
1188 // Check packet type T.
1189 if (checkOpenCLPipePacketType(S, Call, 3))
1190 return true;
1191 } break;
1192 default:
1193 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
1194 << Call->getDirectCallee() << Call->getSourceRange();
1195 return true;
1196 }
1197
1198 return false;
1199}
1200
1201// Performs a semantic analysis on the {work_group_/sub_group_
1202// /_}reserve_{read/write}_pipe
1203// \param S Reference to the semantic analyzer.
1204// \param Call The call to the builtin function to be analyzed.
1205// \return True if a semantic error was found, false otherwise.
1206static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
1207 if (checkArgCount(S, Call, 2))
1208 return true;
1209
1210 if (checkOpenCLPipeArg(S, Call))
1211 return true;
1212
1213 // Check the reserve size.
1214 if (!Call->getArg(1)->getType()->isIntegerType() &&
1215 !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
1216 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1217 << Call->getDirectCallee() << S.Context.UnsignedIntTy
1218 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1219 return true;
1220 }
1221
1222 // Since return type of reserve_read/write_pipe built-in function is
1223 // reserve_id_t, which is not defined in the builtin def file , we used int
1224 // as return type and need to override the return type of these functions.
1225 Call->setType(S.Context.OCLReserveIDTy);
1226
1227 return false;
1228}
1229
1230// Performs a semantic analysis on {work_group_/sub_group_
1231// /_}commit_{read/write}_pipe
1232// \param S Reference to the semantic analyzer.
1233// \param Call The call to the builtin function to be analyzed.
1234// \return True if a semantic error was found, false otherwise.
1235static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
1236 if (checkArgCount(S, Call, 2))
1237 return true;
1238
1239 if (checkOpenCLPipeArg(S, Call))
1240 return true;
1241
1242 // Check reserve_id_t.
1243 if (!Call->getArg(1)->getType()->isReserveIDT()) {
1244 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1245 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1246 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1247 return true;
1248 }
1249
1250 return false;
1251}
1252
1253// Performs a semantic analysis on the call to built-in Pipe
1254// Query Functions.
1255// \param S Reference to the semantic analyzer.
1256// \param Call The call to the builtin function to be analyzed.
1257// \return True if a semantic error was found, false otherwise.
1258static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
1259 if (checkArgCount(S, Call, 1))
1260 return true;
1261
1262 if (!Call->getArg(0)->getType()->isPipeType()) {
1263 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1264 << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
1265 return true;
1266 }
1267
1268 return false;
1269}
1270
1271// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
1272// Performs semantic analysis for the to_global/local/private call.
1273// \param S Reference to the semantic analyzer.
1274// \param BuiltinID ID of the builtin function.
1275// \param Call A pointer to the builtin call.
1276// \return True if a semantic error has been found, false otherwise.
1277static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
1278 CallExpr *Call) {
1279 if (checkArgCount(S, Call, 1))
1280 return true;
1281
1282 auto RT = Call->getArg(0)->getType();
1283 if (!RT->isPointerType() || RT->getPointeeType()
1284 .getAddressSpace() == LangAS::opencl_constant) {
1285 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
1286 << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
1287 return true;
1288 }
1289
1290 if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
1291 S.Diag(Call->getArg(0)->getBeginLoc(),
1292 diag::warn_opencl_generic_address_space_arg)
1293 << Call->getDirectCallee()->getNameInfo().getAsString()
1294 << Call->getArg(0)->getSourceRange();
1295 }
1296
1297 RT = RT->getPointeeType();
1298 auto Qual = RT.getQualifiers();
1299 switch (BuiltinID) {
1300 case Builtin::BIto_global:
1301 Qual.setAddressSpace(LangAS::opencl_global);
1302 break;
1303 case Builtin::BIto_local:
1304 Qual.setAddressSpace(LangAS::opencl_local);
1305 break;
1306 case Builtin::BIto_private:
1307 Qual.setAddressSpace(LangAS::opencl_private);
1308 break;
1309 default:
1310 llvm_unreachable("Invalid builtin function")__builtin_unreachable();
1311 }
1312 Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
1313 RT.getUnqualifiedType(), Qual)));
1314
1315 return false;
1316}
1317
/// Semantic checking for __builtin_launder: validates the single pointer
/// argument, sets the call's result type, and converts the argument.
/// Returns the (possibly updated) call, or ExprError() on a diagnosed misuse.
static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //  Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  // The call's result type is the (decayed) argument type.
  TheCall->setType(ParamTy);

  // Classify the invalid argument forms: 0 = not a pointer at all,
  // 1 = pointer to function, 2 = pointer to void; None means the argument is
  // acceptable. The value is passed straight into the diagnostic.
  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return llvm::Optional<unsigned>{};
  }();
  if (DiagSelect.hasValue()) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << DiagSelect.getValue() << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  // Convert the argument as if initializing a parameter of type ParamTy.
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}
1377
1378// Emit an error and return true if the current architecture is not in the list
1379// of supported architectures.
1380static bool
1381CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
1382 ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
1383 llvm::Triple::ArchType CurArch =
1384 S.getASTContext().getTargetInfo().getTriple().getArch();
1385 if (llvm::is_contained(SupportedArchs, CurArch))
1386 return false;
1387 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
1388 << TheCall->getSourceRange();
1389 return true;
1390}
1391
1392static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
1393 SourceLocation CallSiteLoc);
1394
/// Dispatch target-specific builtin checking to the per-architecture handler
/// for the target TI describes. Architectures without a handler accept the
/// call unchecked. Returns true if a semantic error was diagnosed.
bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                      CallExpr *TheCall) {
  switch (TI.getTriple().getArch()) {
  default:
    // Some builtins don't require additional checking, so just consider these
    // acceptable.
    return false;
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    // BPF checking does not need the TargetInfo.
    return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::hexagon:
    return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::systemz:
    return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::ppc:
  case llvm::Triple::ppcle:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::amdgcn:
    return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64:
    return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
  }
}
1438
1439ExprResult
1440Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
1441 CallExpr *TheCall) {
1442 ExprResult TheCallResult(TheCall);
1443
1444 // Find out if any arguments are required to be integer constant expressions.
1445 unsigned ICEArguments = 0;
1446 ASTContext::GetBuiltinTypeError Error;
1447 Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
1448 if (Error != ASTContext::GE_None)
1449 ICEArguments = 0; // Don't diagnose previously diagnosed errors.
1450
1451 // If any arguments are required to be ICE's, check and diagnose.
1452 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
1453 // Skip arguments not required to be ICE's.
1454 if ((ICEArguments & (1 << ArgNo)) == 0) continue;
1455
1456 llvm::APSInt Result;
1457 if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
1458 return true;
1459 ICEArguments &= ~(1 << ArgNo);
1460 }
1461
1462 switch (BuiltinID) {
1463 case Builtin::BI__builtin___CFStringMakeConstantString:
1464 assert(TheCall->getNumArgs() == 1 &&((void)0)
1465 "Wrong # arguments to builtin CFStringMakeConstantString")((void)0);
1466 if (CheckObjCString(TheCall->getArg(0)))
1467 return ExprError();
1468 break;
1469 case Builtin::BI__builtin_ms_va_start:
1470 case Builtin::BI__builtin_stdarg_start:
1471 case Builtin::BI__builtin_va_start:
1472 if (SemaBuiltinVAStart(BuiltinID, TheCall))
1473 return ExprError();
1474 break;
1475 case Builtin::BI__va_start: {
1476 switch (Context.getTargetInfo().getTriple().getArch()) {
1477 case llvm::Triple::aarch64:
1478 case llvm::Triple::arm:
1479 case llvm::Triple::thumb:
1480 if (SemaBuiltinVAStartARMMicrosoft(TheCall))
1481 return ExprError();
1482 break;
1483 default:
1484 if (SemaBuiltinVAStart(BuiltinID, TheCall))
1485 return ExprError();
1486 break;
1487 }
1488 break;
1489 }
1490
1491 // The acquire, release, and no fence variants are ARM and AArch64 only.
1492 case Builtin::BI_interlockedbittestandset_acq:
1493 case Builtin::BI_interlockedbittestandset_rel:
1494 case Builtin::BI_interlockedbittestandset_nf:
1495 case Builtin::BI_interlockedbittestandreset_acq:
1496 case Builtin::BI_interlockedbittestandreset_rel:
1497 case Builtin::BI_interlockedbittestandreset_nf:
1498 if (CheckBuiltinTargetSupport(
1499 *this, BuiltinID, TheCall,
1500 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
1501 return ExprError();
1502 break;
1503
1504 // The 64-bit bittest variants are x64, ARM, and AArch64 only.
1505 case Builtin::BI_bittest64:
1506 case Builtin::BI_bittestandcomplement64:
1507 case Builtin::BI_bittestandreset64:
1508 case Builtin::BI_bittestandset64:
1509 case Builtin::BI_interlockedbittestandreset64:
1510 case Builtin::BI_interlockedbittestandset64:
1511 if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
1512 {llvm::Triple::x86_64, llvm::Triple::arm,
1513 llvm::Triple::thumb, llvm::Triple::aarch64}))
1514 return ExprError();
1515 break;
1516
1517 case Builtin::BI__builtin_isgreater:
1518 case Builtin::BI__builtin_isgreaterequal:
1519 case Builtin::BI__builtin_isless:
1520 case Builtin::BI__builtin_islessequal:
1521 case Builtin::BI__builtin_islessgreater:
1522 case Builtin::BI__builtin_isunordered:
1523 if (SemaBuiltinUnorderedCompare(TheCall))
1524 return ExprError();
1525 break;
1526 case Builtin::BI__builtin_fpclassify:
1527 if (SemaBuiltinFPClassification(TheCall, 6))
1528 return ExprError();
1529 break;
1530 case Builtin::BI__builtin_isfinite:
1531 case Builtin::BI__builtin_isinf:
1532 case Builtin::BI__builtin_isinf_sign:
1533 case Builtin::BI__builtin_isnan:
1534 case Builtin::BI__builtin_isnormal:
1535 case Builtin::BI__builtin_signbit:
1536 case Builtin::BI__builtin_signbitf:
1537 case Builtin::BI__builtin_signbitl:
1538 if (SemaBuiltinFPClassification(TheCall, 1))
1539 return ExprError();
1540 break;
1541 case Builtin::BI__builtin_shufflevector:
1542 return SemaBuiltinShuffleVector(TheCall);
1543 // TheCall will be freed by the smart pointer here, but that's fine, since
1544 // SemaBuiltinShuffleVector guts it, but then doesn't release it.
1545 case Builtin::BI__builtin_prefetch:
1546 if (SemaBuiltinPrefetch(TheCall))
1547 return ExprError();
1548 break;
1549 case Builtin::BI__builtin_alloca_with_align:
1550 if (SemaBuiltinAllocaWithAlign(TheCall))
1551 return ExprError();
1552 LLVM_FALLTHROUGH[[gnu::fallthrough]];
1553 case Builtin::BI__builtin_alloca:
1554 Diag(TheCall->getBeginLoc(), diag::warn_alloca)
1555 << TheCall->getDirectCallee();
1556 break;
1557 case Builtin::BI__arithmetic_fence:
1558 if (SemaBuiltinArithmeticFence(TheCall))
1559 return ExprError();
1560 break;
1561 case Builtin::BI__assume:
1562 case Builtin::BI__builtin_assume:
1563 if (SemaBuiltinAssume(TheCall))
1564 return ExprError();
1565 break;
1566 case Builtin::BI__builtin_assume_aligned:
1567 if (SemaBuiltinAssumeAligned(TheCall))
1568 return ExprError();
1569 break;
1570 case Builtin::BI__builtin_dynamic_object_size:
1571 case Builtin::BI__builtin_object_size:
1572 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
1573 return ExprError();
1574 break;
1575 case Builtin::BI__builtin_longjmp:
1576 if (SemaBuiltinLongjmp(TheCall))
1577 return ExprError();
1578 break;
1579 case Builtin::BI__builtin_setjmp:
1580 if (SemaBuiltinSetjmp(TheCall))
1581 return ExprError();
1582 break;
1583 case Builtin::BI__builtin_classify_type:
1584 if (checkArgCount(*this, TheCall, 1)) return true;
1585 TheCall->setType(Context.IntTy);
1586 break;
1587 case Builtin::BI__builtin_complex:
1588 if (SemaBuiltinComplex(TheCall))
1589 return ExprError();
1590 break;
1591 case Builtin::BI__builtin_constant_p: {
1592 if (checkArgCount(*this, TheCall, 1)) return true;
1593 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
1594 if (Arg.isInvalid()) return true;
1595 TheCall->setArg(0, Arg.get());
1596 TheCall->setType(Context.IntTy);
1597 break;
1598 }
1599 case Builtin::BI__builtin_launder:
1600 return SemaBuiltinLaunder(*this, TheCall);
1601 case Builtin::BI__sync_fetch_and_add:
1602 case Builtin::BI__sync_fetch_and_add_1:
1603 case Builtin::BI__sync_fetch_and_add_2:
1604 case Builtin::BI__sync_fetch_and_add_4:
1605 case Builtin::BI__sync_fetch_and_add_8:
1606 case Builtin::BI__sync_fetch_and_add_16:
1607 case Builtin::BI__sync_fetch_and_sub:
1608 case Builtin::BI__sync_fetch_and_sub_1:
1609 case Builtin::BI__sync_fetch_and_sub_2:
1610 case Builtin::BI__sync_fetch_and_sub_4:
1611 case Builtin::BI__sync_fetch_and_sub_8:
1612 case Builtin::BI__sync_fetch_and_sub_16:
1613 case Builtin::BI__sync_fetch_and_or:
1614 case Builtin::BI__sync_fetch_and_or_1:
1615 case Builtin::BI__sync_fetch_and_or_2:
1616 case Builtin::BI__sync_fetch_and_or_4:
1617 case Builtin::BI__sync_fetch_and_or_8:
1618 case Builtin::BI__sync_fetch_and_or_16:
1619 case Builtin::BI__sync_fetch_and_and:
1620 case Builtin::BI__sync_fetch_and_and_1:
1621 case Builtin::BI__sync_fetch_and_and_2:
1622 case Builtin::BI__sync_fetch_and_and_4:
1623 case Builtin::BI__sync_fetch_and_and_8:
1624 case Builtin::BI__sync_fetch_and_and_16:
1625 case Builtin::BI__sync_fetch_and_xor:
1626 case Builtin::BI__sync_fetch_and_xor_1:
1627 case Builtin::BI__sync_fetch_and_xor_2:
1628 case Builtin::BI__sync_fetch_and_xor_4:
1629 case Builtin::BI__sync_fetch_and_xor_8:
1630 case Builtin::BI__sync_fetch_and_xor_16:
1631 case Builtin::BI__sync_fetch_and_nand:
1632 case Builtin::BI__sync_fetch_and_nand_1:
1633 case Builtin::BI__sync_fetch_and_nand_2:
1634 case Builtin::BI__sync_fetch_and_nand_4:
1635 case Builtin::BI__sync_fetch_and_nand_8:
1636 case Builtin::BI__sync_fetch_and_nand_16:
1637 case Builtin::BI__sync_add_and_fetch:
1638 case Builtin::BI__sync_add_and_fetch_1:
1639 case Builtin::BI__sync_add_and_fetch_2:
1640 case Builtin::BI__sync_add_and_fetch_4:
1641 case Builtin::BI__sync_add_and_fetch_8:
1642 case Builtin::BI__sync_add_and_fetch_16:
1643 case Builtin::BI__sync_sub_and_fetch:
1644 case Builtin::BI__sync_sub_and_fetch_1:
1645 case Builtin::BI__sync_sub_and_fetch_2:
1646 case Builtin::BI__sync_sub_and_fetch_4:
1647 case Builtin::BI__sync_sub_and_fetch_8:
1648 case Builtin::BI__sync_sub_and_fetch_16:
1649 case Builtin::BI__sync_and_and_fetch:
1650 case Builtin::BI__sync_and_and_fetch_1:
1651 case Builtin::BI__sync_and_and_fetch_2:
1652 case Builtin::BI__sync_and_and_fetch_4:
1653 case Builtin::BI__sync_and_and_fetch_8:
1654 case Builtin::BI__sync_and_and_fetch_16:
1655 case Builtin::BI__sync_or_and_fetch:
1656 case Builtin::BI__sync_or_and_fetch_1:
1657 case Builtin::BI__sync_or_and_fetch_2:
1658 case Builtin::BI__sync_or_and_fetch_4:
1659 case Builtin::BI__sync_or_and_fetch_8:
1660 case Builtin::BI__sync_or_and_fetch_16:
1661 case Builtin::BI__sync_xor_and_fetch:
1662 case Builtin::BI__sync_xor_and_fetch_1:
1663 case Builtin::BI__sync_xor_and_fetch_2:
1664 case Builtin::BI__sync_xor_and_fetch_4:
1665 case Builtin::BI__sync_xor_and_fetch_8:
1666 case Builtin::BI__sync_xor_and_fetch_16:
1667 case Builtin::BI__sync_nand_and_fetch:
1668 case Builtin::BI__sync_nand_and_fetch_1:
1669 case Builtin::BI__sync_nand_and_fetch_2:
1670 case Builtin::BI__sync_nand_and_fetch_4:
1671 case Builtin::BI__sync_nand_and_fetch_8:
1672 case Builtin::BI__sync_nand_and_fetch_16:
1673 case Builtin::BI__sync_val_compare_and_swap:
1674 case Builtin::BI__sync_val_compare_and_swap_1:
1675 case Builtin::BI__sync_val_compare_and_swap_2:
1676 case Builtin::BI__sync_val_compare_and_swap_4:
1677 case Builtin::BI__sync_val_compare_and_swap_8:
1678 case Builtin::BI__sync_val_compare_and_swap_16:
1679 case Builtin::BI__sync_bool_compare_and_swap:
1680 case Builtin::BI__sync_bool_compare_and_swap_1:
1681 case Builtin::BI__sync_bool_compare_and_swap_2:
1682 case Builtin::BI__sync_bool_compare_and_swap_4:
1683 case Builtin::BI__sync_bool_compare_and_swap_8:
1684 case Builtin::BI__sync_bool_compare_and_swap_16:
1685 case Builtin::BI__sync_lock_test_and_set:
1686 case Builtin::BI__sync_lock_test_and_set_1:
1687 case Builtin::BI__sync_lock_test_and_set_2:
1688 case Builtin::BI__sync_lock_test_and_set_4:
1689 case Builtin::BI__sync_lock_test_and_set_8:
1690 case Builtin::BI__sync_lock_test_and_set_16:
1691 case Builtin::BI__sync_lock_release:
1692 case Builtin::BI__sync_lock_release_1:
1693 case Builtin::BI__sync_lock_release_2:
1694 case Builtin::BI__sync_lock_release_4:
1695 case Builtin::BI__sync_lock_release_8:
1696 case Builtin::BI__sync_lock_release_16:
1697 case Builtin::BI__sync_swap:
1698 case Builtin::BI__sync_swap_1:
1699 case Builtin::BI__sync_swap_2:
1700 case Builtin::BI__sync_swap_4:
1701 case Builtin::BI__sync_swap_8:
1702 case Builtin::BI__sync_swap_16:
1703 return SemaBuiltinAtomicOverloaded(TheCallResult);
1704 case Builtin::BI__sync_synchronize:
1705 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
1706 << TheCall->getCallee()->getSourceRange();
1707 break;
1708 case Builtin::BI__builtin_nontemporal_load:
1709 case Builtin::BI__builtin_nontemporal_store:
1710 return SemaBuiltinNontemporalOverloaded(TheCallResult);
1711 case Builtin::BI__builtin_memcpy_inline: {
1712 clang::Expr *SizeOp = TheCall->getArg(2);
1713 // We warn about copying to or from `nullptr` pointers when `size` is
1714 // greater than 0. When `size` is value dependent we cannot evaluate its
1715 // value so we bail out.
1716 if (SizeOp->isValueDependent())
1717 break;
1718 if (!SizeOp->EvaluateKnownConstInt(Context).isNullValue()) {
1719 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
1720 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc());
1721 }
1722 break;
1723 }
1724#define BUILTIN(ID, TYPE, ATTRS)
1725#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
1726 case Builtin::BI##ID: \
1727 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
1728#include "clang/Basic/Builtins.def"
1729 case Builtin::BI__annotation:
1730 if (SemaBuiltinMSVCAnnotation(*this, TheCall))
1731 return ExprError();
1732 break;
1733 case Builtin::BI__builtin_annotation:
1734 if (SemaBuiltinAnnotation(*this, TheCall))
1735 return ExprError();
1736 break;
1737 case Builtin::BI__builtin_addressof:
1738 if (SemaBuiltinAddressof(*this, TheCall))
1739 return ExprError();
1740 break;
1741 case Builtin::BI__builtin_is_aligned:
1742 case Builtin::BI__builtin_align_up:
1743 case Builtin::BI__builtin_align_down:
1744 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID))
1745 return ExprError();
1746 break;
1747 case Builtin::BI__builtin_add_overflow:
1748 case Builtin::BI__builtin_sub_overflow:
1749 case Builtin::BI__builtin_mul_overflow:
1750 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID))
1751 return ExprError();
1752 break;
1753 case Builtin::BI__builtin_operator_new:
1754 case Builtin::BI__builtin_operator_delete: {
1755 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
1756 ExprResult Res =
1757 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
1758 if (Res.isInvalid())
1759 CorrectDelayedTyposInExpr(TheCallResult.get());
1760 return Res;
1761 }
1762 case Builtin::BI__builtin_dump_struct: {
1763 // We first want to ensure we are called with 2 arguments
1764 if (checkArgCount(*this, TheCall, 2))
1765 return ExprError();
1766 // Ensure that the first argument is of type 'struct XX *'
1767 const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts();
1768 const QualType PtrArgType = PtrArg->getType();
1769 if (!PtrArgType->isPointerType() ||
1770 !PtrArgType->getPointeeType()->isRecordType()) {
1771 Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1772 << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType
1773 << "structure pointer";
1774 return ExprError();
1775 }
1776
1777 // Ensure that the second argument is of type 'FunctionType'
1778 const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts();
1779 const QualType FnPtrArgType = FnPtrArg->getType();
1780 if (!FnPtrArgType->isPointerType()) {
1781 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1782 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
1783 << FnPtrArgType << "'int (*)(const char *, ...)'";
1784 return ExprError();
1785 }
1786
1787 const auto *FuncType =
1788 FnPtrArgType->getPointeeType()->getAs<FunctionType>();
1789
1790 if (!FuncType) {
1791 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1792 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
1793 << FnPtrArgType << "'int (*)(const char *, ...)'";
1794 return ExprError();
1795 }
1796
1797 if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) {
1798 if (!FT->getNumParams()) {
1799 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1800 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
1801 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1802 return ExprError();
1803 }
1804 QualType PT = FT->getParamType(0);
1805 if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
1806 !PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
1807 !PT->getPointeeType().isConstQualified()) {
1808 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1809 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
1810 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1811 return ExprError();
1812 }
1813 }
1814
1815 TheCall->setType(Context.IntTy);
1816 break;
1817 }
1818 case Builtin::BI__builtin_expect_with_probability: {
1819 // We first want to ensure we are called with 3 arguments
1820 if (checkArgCount(*this, TheCall, 3))
1821 return ExprError();
1822 // then check probability is constant float in range [0.0, 1.0]
1823 const Expr *ProbArg = TheCall->getArg(2);
1824 SmallVector<PartialDiagnosticAt, 8> Notes;
1825 Expr::EvalResult Eval;
1826 Eval.Diag = &Notes;
1827 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) ||
1828 !Eval.Val.isFloat()) {
1829 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float)
1830 << ProbArg->getSourceRange();
1831 for (const PartialDiagnosticAt &PDiag : Notes)
1832 Diag(PDiag.first, PDiag.second);
1833 return ExprError();
1834 }
1835 llvm::APFloat Probability = Eval.Val.getFloat();
1836 bool LoseInfo = false;
1837 Probability.convert(llvm::APFloat::IEEEdouble(),
1838 llvm::RoundingMode::Dynamic, &LoseInfo);
1839 if (!(Probability >= llvm::APFloat(0.0) &&
1840 Probability <= llvm::APFloat(1.0))) {
1841 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range)
1842 << ProbArg->getSourceRange();
1843 return ExprError();
1844 }
1845 break;
1846 }
1847 case Builtin::BI__builtin_preserve_access_index:
1848 if (SemaBuiltinPreserveAI(*this, TheCall))
1849 return ExprError();
1850 break;
1851 case Builtin::BI__builtin_call_with_static_chain:
1852 if (SemaBuiltinCallWithStaticChain(*this, TheCall))
1853 return ExprError();
1854 break;
1855 case Builtin::BI__exception_code:
1856 case Builtin::BI_exception_code:
1857 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
1858 diag::err_seh___except_block))
1859 return ExprError();
1860 break;
1861 case Builtin::BI__exception_info:
1862 case Builtin::BI_exception_info:
1863 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
1864 diag::err_seh___except_filter))
1865 return ExprError();
1866 break;
1867 case Builtin::BI__GetExceptionInfo:
1868 if (checkArgCount(*this, TheCall, 1))
1869 return ExprError();
1870
1871 if (CheckCXXThrowOperand(
1872 TheCall->getBeginLoc(),
1873 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
1874 TheCall))
1875 return ExprError();
1876
1877 TheCall->setType(Context.VoidPtrTy);
1878 break;
1879 // OpenCL v2.0, s6.13.16 - Pipe functions
1880 case Builtin::BIread_pipe:
1881 case Builtin::BIwrite_pipe:
1882 // Since those two functions are declared with var args, we need a semantic
1883 // check for the argument.
1884 if (SemaBuiltinRWPipe(*this, TheCall))
1885 return ExprError();
1886 break;
1887 case Builtin::BIreserve_read_pipe:
1888 case Builtin::BIreserve_write_pipe:
1889 case Builtin::BIwork_group_reserve_read_pipe:
1890 case Builtin::BIwork_group_reserve_write_pipe:
1891 if (SemaBuiltinReserveRWPipe(*this, TheCall))
1892 return ExprError();
1893 break;
1894 case Builtin::BIsub_group_reserve_read_pipe:
1895 case Builtin::BIsub_group_reserve_write_pipe:
1896 if (checkOpenCLSubgroupExt(*this, TheCall) ||
1897 SemaBuiltinReserveRWPipe(*this, TheCall))
1898 return ExprError();
1899 break;
1900 case Builtin::BIcommit_read_pipe:
1901 case Builtin::BIcommit_write_pipe:
1902 case Builtin::BIwork_group_commit_read_pipe:
1903 case Builtin::BIwork_group_commit_write_pipe:
1904 if (SemaBuiltinCommitRWPipe(*this, TheCall))
1905 return ExprError();
1906 break;
1907 case Builtin::BIsub_group_commit_read_pipe:
1908 case Builtin::BIsub_group_commit_write_pipe:
1909 if (checkOpenCLSubgroupExt(*this, TheCall) ||
1910 SemaBuiltinCommitRWPipe(*this, TheCall))
1911 return ExprError();
1912 break;
1913 case Builtin::BIget_pipe_num_packets:
1914 case Builtin::BIget_pipe_max_packets:
1915 if (SemaBuiltinPipePackets(*this, TheCall))
1916 return ExprError();
1917 break;
1918 case Builtin::BIto_global:
1919 case Builtin::BIto_local:
1920 case Builtin::BIto_private:
1921 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
1922 return ExprError();
1923 break;
1924 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
1925 case Builtin::BIenqueue_kernel:
1926 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
1927 return ExprError();
1928 break;
1929 case Builtin::BIget_kernel_work_group_size:
1930 case Builtin::BIget_kernel_preferred_work_group_size_multiple:
1931 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
1932 return ExprError();
1933 break;
1934 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
1935 case Builtin::BIget_kernel_sub_group_count_for_ndrange:
1936 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
1937 return ExprError();
1938 break;
1939 case Builtin::BI__builtin_os_log_format:
1940 Cleanup.setExprNeedsCleanups(true);
1941 LLVM_FALLTHROUGH[[gnu::fallthrough]];
1942 case Builtin::BI__builtin_os_log_format_buffer_size:
1943 if (SemaBuiltinOSLogFormat(TheCall))
1944 return ExprError();
1945 break;
1946 case Builtin::BI__builtin_frame_address:
1947 case Builtin::BI__builtin_return_address: {
1948 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF))
1949 return ExprError();
1950
1951 // -Wframe-address warning if non-zero passed to builtin
1952 // return/frame address.
1953 Expr::EvalResult Result;
1954 if (!TheCall->getArg(0)->isValueDependent() &&
1955 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) &&
1956 Result.Val.getInt() != 0)
1957 Diag(TheCall->getBeginLoc(), diag::warn_frame_address)
1958 << ((BuiltinID == Builtin::BI__builtin_return_address)
1959 ? "__builtin_return_address"
1960 : "__builtin_frame_address")
1961 << TheCall->getSourceRange();
1962 break;
1963 }
1964
1965 case Builtin::BI__builtin_matrix_transpose:
1966 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult);
1967
1968 case Builtin::BI__builtin_matrix_column_major_load:
1969 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult);
1970
1971 case Builtin::BI__builtin_matrix_column_major_store:
1972 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult);
1973
1974 case Builtin::BI__builtin_get_device_side_mangled_name: {
1975 auto Check = [](CallExpr *TheCall) {
1976 if (TheCall->getNumArgs() != 1)
1977 return false;
1978 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts());
1979 if (!DRE)
1980 return false;
1981 auto *D = DRE->getDecl();
1982 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D))
1983 return false;
1984 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() ||
1985 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>();
1986 };
1987 if (!Check(TheCall)) {
1988 Diag(TheCall->getBeginLoc(),
1989 diag::err_hip_invalid_args_builtin_mangled_name);
1990 return ExprError();
1991 }
1992 }
1993 }
1994
1995 // Since the target specific builtins for each arch overlap, only check those
1996 // of the arch we are compiling for.
1997 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
1998 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
1999 assert(Context.getAuxTargetInfo() &&((void)0)
2000 "Aux Target Builtin, but not an aux target?")((void)0);
2001
2002 if (CheckTSBuiltinFunctionCall(
2003 *Context.getAuxTargetInfo(),
2004 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall))
2005 return ExprError();
2006 } else {
2007 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID,
2008 TheCall))
2009 return ExprError();
2010 }
2011 }
2012
2013 return TheCallResult;
2014}
2015
2016// Get the valid immediate range for the specified NEON type code.
2017static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
2018 NeonTypeFlags Type(t);
2019 int IsQuad = ForceQuad ? true : Type.isQuad();
2020 switch (Type.getEltType()) {
2021 case NeonTypeFlags::Int8:
2022 case NeonTypeFlags::Poly8:
2023 return shift ? 7 : (8 << IsQuad) - 1;
2024 case NeonTypeFlags::Int16:
2025 case NeonTypeFlags::Poly16:
2026 return shift ? 15 : (4 << IsQuad) - 1;
2027 case NeonTypeFlags::Int32:
2028 return shift ? 31 : (2 << IsQuad) - 1;
2029 case NeonTypeFlags::Int64:
2030 case NeonTypeFlags::Poly64:
2031 return shift ? 63 : (1 << IsQuad) - 1;
2032 case NeonTypeFlags::Poly128:
2033 return shift ? 127 : (1 << IsQuad) - 1;
2034 case NeonTypeFlags::Float16:
2035 assert(!shift && "cannot shift float types!")((void)0);
2036 return (4 << IsQuad) - 1;
2037 case NeonTypeFlags::Float32:
2038 assert(!shift && "cannot shift float types!")((void)0);
2039 return (2 << IsQuad) - 1;
2040 case NeonTypeFlags::Float64:
2041 assert(!shift && "cannot shift float types!")((void)0);
2042 return (1 << IsQuad) - 1;
2043 case NeonTypeFlags::BFloat16:
2044 assert(!shift && "cannot shift float types!")((void)0);
2045 return (4 << IsQuad) - 1;
2046 }
2047 llvm_unreachable("Invalid NeonTypeFlag!")__builtin_unreachable();
2048}
2049
2050/// getNeonEltType - Return the QualType corresponding to the elements of
2051/// the vector type specified by the NeonTypeFlags. This is used to check
2052/// the pointer arguments for Neon load/store intrinsics.
2053static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
2054 bool IsPolyUnsigned, bool IsInt64Long) {
2055 switch (Flags.getEltType()) {
2056 case NeonTypeFlags::Int8:
2057 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
2058 case NeonTypeFlags::Int16:
2059 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
2060 case NeonTypeFlags::Int32:
2061 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
2062 case NeonTypeFlags::Int64:
2063 if (IsInt64Long)
2064 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
2065 else
2066 return Flags.isUnsigned() ? Context.UnsignedLongLongTy
2067 : Context.LongLongTy;
2068 case NeonTypeFlags::Poly8:
2069 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
2070 case NeonTypeFlags::Poly16:
2071 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
2072 case NeonTypeFlags::Poly64:
2073 if (IsInt64Long)
2074 return Context.UnsignedLongTy;
2075 else
2076 return Context.UnsignedLongLongTy;
2077 case NeonTypeFlags::Poly128:
2078 break;
2079 case NeonTypeFlags::Float16:
2080 return Context.HalfTy;
2081 case NeonTypeFlags::Float32:
2082 return Context.FloatTy;
2083 case NeonTypeFlags::Float64:
2084 return Context.DoubleTy;
2085 case NeonTypeFlags::BFloat16:
2086 return Context.BFloat16Ty;
2087 }
2088 llvm_unreachable("Invalid NeonTypeFlag!")__builtin_unreachable();
2089}
2090
bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  // Range check SVE intrinsics that take immediate values.
  // Each entry is (argument index, check kind, element size in bits); the
  // tablegen-generated cases below populate the list for this builtin.
  SmallVector<std::tuple<int,int,int>, 3> ImmChecks;

  switch (BuiltinID) {
  default:
    // No immediate operands to check for this builtin.
    return false;
#define GET_SVE_IMMEDIATE_CHECK
#include "clang/Basic/arm_sve_sema_rangechecks.inc"
#undef GET_SVE_IMMEDIATE_CHECK
  }

  // Perform all the immediate checks for this builtin call.
  bool HasError = false;
  for (auto &I : ImmChecks) {
    int ArgNum, CheckTy, ElementSizeInBits;
    std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;

    typedef bool(*OptionSetCheckFnTy)(int64_t Value);

    // Function that checks whether the operand (ArgNum) is an immediate
    // that is one of the predefined values.
    auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
                                   int ErrDiag) -> bool {
      // We can't check the value of a dependent argument.
      Expr *Arg = TheCall->getArg(ArgNum);
      if (Arg->isTypeDependent() || Arg->isValueDependent())
        return false;

      // Check constant-ness first.
      llvm::APSInt Imm;
      if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm))
        return true;

      if (!CheckImm(Imm.getSExtValue()))
        return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
      return false;
    };

    // Note: a failed range check emits a diagnostic but does not stop the
    // loop, so every bad immediate in the call gets reported.
    switch ((SVETypeFlags::ImmCheckType)CheckTy) {
    case SVETypeFlags::ImmCheck0_31:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_13:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck1_16:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_7:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckExtract:
      // Extract indices address a 2048-bit (maximum SVE VL) register.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (2048 / ElementSizeInBits) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftRight:
      // Right shifts are 1..element-size.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftRightNarrow:
      // Narrowing right shifts are bounded by the destination element size.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1,
                                      ElementSizeInBits / 2))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftLeft:
      // Left shifts are 0..element-size-1.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      ElementSizeInBits - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndex:
      // Lane indices address a 128-bit vector segment.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (1 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndexCompRotate:
      // Complex rotate lanes index element pairs.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (2 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndexDot:
      // Dot-product lanes index groups of four elements.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (4 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckComplexRot90_270:
      if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
                              diag::err_rotation_argument_to_cadd))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckComplexRotAll90:
      if (CheckImmediateInSet(
              [](int64_t V) {
                return V == 0 || V == 90 || V == 180 || V == 270;
              },
              diag::err_rotation_argument_to_cmla))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_1:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_2:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_3:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
        HasError = true;
      break;
    }
  }

  return HasError;
}
2211
bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
                                        unsigned BuiltinID, CallExpr *TheCall) {
  // Shared semantic checking for NEON (and fp16) intrinsics: validate the
  // type-discriminator immediate, any pointer argument, and instruction
  // immediates.  Returns true on error.
  llvm::APSInt Result;
  uint64_t mask = 0;       // bitmask of NeonTypeFlags codes legal for this builtin
  unsigned TV = 0;         // the type code taken from the last argument
  int PtrArgNum = -1;      // index of a pointer argument to type-check, if any
  bool HasConstPtr = false;
  // The generated cases fill in mask/PtrArgNum/HasConstPtr for overloaded
  // intrinsics; unknown builtins fall straight through with mask == 0.
  switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_OVERLOAD_CHECK
  }

  // For NEON intrinsics which are overloaded on vector element type, validate
  // the immediate which specifies which variant to emit.
  // The discriminator is always the last argument of the call.
  unsigned ImmArg = TheCall->getNumArgs()-1;
  if (mask) {
    if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
      return true;

    TV = Result.getLimitedValue(64);
    if ((TV > 63) || (mask & (1ULL << TV)) == 0)
      return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
             << TheCall->getArg(ImmArg)->getSourceRange();
  }

  if (PtrArgNum >= 0) {
    // Check that pointer arguments have the specified type.
    Expr *Arg = TheCall->getArg(PtrArgNum);
    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
      Arg = ICE->getSubExpr();
    ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
    QualType RHSTy = RHS.get()->getType();

    // Poly element signedness and the spelling of 64-bit integers both
    // depend on the target.
    llvm::Triple::ArchType Arch = TI.getTriple().getArch();
    bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
                          Arch == llvm::Triple::aarch64_32 ||
                          Arch == llvm::Triple::aarch64_be;
    bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
    QualType EltTy =
        getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
    if (HasConstPtr)
      EltTy = EltTy.withConst();
    QualType LHSTy = Context.getPointerType(EltTy);
    // Diagnose the pointer argument as if it were being assigned to a
    // variable of the expected pointer type.
    AssignConvertType ConvTy;
    ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
    if (RHS.isInvalid())
      return true;
    if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
                                 RHS.get(), AA_Assigning))
      return true;
  }

  // For NEON intrinsics which take an immediate value as part of the
  // instruction, range check them here.
  // The generated cases set i (argument index), l (lower bound) and
  // u (range width) for the final range check below.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
  #define GET_NEON_IMMEDIATE_CHECK
  #include "clang/Basic/arm_neon.inc"
  #include "clang/Basic/arm_fp16.inc"
  #undef GET_NEON_IMMEDIATE_CHECK
  }

  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}
2280
bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  // All MVE immediate checking is tablegen-generated: the included .inc file
  // expands to 'case' labels that perform the checks and return.  Builtins
  // without a generated case need no checking.
  switch (BuiltinID) {
  default:
    return false;
  #include "clang/Basic/arm_mve_builtin_sema.inc"
  }
}
2288
bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  // Tablegen-generated immediate checks; the generated cases set Err on
  // failure.  Builtins without a generated case need no checking.
  bool Err = false;
  switch (BuiltinID) {
  default:
    return false;
#include "clang/Basic/arm_cde_builtin_sema.inc"
  }

  if (Err)
    return true;

  // CDE intrinsics take the coprocessor number as their first argument; it
  // must name a coprocessor configured for CDE on this target.
  return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
}
2303
2304bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI,
2305 const Expr *CoprocArg, bool WantCDE) {
2306 if (isConstantEvaluated())
2307 return false;
2308
2309 // We can't check the value of a dependent argument.
2310 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
2311 return false;
2312
2313 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
2314 int64_t CoprocNo = CoprocNoAP.getExtValue();
2315 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative")((void)0);
2316
2317 uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
2318 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
2319
2320 if (IsCDECoproc != WantCDE)
2321 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
2322 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
2323
2324 return false;
2325}
2326
bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                        unsigned MaxWidth) {
  // Custom semantic checking for the ldrex/ldaex/strex/stlex builtin family.
  // These builtins are polymorphic in the pointee type, so default argument
  // checking is bypassed and the call's result type is computed here.
  // MaxWidth is the widest supported access in bits (callers pass 64 for ARM,
  // 128 for AArch64).  Returns true on error.
  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
          BuiltinID == ARM::BI__builtin_arm_ldaex ||
          BuiltinID == ARM::BI__builtin_arm_strex ||
          BuiltinID == ARM::BI__builtin_arm_stlex ||
          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
          BuiltinID == AArch64::BI__builtin_arm_ldaex ||
          BuiltinID == AArch64::BI__builtin_arm_strex ||
          BuiltinID == AArch64::BI__builtin_arm_stlex) &&
         "unexpected ARM builtin");
  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
                 BuiltinID == ARM::BI__builtin_arm_ldaex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldaex;

  // Used only as a diagnostic location for the callee.
  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
    return true;

  // Inspect the pointer argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  // The pointer is argument 0 for loads, argument 1 for stores.
  Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
  ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
  if (IsLdrex)
    AddrType.addConst();

  // Issue a warning if the cast is dodgy.
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
    CastNeeded = CK_BitCast;
    Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType() << Context.getPointerType(AddrType)
        << AA_Passing << PointerArg->getSourceRange();
  }

  // Finally, do the cast and replace the argument with the corrected version.
  AddrType = Context.getPointerType(AddrType);
  PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);

  // In general, we allow ints, floats and pointers to be loaded and stored.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // But ARM doesn't have instructions to deal with 128-bit versions.
  if (Context.getTypeSize(ValType) > MaxWidth) {
    assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
    Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ARC-managed pointees cannot be accessed by these builtins.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << PointerArg->getSourceRange();
    return true;
  }

  if (IsLdrex) {
    // The result of a load is the (unqualified-address) pointee type.
    TheCall->setType(ValType);
    return false;
  }

  // Initialize the argument to be stored.
  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return true;
  TheCall->setArg(0, ValArg.get());

  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
  // but the custom checker bypasses all default analysis.
  TheCall->setType(Context.IntTy);
  return false;
}
2441
/// Perform semantic checking on a call to an ARM (AArch32) builtin.
///
/// Dispatches to the specialized checkers (exclusive load/store, special
/// register access, NEON/MVE/CDE), then range-checks immediate operands for
/// the remaining intrinsics. Returns true if an error was diagnosed.
bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  // Exclusive load/store builtins share one checker; 64 is the maximum
  // access width in bits supported by AArch32 ldrexd/strexd.
  if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
      BuiltinID == ARM::BI__builtin_arm_ldaex ||
      BuiltinID == ARM::BI__builtin_arm_strex ||
      BuiltinID == ARM::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
  }

  // __builtin_arm_prefetch: args 1 and 2 (rw, cache-level selector) must be
  // constants in [0, 1].
  if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
  }

  // 64-bit system register read/write: register descriptor has 3 fields,
  // string form not allowed.
  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
      BuiltinID == ARM::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);

  // 32-bit / pointer system register access: 5-field descriptor, string
  // register names allowed.
  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
      BuiltinID == ARM::BI__builtin_arm_rsrp ||
      BuiltinID == ARM::BI__builtin_arm_wsr ||
      BuiltinID == ARM::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Delegate to the vector-extension checkers; any of them may diagnose.
  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;
  if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;
  if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // FIXME: VFP Intrinsics should error if VFP not present.
  switch (BuiltinID) {
  default: return false;
  case ARM::BI__builtin_arm_ssat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
  case ARM::BI__builtin_arm_usat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
  case ARM::BI__builtin_arm_ssat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
  case ARM::BI__builtin_arm_usat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  case ARM::BI__builtin_arm_vcvtr_f:
  case ARM::BI__builtin_arm_vcvtr_d:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case ARM::BI__builtin_arm_dmb:
  case ARM::BI__builtin_arm_dsb:
  case ARM::BI__builtin_arm_isb:
  case ARM::BI__builtin_arm_dbg:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
  // Coprocessor builtins: arg 0 is the coprocessor number (0-15) and must
  // also be a coprocessor the target actually permits software to access.
  case ARM::BI__builtin_arm_cdp:
  case ARM::BI__builtin_arm_cdp2:
  case ARM::BI__builtin_arm_mcr:
  case ARM::BI__builtin_arm_mcr2:
  case ARM::BI__builtin_arm_mrc:
  case ARM::BI__builtin_arm_mrc2:
  case ARM::BI__builtin_arm_mcrr:
  case ARM::BI__builtin_arm_mcrr2:
  case ARM::BI__builtin_arm_mrrc:
  case ARM::BI__builtin_arm_mrrc2:
  case ARM::BI__builtin_arm_ldc:
  case ARM::BI__builtin_arm_ldcl:
  case ARM::BI__builtin_arm_ldc2:
  case ARM::BI__builtin_arm_ldc2l:
  case ARM::BI__builtin_arm_stc:
  case ARM::BI__builtin_arm_stcl:
  case ARM::BI__builtin_arm_stc2:
  case ARM::BI__builtin_arm_stc2l:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) ||
           CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
                                        /*WantCDE*/ false);
  }
}
2517
/// Perform semantic checking on a call to an AArch64 builtin.
///
/// Mirrors the ARM checker: dispatches to exclusive-access, prefetch,
/// special-register, MTE, NEON and SVE checkers, then range-checks the
/// immediate operands of the remaining intrinsics. Returns true if an
/// error was diagnosed.
bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
                                           unsigned BuiltinID,
                                           CallExpr *TheCall) {
  // Exclusive load/store: AArch64 ldxp/stxp allow accesses up to 128 bits.
  if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
      BuiltinID == AArch64::BI__builtin_arm_ldaex ||
      BuiltinID == AArch64::BI__builtin_arm_strex ||
      BuiltinID == AArch64::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
  }

  // __builtin_arm_prefetch(addr, rw, target, policy, stream): each selector
  // immediate has its own small valid range.
  if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) ||
           SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
  }

  // System register access uses a 5-field descriptor (op0_op1_CRn_CRm_op2);
  // string register names are allowed.
  if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Memory Tagging Extensions (MTE) Intrinsics
  if (BuiltinID == AArch64::BI__builtin_arm_irg ||
      BuiltinID == AArch64::BI__builtin_arm_addg ||
      BuiltinID == AArch64::BI__builtin_arm_gmi ||
      BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg ||
      BuiltinID == AArch64::BI__builtin_arm_subp) {
    return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
      BuiltinID == AArch64::BI__builtin_arm_wsr ||
      BuiltinID == AArch64::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Only check the valid encoding range. Any constant in this range would be
  // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
  // an exception for incorrect registers. This matches MSVC behavior.
  if (BuiltinID == AArch64::BI_ReadStatusReg ||
      BuiltinID == AArch64::BI_WriteStatusReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);

  // MSVC-compatible __getReg: general purpose register number 0-31.
  if (BuiltinID == AArch64::BI__getReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);

  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case AArch64::BI__builtin_arm_dmb:
  case AArch64::BI__builtin_arm_dsb:
  case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
  case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
  }

  // NOTE(review): the upper bound is written "u + l"; since every case above
  // sets l = 0 this is equivalent to plain "u", but keeps the [l, l + u]
  // convention used elsewhere in this file.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}
2584
2585static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) {
2586 if (Arg->getType()->getAsPlaceholderType())
2587 return false;
2588
2589 // The first argument needs to be a record field access.
2590 // If it is an array element access, we delay decision
2591 // to BPF backend to check whether the access is a
2592 // field access or not.
2593 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField ||
2594 dyn_cast<MemberExpr>(Arg->IgnoreParens()) ||
2595 dyn_cast<ArraySubscriptExpr>(Arg->IgnoreParens()));
2596}
2597
2598static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S,
2599 QualType VectorTy, QualType EltTy) {
2600 QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType();
2601 if (!Context.hasSameType(VectorEltTy, EltTy)) {
2602 S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types)
2603 << Call->getSourceRange() << VectorEltTy << EltTy;
2604 return false;
2605 }
2606 return true;
2607}
2608
2609static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) {
2610 QualType ArgType = Arg->getType();
2611 if (ArgType->getAsPlaceholderType())
2612 return false;
2613
2614 // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type
2615 // format:
2616 // 1. __builtin_preserve_type_info(*(<type> *)0, flag);
2617 // 2. <type> var;
2618 // __builtin_preserve_type_info(var, flag);
2619 if (!dyn_cast<DeclRefExpr>(Arg->IgnoreParens()) &&
2620 !dyn_cast<UnaryOperator>(Arg->IgnoreParens()))
2621 return false;
2622
2623 // Typedef type.
2624 if (ArgType->getAs<TypedefType>())
2625 return true;
2626
2627 // Record type or Enum type.
2628 const Type *Ty = ArgType->getUnqualifiedDesugaredType();
2629 if (const auto *RT = Ty->getAs<RecordType>()) {
2630 if (!RT->getDecl()->getDeclName().isEmpty())
2631 return true;
2632 } else if (const auto *ET = Ty->getAs<EnumType>()) {
2633 if (!ET->getDecl()->getDeclName().isEmpty())
2634 return true;
2635 }
2636
2637 return false;
2638}
2639
2640static bool isValidBPFPreserveEnumValueArg(Expr *Arg) {
2641 QualType ArgType = Arg->getType();
2642 if (ArgType->getAsPlaceholderType())
2643 return false;
2644
2645 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type
2646 // format:
2647 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>,
2648 // flag);
2649 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens());
2650 if (!UO)
2651 return false;
2652
2653 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
2654 if (!CE)
2655 return false;
2656 if (CE->getCastKind() != CK_IntegralToPointer &&
2657 CE->getCastKind() != CK_NullToPointer)
2658 return false;
2659
2660 // The integer must be from an EnumConstantDecl.
2661 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr());
2662 if (!DR)
2663 return false;
2664
2665 const EnumConstantDecl *Enumerator =
2666 dyn_cast<EnumConstantDecl>(DR->getDecl());
2667 if (!Enumerator)
2668 return false;
2669
2670 // The type must be EnumType.
2671 const Type *Ty = ArgType->getUnqualifiedDesugaredType();
2672 const auto *ET = Ty->getAs<EnumType>();
2673 if (!ET)
2674 return false;
2675
2676 // The enum value must be supported.
2677 for (auto *EDI : ET->getDecl()->enumerators()) {
2678 if (EDI == Enumerator)
2679 return true;
2680 }
2681
2682 return false;
2683}
2684
/// Perform semantic checking on a call to one of the four BPF relocation
/// builtins. Verifies the argument count, that the second argument (the
/// "flag"/"kind" operand) is an integer constant expression, and that the
/// first argument has the shape the specific builtin requires. On success,
/// sets the call's result type (unsigned int or unsigned long depending on
/// the builtin). Returns true if an error was diagnosed.
bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
                                       CallExpr *TheCall) {
  assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
          BuiltinID == BPF::BI__builtin_btf_type_id ||
          BuiltinID == BPF::BI__builtin_preserve_type_info ||
          BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
         "unexpected BPF builtin");

  if (checkArgCount(*this, TheCall, 2))
    return true;

  // The second argument needs to be a constant int
  Expr *Arg = TheCall->getArg(1);
  Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context);
  diag::kind kind;
  if (!Value) {
    // Pick the per-builtin "second argument not constant" diagnostic.
    if (BuiltinID == BPF::BI__builtin_preserve_field_info)
      kind = diag::err_preserve_field_info_not_const;
    else if (BuiltinID == BPF::BI__builtin_btf_type_id)
      kind = diag::err_btf_type_id_not_const;
    else if (BuiltinID == BPF::BI__builtin_preserve_type_info)
      kind = diag::err_preserve_type_info_not_const;
    else
      kind = diag::err_preserve_enum_value_not_const;
    Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange();
    return true;
  }

  // The first argument
  Arg = TheCall->getArg(0);
  bool InvalidArg = false;
  bool ReturnUnsignedInt = true;
  if (BuiltinID == BPF::BI__builtin_preserve_field_info) {
    if (!isValidBPFPreserveFieldInfoArg(Arg)) {
      InvalidArg = true;
      kind = diag::err_preserve_field_info_not_field;
    }
  } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) {
    if (!isValidBPFPreserveTypeInfoArg(Arg)) {
      InvalidArg = true;
      kind = diag::err_preserve_type_info_invalid;
    }
  } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) {
    if (!isValidBPFPreserveEnumValueArg(Arg)) {
      InvalidArg = true;
      kind = diag::err_preserve_enum_value_invalid;
    }
    // Enum values may not fit in 32 bits, so this builtin returns a long.
    ReturnUnsignedInt = false;
  } else if (BuiltinID == BPF::BI__builtin_btf_type_id) {
    // BTF type ids are returned as unsigned long; the first argument can be
    // any expression, so no shape check is needed.
    ReturnUnsignedInt = false;
  }

  if (InvalidArg) {
    Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange();
    return true;
  }

  if (ReturnUnsignedInt)
    TheCall->setType(Context.UnsignedIntTy);
  else
    TheCall->setType(Context.UnsignedLongTy);
  return false;
}
2748
/// Validate the immediate (constant) operands of a Hexagon builtin call.
///
/// A table maps each Hexagon builtin that carries immediate operands to up to
/// two ArgInfo records describing the operand's index, signedness, bit width,
/// and required alignment. The table is sorted once at first use and then
/// binary-searched per call. Returns true if any immediate is out of range or
/// not a multiple of its required alignment (diagnostics emitted); false
/// otherwise, including for builtins absent from the table.
bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  // Describes one immediate operand: which call argument it is (OpNum),
  // whether the immediate is signed, how many bits it may occupy, and its
  // required alignment expressed as log2 (Align == 3 means multiple of 8).
  struct ArgInfo {
    uint8_t OpNum;
    bool IsSigned;
    uint8_t BitWidth;
    uint8_t Align;
  };
  // A builtin together with its (up to two) immediate operand descriptions.
  // Entries with fewer than two operands leave the second ArgInfo
  // zero-initialized (BitWidth == 0), which the checking loop skips.
  struct BuiltinInfo {
    unsigned BuiltinID;
    ArgInfo Infos[2];
  };

  static BuiltinInfo Infos[] = {
    { Hexagon::BI__builtin_circ_ldd,                  {{ 3, true,  4,  3 }} },
    { Hexagon::BI__builtin_circ_ldw,                  {{ 3, true,  4,  2 }} },
    { Hexagon::BI__builtin_circ_ldh,                  {{ 3, true,  4,  1 }} },
    { Hexagon::BI__builtin_circ_lduh,                 {{ 3, true,  4,  1 }} },
    { Hexagon::BI__builtin_circ_ldb,                  {{ 3, true,  4,  0 }} },
    { Hexagon::BI__builtin_circ_ldub,                 {{ 3, true,  4,  0 }} },
    { Hexagon::BI__builtin_circ_std,                  {{ 3, true,  4,  3 }} },
    { Hexagon::BI__builtin_circ_stw,                  {{ 3, true,  4,  2 }} },
    { Hexagon::BI__builtin_circ_sth,                  {{ 3, true,  4,  1 }} },
    { Hexagon::BI__builtin_circ_sthhi,                {{ 3, true,  4,  1 }} },
    { Hexagon::BI__builtin_circ_stb,                  {{ 3, true,  4,  0 }} },

    { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci,    {{ 1, true,  4,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci,     {{ 1, true,  4,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci,    {{ 1, true,  4,  1 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci,     {{ 1, true,  4,  1 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci,     {{ 1, true,  4,  2 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci,     {{ 1, true,  4,  3 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci,    {{ 1, true,  4,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci,    {{ 1, true,  4,  1 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci,    {{ 1, true,  4,  1 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci,    {{ 1, true,  4,  2 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci,    {{ 1, true,  4,  3 }} },

    { Hexagon::BI__builtin_HEXAGON_A2_combineii,      {{ 1, true,  8,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfrih,          {{ 1, false, 16, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfril,          {{ 1, false, 16, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfrpi,          {{ 0, true,  8,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_bitspliti,      {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi,        {{ 1, false, 8,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti,        {{ 1, true,  8,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cround_ri,      {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_round_ri,       {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat,   {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi,       {{ 1, false, 8,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti,       {{ 1, true,  8,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui,      {{ 1, false, 7,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi,       {{ 1, true,  8,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti,       {{ 1, true,  8,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui,      {{ 1, false, 7,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi,       {{ 1, true,  8,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti,       {{ 1, true,  8,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui,      {{ 1, false, 7,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_C2_bitsclri,       {{ 1, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_C2_muxii,          {{ 2, true,  8,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri,      {{ 1, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfclass,        {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n,        {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p,        {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfclass,        {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n,        {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p,        {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi,     {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2,  {{ 1, false, 6,  2 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri,    {{ 2, false, 3,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc,    {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and,    {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p,        {{ 1, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac,    {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or,     {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc,   {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc,    {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and,    {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r,        {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac,    {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or,     {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat,    {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc,   {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh,       {{ 1, false, 4,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw,       {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc,    {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and,    {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p,        {{ 1, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac,    {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or,     {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
                                                      {{ 1, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd,    {{ 1, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc,    {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and,    {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r,        {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac,    {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or,     {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
                                                      {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd,    {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh,       {{ 1, false, 4,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw,       {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i,       {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_extractu,       {{ 1, false, 5,  0 },
                                                       { 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_extractup,      {{ 1, false, 6,  0 },
                                                       { 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_insert,         {{ 2, false, 5,  0 },
                                                       { 3, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_insertp,        {{ 2, false, 6,  0 },
                                                       { 3, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc,    {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and,    {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p,        {{ 1, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac,    {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or,     {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc,   {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc,    {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and,    {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r,        {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac,    {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or,     {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc,   {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh,       {{ 1, false, 4,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw,       {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_setbit_i,       {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
                                                      {{ 2, false, 4,  0 },
                                                       { 3, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
                                                      {{ 2, false, 4,  0 },
                                                       { 3, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
                                                      {{ 2, false, 4,  0 },
                                                       { 3, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
                                                      {{ 2, false, 4,  0 },
                                                       { 3, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i,    {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i,       {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_valignib,       {{ 2, false, 3,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_vspliceib,      {{ 2, false, 3,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri,    {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri,    {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri,    {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri,    {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_clbaddi,        {{ 1, true , 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi,       {{ 1, true,  6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_extract,        {{ 1, false, 5,  0 },
                                                       { 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_extractp,       {{ 1, false, 6,  0 },
                                                       { 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_lsli,           {{ 0, true,  6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i,      {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri,     {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri,     {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri,    {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri,    {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc,  {{ 3, false, 2,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate,      {{ 2, false, 2,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
                                                      {{ 1, false, 4,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat,     {{ 1, false, 4,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
                                                      {{ 1, false, 4,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p,        {{ 1, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc,    {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and,    {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac,    {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or,     {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc,   {{ 2, false, 6,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r,        {{ 1, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc,    {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and,    {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac,    {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or,     {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc,   {{ 2, false, 5,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_valignbi,       {{ 2, false, 3,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B,  {{ 2, false, 3,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi,      {{ 2, false, 3,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi,      {{ 2, false, 1,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc,  {{ 3, false, 1,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
                                                      {{ 3, false, 1,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi,       {{ 2, false, 1,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B,  {{ 2, false, 1,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc,   {{ 3, false, 1,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
                                                      {{ 3, false, 1,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi,       {{ 2, false, 1,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B,  {{ 2, false, 1,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc,   {{ 3, false, 1,  0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
                                                      {{ 3, false, 1,  0 }} },
  };

  // Use a dynamically initialized static to sort the table exactly once on
  // first run.
  static const bool SortOnce =
      (llvm::sort(Infos,
                 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
                   return LHS.BuiltinID < RHS.BuiltinID;
                 }),
       true);
  (void)SortOnce;

  // Binary-search the sorted table for this builtin; builtins with no
  // immediate operands simply aren't present and are accepted as-is.
  const BuiltinInfo *F = llvm::partition_point(
      Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; });
  if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
    return false;

  bool Error = false;

  for (const ArgInfo &A : F->Infos) {
    // Ignore empty ArgInfo elements.
    if (A.BitWidth == 0)
      continue;

    // Derive the inclusive [Min, Max] range from signedness and bit width,
    // then scale both bounds by the alignment multiple if one is required.
    int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0;
    int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1;
    if (!A.Align) {
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
    } else {
      unsigned M = 1 << A.Align;
      Min *= M;
      Max *= M;
      // Deliberate non-short-circuit "|" so both diagnostics can fire.
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) |
               SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
    }
  }
  return Error;
}
2983
/// Perform semantic checking on a call to a Hexagon builtin. Currently the
/// only Hexagon-specific check is immediate-operand validation; see
/// CheckHexagonBuiltinArgument. Returns true if an error was diagnosed.
bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
}
2988
/// Perform semantic checking on a call to a MIPS builtin: first verify the
/// required CPU feature (DSP/DSPr2/MSA) is enabled for the target, then
/// range-check any immediate operands. Returns true if an error was
/// diagnosed by either check.
bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
                                        unsigned BuiltinID, CallExpr *TheCall) {
  return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
         CheckMipsBuiltinArgument(BuiltinID, TheCall);
}
2994
2995bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
2996 CallExpr *TheCall) {
2997
2998 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
2999 BuiltinID <= Mips::BI__builtin_mips_lwx) {
3000 if (!TI.hasFeature("dsp"))
3001 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
3002 }
3003
3004 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
3005 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
3006 if (!TI.hasFeature("dspr2"))
3007 return Diag(TheCall->getBeginLoc(),
3008 diag::err_mips_builtin_requires_dspr2);
3009 }
3010
3011 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
3012 BuiltinID <= Mips::BI__builtin_msa_xori_b) {
3013 if (!TI.hasFeature("msa"))
3014 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
3015 }
3016
3017 return false;
3018}
3019
3020// CheckMipsBuiltinArgument - Checks the constant value passed to the
3021// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
3022// ordering for DSP is unspecified. MSA is ordered by the data format used
3023// by the underlying instruction i.e., df/m, df/n and then by size.
3024//
3025// FIXME: The size tests here should instead be tablegen'd along with the
3026// definitions from include/clang/Basic/BuiltinsMips.def.
3027// FIXME: GCC is strict on signedness for some of these intrinsics, we should
3028// be too.
bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  // i = index of the immediate operand, [l, u] = its inclusive valid range,
  // m = required multiple (0 means no alignment constraint).
  // NOTE(review): l is declared unsigned but some cases assign negative
  // bounds; the value round-trips back to the intended negative int when
  // passed to SemaBuiltinConstantArgRange.
  unsigned i = 0, l = 0, u = 0, m = 0;
  switch (BuiltinID) {
  default: return false;
  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
  // MSA intrinsics. Instructions (which the intrinsics maps to) which use the
  // df/m field.
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_bclri_b:
  case Mips::BI__builtin_msa_bnegi_b:
  case Mips::BI__builtin_msa_bseti_b:
  case Mips::BI__builtin_msa_sat_s_b:
  case Mips::BI__builtin_msa_sat_u_b:
  case Mips::BI__builtin_msa_slli_b:
  case Mips::BI__builtin_msa_srai_b:
  case Mips::BI__builtin_msa_srari_b:
  case Mips::BI__builtin_msa_srli_b:
  case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_binsli_b:
  case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_bclri_h:
  case Mips::BI__builtin_msa_bnegi_h:
  case Mips::BI__builtin_msa_bseti_h:
  case Mips::BI__builtin_msa_sat_s_h:
  case Mips::BI__builtin_msa_sat_u_h:
  case Mips::BI__builtin_msa_slli_h:
  case Mips::BI__builtin_msa_srai_h:
  case Mips::BI__builtin_msa_srari_h:
  case Mips::BI__builtin_msa_srli_h:
  case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_binsli_h:
  case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 5 bit immediate.
  // The first block of intrinsics actually have an unsigned 5 bit field,
  // not a df/n field.
  case Mips::BI__builtin_msa_cfcmsa:
  case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_clei_u_b:
  case Mips::BI__builtin_msa_clei_u_h:
  case Mips::BI__builtin_msa_clei_u_w:
  case Mips::BI__builtin_msa_clei_u_d:
  case Mips::BI__builtin_msa_clti_u_b:
  case Mips::BI__builtin_msa_clti_u_h:
  case Mips::BI__builtin_msa_clti_u_w:
  case Mips::BI__builtin_msa_clti_u_d:
  case Mips::BI__builtin_msa_maxi_u_b:
  case Mips::BI__builtin_msa_maxi_u_h:
  case Mips::BI__builtin_msa_maxi_u_w:
  case Mips::BI__builtin_msa_maxi_u_d:
  case Mips::BI__builtin_msa_mini_u_b:
  case Mips::BI__builtin_msa_mini_u_h:
  case Mips::BI__builtin_msa_mini_u_w:
  case Mips::BI__builtin_msa_mini_u_d:
  case Mips::BI__builtin_msa_addvi_b:
  case Mips::BI__builtin_msa_addvi_h:
  case Mips::BI__builtin_msa_addvi_w:
  case Mips::BI__builtin_msa_addvi_d:
  case Mips::BI__builtin_msa_bclri_w:
  case Mips::BI__builtin_msa_bnegi_w:
  case Mips::BI__builtin_msa_bseti_w:
  case Mips::BI__builtin_msa_sat_s_w:
  case Mips::BI__builtin_msa_sat_u_w:
  case Mips::BI__builtin_msa_slli_w:
  case Mips::BI__builtin_msa_srai_w:
  case Mips::BI__builtin_msa_srari_w:
  case Mips::BI__builtin_msa_srli_w:
  case Mips::BI__builtin_msa_srlri_w:
  case Mips::BI__builtin_msa_subvi_b:
  case Mips::BI__builtin_msa_subvi_h:
  case Mips::BI__builtin_msa_subvi_w:
  case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_binsli_w:
  case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
  // These intrinsics take an unsigned 6 bit immediate.
  case Mips::BI__builtin_msa_bclri_d:
  case Mips::BI__builtin_msa_bnegi_d:
  case Mips::BI__builtin_msa_bseti_d:
  case Mips::BI__builtin_msa_sat_s_d:
  case Mips::BI__builtin_msa_sat_u_d:
  case Mips::BI__builtin_msa_slli_d:
  case Mips::BI__builtin_msa_srai_d:
  case Mips::BI__builtin_msa_srari_d:
  case Mips::BI__builtin_msa_srli_d:
  case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_msa_binsli_d:
  case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
  // These intrinsics take a signed 5 bit immediate.
  case Mips::BI__builtin_msa_ceqi_b:
  case Mips::BI__builtin_msa_ceqi_h:
  case Mips::BI__builtin_msa_ceqi_w:
  case Mips::BI__builtin_msa_ceqi_d:
  case Mips::BI__builtin_msa_clti_s_b:
  case Mips::BI__builtin_msa_clti_s_h:
  case Mips::BI__builtin_msa_clti_s_w:
  case Mips::BI__builtin_msa_clti_s_d:
  case Mips::BI__builtin_msa_clei_s_b:
  case Mips::BI__builtin_msa_clei_s_h:
  case Mips::BI__builtin_msa_clei_s_w:
  case Mips::BI__builtin_msa_clei_s_d:
  case Mips::BI__builtin_msa_maxi_s_b:
  case Mips::BI__builtin_msa_maxi_s_h:
  case Mips::BI__builtin_msa_maxi_s_w:
  case Mips::BI__builtin_msa_maxi_s_d:
  case Mips::BI__builtin_msa_mini_s_b:
  case Mips::BI__builtin_msa_mini_s_h:
  case Mips::BI__builtin_msa_mini_s_w:
  case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
  // These intrinsics take an unsigned 8 bit immediate.
  case Mips::BI__builtin_msa_andi_b:
  case Mips::BI__builtin_msa_nori_b:
  case Mips::BI__builtin_msa_ori_b:
  case Mips::BI__builtin_msa_shf_b:
  case Mips::BI__builtin_msa_shf_h:
  case Mips::BI__builtin_msa_shf_w:
  case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
  case Mips::BI__builtin_msa_bseli_b:
  case Mips::BI__builtin_msa_bmnzi_b:
  case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
  // df/n format
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_copy_s_b:
  case Mips::BI__builtin_msa_copy_u_b:
  case Mips::BI__builtin_msa_insve_b:
  case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_copy_s_h:
  case Mips::BI__builtin_msa_copy_u_h:
  case Mips::BI__builtin_msa_insve_h:
  case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 2 bit immediate.
  case Mips::BI__builtin_msa_copy_s_w:
  case Mips::BI__builtin_msa_copy_u_w:
  case Mips::BI__builtin_msa_insve_w:
  case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
  case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
  // These intrinsics take an unsigned 1 bit immediate.
  case Mips::BI__builtin_msa_copy_s_d:
  case Mips::BI__builtin_msa_copy_u_d:
  case Mips::BI__builtin_msa_insve_d:
  case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
  case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
  // Memory offsets and immediate loads.
  // These intrinsics take a signed 10 bit immediate.
  case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
  case Mips::BI__builtin_msa_ldi_h:
  case Mips::BI__builtin_msa_ldi_w:
  case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
  case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
  }

  // No alignment constraint: a simple range check suffices.
  if (!m)
    return SemaBuiltinConstantArgRange(TheCall, i, l, u);

  // Memory-offset immediates must also be a multiple of the access size m.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u) ||
         SemaBuiltinConstantArgMultiple(TheCall, i, m);
}
3205
/// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
/// advancing the pointer over the consumed characters. The decoded type is
/// returned. If the decoded type represents a constant integer with a
/// constraint on its value then Mask is set to that value. The type descriptors
/// used in Str are specific to PPC MMA builtins and are documented in the file
/// defining the PPC builtins.
static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
                                        unsigned &Mask) {
  bool RequireICE = false;
  ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
  switch (*Str++) {
  case 'V':
    // 'V': an AltiVec-style vector of 16 unsigned chars.
    return Context.getVectorType(Context.UnsignedCharTy, 16,
                                 VectorType::VectorKind::AltiVecVector);
  case 'i': {
    // 'i<n>': a constant integer argument; the decimal constraint <n> is
    // handed back to the caller through Mask for later range checking.
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing constant parameter constraint");
    Str = End;
    Mask = size;
    return Context.IntTy;
  }
  case 'W': {
    // 'W<n>': a PPC MMA vector type of <n> bits, optionally followed by
    // '*' (pointer) and/or 'C' (const) modifiers.
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing PowerPC MMA type size");
    Str = End;
    QualType Type;
    // Map the bit width onto the corresponding MMA type via PPCTypes.def.
    switch (size) {
  #define PPC_VECTOR_TYPE(typeName, Id, size) \
    case size: Type = Context.Id##Ty; break;
  #include "clang/Basic/PPCTypes.def"
    default: llvm_unreachable("Invalid PowerPC MMA vector type");
    }
    // Consume trailing '*' / 'C' modifiers; the first unrecognized character
    // terminates this descriptor and is pushed back for the caller.
    bool CheckVectorArgs = false;
    while (!CheckVectorArgs) {
      switch (*Str++) {
      case '*':
        Type = Context.getPointerType(Type);
        break;
      case 'C':
        Type = Type.withConst();
        break;
      default:
        CheckVectorArgs = true;
        --Str;
        break;
      }
    }
    return Type;
  }
  default:
    // Anything else is a generic builtin type string; fall back to the
    // common decoder (note --Str restores the consumed character).
    return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true);
  }
}
3261
3262static bool isPPC_64Builtin(unsigned BuiltinID) {
3263 // These builtins only work on PPC 64bit targets.
3264 switch (BuiltinID) {
3265 case PPC::BI__builtin_divde:
3266 case PPC::BI__builtin_divdeu:
3267 case PPC::BI__builtin_bpermd:
3268 case PPC::BI__builtin_ppc_ldarx:
3269 case PPC::BI__builtin_ppc_stdcx:
3270 case PPC::BI__builtin_ppc_tdw:
3271 case PPC::BI__builtin_ppc_trapd:
3272 case PPC::BI__builtin_ppc_cmpeqb:
3273 case PPC::BI__builtin_ppc_setb:
3274 case PPC::BI__builtin_ppc_mulhd:
3275 case PPC::BI__builtin_ppc_mulhdu:
3276 case PPC::BI__builtin_ppc_maddhd:
3277 case PPC::BI__builtin_ppc_maddhdu:
3278 case PPC::BI__builtin_ppc_maddld:
3279 case PPC::BI__builtin_ppc_load8r:
3280 case PPC::BI__builtin_ppc_store8r:
3281 case PPC::BI__builtin_ppc_insert_exp:
3282 case PPC::BI__builtin_ppc_extract_sig:
3283 return true;
3284 }
3285 return false;
3286}
3287
3288static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall,
3289 StringRef FeatureToCheck, unsigned DiagID,
3290 StringRef DiagArg = "") {
3291 if (S.Context.getTargetInfo().hasFeature(FeatureToCheck))
3292 return false;
3293
3294 if (DiagArg.empty())
3295 S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange();
3296 else
3297 S.Diag(TheCall->getBeginLoc(), DiagID)
3298 << DiagArg << TheCall->getSourceRange();
3299
3300 return true;
3301}
3302
3303/// Returns true if the argument consists of one contiguous run of 1s with any
3304/// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so
3305/// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not,
3306/// since all 1s are not contiguous.
3307bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
3308 llvm::APSInt Result;
3309 // We can't check the value of a dependent argument.
3310 Expr *Arg = TheCall->getArg(ArgNum);
3311 if (Arg->isTypeDependent() || Arg->isValueDependent())
3312 return false;
3313
3314 // Check constant-ness first.
3315 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
3316 return true;
3317
3318 // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s.
3319 if (Result.isShiftedMask() || (~Result).isShiftedMask())
3320 return false;
3321
3322 return Diag(TheCall->getBeginLoc(),
3323 diag::err_argument_not_contiguous_bit_field)
3324 << ArgNum << Arg->getSourceRange();
3325}
3326
/// Target-specific semantic checks for PowerPC builtin calls: reject 64-bit
/// only builtins on 32-bit targets, enforce required target features, and
/// range-check immediate operands. Returns true if an error was diagnosed.
bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  // i/l/u describe a deferred constant-argument check: argument i must be a
  // constant in [l, u]. The default (0,0,0) is checked after the switch for
  // builtins that only set these fields and break.
  unsigned i = 0, l = 0, u = 0;
  bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
  llvm::APSInt Result;

  if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
    return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
           << TheCall->getSourceRange();

  switch (BuiltinID) {
  default: return false;
  case PPC::BI__builtin_altivec_crypto_vshasigmaw:
  case PPC::BI__builtin_altivec_crypto_vshasigmad:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
  case PPC::BI__builtin_altivec_dss:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
  case PPC::BI__builtin_tbegin:
  case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break;
  case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break;
  case PPC::BI__builtin_tabortwc:
  case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break;
  case PPC::BI__builtin_tabortwci:
  case PPC::BI__builtin_tabortdci:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
  case PPC::BI__builtin_altivec_dst:
  case PPC::BI__builtin_altivec_dstt:
  case PPC::BI__builtin_altivec_dstst:
  case PPC::BI__builtin_altivec_dststt:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
  case PPC::BI__builtin_vsx_xxpermdi:
  case PPC::BI__builtin_vsx_xxsldwi:
    return SemaBuiltinVSX(TheCall);
  case PPC::BI__builtin_divwe:
  case PPC::BI__builtin_divweu:
  case PPC::BI__builtin_divde:
  case PPC::BI__builtin_divdeu:
    return SemaFeatureCheck(*this, TheCall, "extdiv",
                            diag::err_ppc_builtin_only_on_arch, "7");
  case PPC::BI__builtin_bpermd:
    return SemaFeatureCheck(*this, TheCall, "bpermd",
                            diag::err_ppc_builtin_only_on_arch, "7");
  case PPC::BI__builtin_unpack_vector_int128:
    return SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_only_on_arch, "7") ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case PPC::BI__builtin_pack_vector_int128:
    return SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_only_on_arch, "7");
  case PPC::BI__builtin_altivec_vgnb:
    return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
  case PPC::BI__builtin_altivec_vec_replace_elt:
  case PPC::BI__builtin_altivec_vec_replace_unaligned: {
    // The element argument must be an element type of the vector argument,
    // and the byte index must fit the element width (arg 2: 0..12 for 32-bit
    // elements, 0..8 for 64-bit elements).
    QualType VecTy = TheCall->getArg(0)->getType();
    QualType EltTy = TheCall->getArg(1)->getType();
    unsigned Width = Context.getIntWidth(EltTy);
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 12 : 8) ||
           !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy);
  }
  case PPC::BI__builtin_vsx_xxeval:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
  case PPC::BI__builtin_altivec_vsldbi:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
  case PPC::BI__builtin_altivec_vsrdbi:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
  case PPC::BI__builtin_vsx_xxpermx:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
  case PPC::BI__builtin_ppc_tw:
  case PPC::BI__builtin_ppc_tdw:
    return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31);
  case PPC::BI__builtin_ppc_cmpeqb:
  case PPC::BI__builtin_ppc_setb:
  case PPC::BI__builtin_ppc_maddhd:
  case PPC::BI__builtin_ppc_maddhdu:
  case PPC::BI__builtin_ppc_maddld:
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9");
  case PPC::BI__builtin_ppc_cmprb:
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9") ||
           SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
  // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
  // be a constant that represents a contiguous bit field.
  case PPC::BI__builtin_ppc_rlwnm:
    return SemaBuiltinConstantArg(TheCall, 1, Result) ||
           SemaValueIsRunOfOnes(TheCall, 2);
  case PPC::BI__builtin_ppc_rlwimi:
  case PPC::BI__builtin_ppc_rldimi:
    return SemaBuiltinConstantArg(TheCall, 2, Result) ||
           SemaValueIsRunOfOnes(TheCall, 3);
  case PPC::BI__builtin_ppc_extract_exp:
  case PPC::BI__builtin_ppc_extract_sig:
  case PPC::BI__builtin_ppc_insert_exp:
    return SemaFeatureCheck(*this, TheCall, "power9-vector",
                            diag::err_ppc_builtin_only_on_arch, "9");
  case PPC::BI__builtin_ppc_mtfsb0:
  case PPC::BI__builtin_ppc_mtfsb1:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
  case PPC::BI__builtin_ppc_mtfsf:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255);
  case PPC::BI__builtin_ppc_mtfsfi:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  case PPC::BI__builtin_ppc_alignx:
    return SemaBuiltinConstantArgPower2(TheCall, 0);
  case PPC::BI__builtin_ppc_rdlam:
    return SemaValueIsRunOfOnes(TheCall, 2);
  case PPC::BI__builtin_ppc_icbt:
  case PPC::BI__builtin_ppc_sthcx:
  case PPC::BI__builtin_ppc_stbcx:
  case PPC::BI__builtin_ppc_lharx:
  case PPC::BI__builtin_ppc_lbarx:
    return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
                            diag::err_ppc_builtin_only_on_arch, "8");
  case PPC::BI__builtin_vsx_ldrmb:
  case PPC::BI__builtin_vsx_strmb:
    return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
                            diag::err_ppc_builtin_only_on_arch, "8") ||
           SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
  // MMA builtins are checked against their per-builtin type descriptor
  // strings, generated from BuiltinsPPC.def.
#define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
  case PPC::BI__builtin_##Name: \
    return SemaBuiltinPPCMMACall(TheCall, Types);
#include "clang/Basic/BuiltinsPPC.def"
  }
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
3455
// Check if the given type is a non-pointer PPC MMA type. This function is used
// in Sema to prevent invalid uses of restricted PPC MMA types.
bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
  // Pointers to / arrays of MMA types are allowed; only direct values are
  // restricted.
  if (Type->isPointerType() || Type->isArrayType())
    return false;

  QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
  // The macro below expands through PPCTypes.def, turning the condition into
  //   if (false || CoreType == Context.<T1>Ty || CoreType == Context.<T2>Ty ...)
  // covering every MMA type.
#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
  if (false
#include "clang/Basic/PPCTypes.def"
     ) {
    Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
    return true;
  }
  return false;
}
3472
/// Target-specific checks for AMDGCN builtins: validate the memory-order and
/// synchronization-scope arguments of the atomic inc/dec and fence builtins.
/// Returns true if a diagnostic was emitted.
bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  // position of memory order and scope arguments in the builtin
  unsigned OrderIndex, ScopeIndex;
  switch (BuiltinID) {
  case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
    OrderIndex = 2;
    ScopeIndex = 3;
    break;
  case AMDGPU::BI__builtin_amdgcn_fence:
    OrderIndex = 0;
    ScopeIndex = 1;
    break;
  default:
    // All other builtins carry no order/scope arguments to validate.
    return false;
  }

  // The memory-order argument must evaluate to an integer constant.
  ExprResult Arg = TheCall->getArg(OrderIndex);
  auto ArgExpr = Arg.get();
  Expr::EvalResult ArgResult;

  if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
           << ArgExpr->getType();
  auto Ord = ArgResult.Val.getInt().getZExtValue();

  // Check validity of memory ordering as per C11 / C++11's memory model.
  // Only fence needs check. Atomic dec/inc allow all memory orders.
  if (!llvm::isValidAtomicOrderingCABI(Ord))
    return Diag(ArgExpr->getBeginLoc(),
                diag::warn_atomic_op_has_invalid_memory_order)
           << ArgExpr->getSourceRange();
  switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
  case llvm::AtomicOrderingCABI::relaxed:
  case llvm::AtomicOrderingCABI::consume:
    // relaxed/consume have no meaning for a fence; reject them there only.
    if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
      return Diag(ArgExpr->getBeginLoc(),
                  diag::warn_atomic_op_has_invalid_memory_order)
             << ArgExpr->getSourceRange();
    break;
  case llvm::AtomicOrderingCABI::acquire:
  case llvm::AtomicOrderingCABI::release:
  case llvm::AtomicOrderingCABI::acq_rel:
  case llvm::AtomicOrderingCABI::seq_cst:
    break;
  }

  Arg = TheCall->getArg(ScopeIndex);
  ArgExpr = Arg.get();
  Expr::EvalResult ArgResult1;
  // Check that sync scope is a constant literal
  if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
           << ArgExpr->getType();

  return false;
}
3533
3534bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
3535 llvm::APSInt Result;
3536
3537 // We can't check the value of a dependent argument.
3538 Expr *Arg = TheCall->getArg(ArgNum);
3539 if (Arg->isTypeDependent() || Arg->isValueDependent())
3540 return false;
3541
3542 // Check constant-ness first.
3543 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
3544 return true;
3545
3546 int64_t Val = Result.getSExtValue();
3547 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
3548 return false;
3549
3550 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
3551 << Arg->getSourceRange();
3552}
3553
3554bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
3555 unsigned BuiltinID,
3556 CallExpr *TheCall) {
3557 // CodeGenFunction can also detect this, but this gives a better error
3558 // message.
3559 bool FeatureMissing = false;
3560 SmallVector<StringRef> ReqFeatures;
3561 StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID);
3562 Features.split(ReqFeatures, ',');
3563
3564 // Check if each required feature is included
3565 for (StringRef F : ReqFeatures) {
3566 if (TI.hasFeature(F))
3567 continue;
3568
3569 // If the feature is 64bit, alter the string so it will print better in
3570 // the diagnostic.
3571 if (F == "64bit")
3572 F = "RV64";
3573
3574 // Convert features like "zbr" and "experimental-zbr" to "Zbr".
3575 F.consume_front("experimental-");
3576 std::string FeatureStr = F.str();
3577 FeatureStr[0] = std::toupper(FeatureStr[0]);
3578
3579 // Error message
3580 FeatureMissing = true;
3581 Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension)
3582 << TheCall->getSourceRange() << StringRef(FeatureStr);
3583 }
3584
3585 if (FeatureMissing)
3586 return true;
3587
3588 switch (BuiltinID) {
3589 case RISCV::BI__builtin_rvv_vsetvli:
3590 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) ||
3591 CheckRISCVLMUL(TheCall, 2);
3592 case RISCV::BI__builtin_rvv_vsetvlimax:
3593 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
3594 CheckRISCVLMUL(TheCall, 1);
3595 case RISCV::BI__builtin_rvv_vget_v_i8m2_i8m1:
3596 case RISCV::BI__builtin_rvv_vget_v_i16m2_i16m1:
3597 case RISCV::BI__builtin_rvv_vget_v_i32m2_i32m1:
3598 case RISCV::BI__builtin_rvv_vget_v_i64m2_i64m1:
3599 case RISCV::BI__builtin_rvv_vget_v_f32m2_f32m1:
3600 case RISCV::BI__builtin_rvv_vget_v_f64m2_f64m1:
3601 case RISCV::BI__builtin_rvv_vget_v_u8m2_u8m1:
3602 case RISCV::BI__builtin_rvv_vget_v_u16m2_u16m1:
3603 case RISCV::BI__builtin_rvv_vget_v_u32m2_u32m1:
3604 case RISCV::BI__builtin_rvv_vget_v_u64m2_u64m1:
3605 case RISCV::BI__builtin_rvv_vget_v_i8m4_i8m2:
3606 case RISCV::BI__builtin_rvv_vget_v_i16m4_i16m2:
3607 case RISCV::BI__builtin_rvv_vget_v_i32m4_i32m2:
3608 case RISCV::BI__builtin_rvv_vget_v_i64m4_i64m2:
3609 case RISCV::BI__builtin_rvv_vget_v_f32m4_f32m2:
3610 case RISCV::BI__builtin_rvv_vget_v_f64m4_f64m2:
3611 case RISCV::BI__builtin_rvv_vget_v_u8m4_u8m2:
3612 case RISCV::BI__builtin_rvv_vget_v_u16m4_u16m2:
3613 case RISCV::BI__builtin_rvv_vget_v_u32m4_u32m2:
3614 case RISCV::BI__builtin_rvv_vget_v_u64m4_u64m2:
3615 case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m4:
3616 case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m4:
3617 case RISCV::BI__builtin_rvv_vget_v_i32m8_i32m4:
3618 case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m4:
3619 case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m4:
3620 case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m4:
3621 case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m4:
3622 case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m4:
3623 case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m4:
3624 case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m4:
3625 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
3626 case RISCV::BI__builtin_rvv_vget_v_i8m4_i8m1:
3627 case RISCV::BI__builtin_rvv_vget_v_i16m4_i16m1:
3628 case RISCV::BI__builtin_rvv_vget_v_i32m4_i32m1:
3629 case RISCV::BI__builtin_rvv_vget_v_i64m4_i64m1:
3630 case RISCV::BI__builtin_rvv_vget_v_f32m4_f32m1:
3631 case RISCV::BI__builtin_rvv_vget_v_f64m4_f64m1:
3632 case RISCV::BI__builtin_rvv_vget_v_u8m4_u8m1:
3633 case RISCV::BI__builtin_rvv_vget_v_u16m4_u16m1:
3634 case RISCV::BI__builtin_rvv_vget_v_u32m4_u32m1:
3635 case RISCV::BI__builtin_rvv_vget_v_u64m4_u64m1:
3636 case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m2:
3637 case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m2:
3638 case RISCV::BI__builtin_rvv_vget_v_i32m8_i32m2:
3639 case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m2:
3640 case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m2:
3641 case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m2:
3642 case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m2:
3643 case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m2:
3644 case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m2:
3645 case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m2:
3646 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
3647 case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m1:
3648 case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m1:
3649 case RISCV::BI__builtin_rvv_vget_v_i32m8_i32m1:
3650 case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m1:
3651 case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m1:
3652 case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m1:
3653 case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m1:
3654 case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m1:
3655 case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m1:
3656 case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m1:
3657 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
3658 case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m2:
3659 case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m2:
3660 case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m2:
3661 case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m2:
3662 case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m2:
3663 case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m2:
3664 case RISCV::BI__builtin_rvv_vset_v_u8m1_u8m2:
3665 case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m2:
3666 case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m2:
3667 case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m2:
3668 case RISCV::BI__builtin_rvv_vset_v_i8m2_i8m4:
3669 case RISCV::BI__builtin_rvv_vset_v_i16m2_i16m4:
3670 case RISCV::BI__builtin_rvv_vset_v_i32m2_i32m4:
3671 case RISCV::BI__builtin_rvv_vset_v_i64m2_i64m4:
3672 case RISCV::BI__builtin_rvv_vset_v_f32m2_f32m4:
3673 case RISCV::BI__builtin_rvv_vset_v_f64m2_f64m4:
3674 case RISCV::BI__builtin_rvv_vset_v_u8m2_u8m4:
3675 case RISCV::BI__builtin_rvv_vset_v_u16m2_u16m4:
3676 case RISCV::BI__builtin_rvv_vset_v_u32m2_u32m4:
3677 case RISCV::BI__builtin_rvv_vset_v_u64m2_u64m4:
3678 case RISCV::BI__builtin_rvv_vset_v_i8m4_i8m8:
3679 case RISCV::BI__builtin_rvv_vset_v_i16m4_i16m8:
3680 case RISCV::BI__builtin_rvv_vset_v_i32m4_i32m8:
3681 case RISCV::BI__builtin_rvv_vset_v_i64m4_i64m8:
3682 case RISCV::BI__builtin_rvv_vset_v_f32m4_f32m8:
3683 case RISCV::BI__builtin_rvv_vset_v_f64m4_f64m8:
3684 case RISCV::BI__builtin_rvv_vset_v_u8m4_u8m8:
3685 case RISCV::BI__builtin_rvv_vset_v_u16m4_u16m8:
3686 case RISCV::BI__builtin_rvv_vset_v_u32m4_u32m8:
3687 case RISCV::BI__builtin_rvv_vset_v_u64m4_u64m8:
3688 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
3689 case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m4:
3690 case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m4:
3691 case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m4:
3692 case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m4:
3693 case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m4:
3694 case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m4:
3695 case RISCV::BI__builtin_rvv_vset_v_u8m1_u8m4:
3696 case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m4:
3697 case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m4:
3698 case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m4:
3699 case RISCV::BI__builtin_rvv_vset_v_i8m2_i8m8:
3700 case RISCV::BI__builtin_rvv_vset_v_i16m2_i16m8:
3701 case RISCV::BI__builtin_rvv_vset_v_i32m2_i32m8:
3702 case RISCV::BI__builtin_rvv_vset_v_i64m2_i64m8:
3703 case RISCV::BI__builtin_rvv_vset_v_f32m2_f32m8:
3704 case RISCV::BI__builtin_rvv_vset_v_f64m2_f64m8:
3705 case RISCV::BI__builtin_rvv_vset_v_u8m2_u8m8:
3706 case RISCV::BI__builtin_rvv_vset_v_u16m2_u16m8:
3707 case RISCV::BI__builtin_rvv_vset_v_u32m2_u32m8:
3708 case RISCV::BI__builtin_rvv_vset_v_u64m2_u64m8:
3709 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
3710 case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m8:
3711 case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m8:
3712 case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m8:
3713 case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m8:
3714 case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m8:
3715 case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m8:
3716 case RISCV::BI__builtin_rvv_vset_v_u8m1_u8m8:
3717 case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m8:
3718 case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m8:
3719 case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m8:
3720 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
3721 }
3722
3723 return false;
3724}
3725
/// Target-specific checks for SystemZ builtins: reject constant tabort codes
/// below 256 and range-check the immediate operands of the vector builtins.
/// Returns true if a diagnostic was emitted.
bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  // __builtin_tabort: when the abort code is a known constant, values in
  // [0, 256) are diagnosed as invalid.
  if (BuiltinID == SystemZ::BI__builtin_tabort) {
    Expr *Arg = TheCall->getArg(0);
    if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context))
      if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
        return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
               << Arg->getSourceRange();
  }

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here. Argument i must be a constant in [l, u].
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_verimb:
  case SystemZ::BI__builtin_s390_verimh:
  case SystemZ::BI__builtin_s390_verimf:
  case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
  case SystemZ::BI__builtin_s390_vfaeb:
  case SystemZ::BI__builtin_s390_vfaeh:
  case SystemZ::BI__builtin_s390_vfaef:
  case SystemZ::BI__builtin_s390_vfaebs:
  case SystemZ::BI__builtin_s390_vfaehs:
  case SystemZ::BI__builtin_s390_vfaefs:
  case SystemZ::BI__builtin_s390_vfaezb:
  case SystemZ::BI__builtin_s390_vfaezh:
  case SystemZ::BI__builtin_s390_vfaezf:
  case SystemZ::BI__builtin_s390_vfaezbs:
  case SystemZ::BI__builtin_s390_vfaezhs:
  case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vfisb:
  case SystemZ::BI__builtin_s390_vfidb:
    // Two immediates here; both are checked directly.
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
  case SystemZ::BI__builtin_s390_vftcisb:
  case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
  case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vstrcb:
  case SystemZ::BI__builtin_s390_vstrch:
  case SystemZ::BI__builtin_s390_vstrcf:
  case SystemZ::BI__builtin_s390_vstrczb:
  case SystemZ::BI__builtin_s390_vstrczh:
  case SystemZ::BI__builtin_s390_vstrczf:
  case SystemZ::BI__builtin_s390_vstrcbs:
  case SystemZ::BI__builtin_s390_vstrchs:
  case SystemZ::BI__builtin_s390_vstrcfs:
  case SystemZ::BI__builtin_s390_vstrczbs:
  case SystemZ::BI__builtin_s390_vstrczhs:
  case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vfminsb:
  case SystemZ::BI__builtin_s390_vfmaxsb:
  case SystemZ::BI__builtin_s390_vfmindb:
  case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
  case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
  case SystemZ::BI__builtin_s390_vclfnhs:
  case SystemZ::BI__builtin_s390_vclfnls:
  case SystemZ::BI__builtin_s390_vcfn:
  case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break;
  }
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
3794
3795/// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
3796/// This checks that the target supports __builtin_cpu_supports and
3797/// that the string argument is constant and valid.
3798static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI,
3799 CallExpr *TheCall) {
3800 Expr *Arg = TheCall->getArg(0);
3801
3802 // Check if the argument is a string literal.
3803 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
3804 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
3805 << Arg->getSourceRange();
3806
3807 // Check the contents of the string.
3808 StringRef Feature =
3809 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
3810 if (!TI.validateCpuSupports(Feature))
3811 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
3812 << Arg->getSourceRange();
3813 return false;
3814}
3815
3816/// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *).
3817/// This checks that the target supports __builtin_cpu_is and
3818/// that the string argument is constant and valid.
3819static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) {
3820 Expr *Arg = TheCall->getArg(0);
3821
3822 // Check if the argument is a string literal.
3823 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
3824 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
3825 << Arg->getSourceRange();
3826
3827 // Check the contents of the string.
3828 StringRef Feature =
3829 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
3830 if (!TI.validateCpuIs(Feature))
3831 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
3832 << Arg->getSourceRange();
3833 return false;
3834}
3835
3836// Check if the rounding mode is legal.
3837bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
3838 // Indicates if this instruction has rounding control or just SAE.
3839 bool HasRC = false;
3840
3841 unsigned ArgNum = 0;
3842 switch (BuiltinID) {
3843 default:
3844 return false;
3845 case X86::BI__builtin_ia32_vcvttsd2si32:
3846 case X86::BI__builtin_ia32_vcvttsd2si64:
3847 case X86::BI__builtin_ia32_vcvttsd2usi32:
3848 case X86::BI__builtin_ia32_vcvttsd2usi64:
3849 case X86::BI__builtin_ia32_vcvttss2si32:
3850 case X86::BI__builtin_ia32_vcvttss2si64:
3851 case X86::BI__builtin_ia32_vcvttss2usi32:
3852 case X86::BI__builtin_ia32_vcvttss2usi64:
3853 ArgNum = 1;
3854 break;
3855 case X86::BI__builtin_ia32_maxpd512:
3856 case X86::BI__builtin_ia32_maxps512:
3857 case X86::BI__builtin_ia32_minpd512:
3858 case X86::BI__builtin_ia32_minps512:
3859 ArgNum = 2;
3860 break;
3861 case X86::BI__builtin_ia32_cvtps2pd512_mask:
3862 case X86::BI__builtin_ia32_cvttpd2dq512_mask:
3863 case X86::BI__builtin_ia32_cvttpd2qq512_mask:
3864 case X86::BI__builtin_ia32_cvttpd2udq512_mask:
3865 case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
3866 case X86::BI__builtin_ia32_cvttps2dq512_mask:
3867 case X86::BI__builtin_ia32_cvttps2qq512_mask:
3868 case X86::BI__builtin_ia32_cvttps2udq512_mask:
3869 case X86::BI__builtin_ia32_cvttps2uqq512_mask:
3870 case X86::BI__builtin_ia32_exp2pd_mask:
3871 case X86::BI__builtin_ia32_exp2ps_mask:
3872 case X86::BI__builtin_ia32_getexppd512_mask:
3873 case X86::BI__builtin_ia32_getexpps512_mask:
3874 case X86::BI__builtin_ia32_rcp28pd_mask:
3875 case X86::BI__builtin_ia32_rcp28ps_mask:
3876 case X86::BI__builtin_ia32_rsqrt28pd_mask:
3877 case X86::BI__builtin_ia32_rsqrt28ps_mask:
3878 case X86::BI__builtin_ia32_vcomisd:
3879 case X86::BI__builtin_ia32_vcomiss:
3880 case X86::BI__builtin_ia32_vcvtph2ps512_mask:
3881 ArgNum = 3;
3882 break;
3883 case X86::BI__builtin_ia32_cmppd512_mask:
3884 case X86::BI__builtin_ia32_cmpps512_mask:
3885 case X86::BI__builtin_ia32_cmpsd_mask:
3886 case X86::BI__builtin_ia32_cmpss_mask:
3887 case X86::BI__builtin_ia32_cvtss2sd_round_mask:
3888 case X86::BI__builtin_ia32_getexpsd128_round_mask:
3889 case X86::BI__builtin_ia32_getexpss128_round_mask:
3890 case X86::BI__builtin_ia32_getmantpd512_mask:
3891 case X86::BI__builtin_ia32_getmantps512_mask:
3892 case X86::BI__builtin_ia32_maxsd_round_mask:
3893 case X86::BI__builtin_ia32_maxss_round_mask:
3894 case X86::BI__builtin_ia32_minsd_round_mask:
3895 case X86::BI__builtin_ia32_minss_round_mask:
3896 case X86::BI__builtin_ia32_rcp28sd_round_mask:
3897 case X86::BI__builtin_ia32_rcp28ss_round_mask:
3898 case X86::BI__builtin_ia32_reducepd512_mask:
3899 case X86::BI__builtin_ia32_reduceps512_mask:
3900 case X86::BI__builtin_ia32_rndscalepd_mask:
3901 case X86::BI__builtin_ia32_rndscaleps_mask:
3902 case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
3903 case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
3904 ArgNum = 4;
3905 break;
3906 case X86::BI__builtin_ia32_fixupimmpd512_mask:
3907 case X86::BI__builtin_ia32_fixupimmpd512_maskz:
3908 case X86::BI__builtin_ia32_fixupimmps512_mask:
3909 case X86::BI__builtin_ia32_fixupimmps512_maskz:
3910 case X86::BI__builtin_ia32_fixupimmsd_mask:
3911 case X86::BI__builtin_ia32_fixupimmsd_maskz:
3912 case X86::BI__builtin_ia32_fixupimmss_mask:
3913 case X86::BI__builtin_ia32_fixupimmss_maskz:
3914 case X86::BI__builtin_ia32_getmantsd_round_mask:
3915 case X86::BI__builtin_ia32_getmantss_round_mask:
3916 case X86::BI__builtin_ia32_rangepd512_mask:
3917 case X86::BI__builtin_ia32_rangeps512_mask:
3918 case X86::BI__builtin_ia32_rangesd128_round_mask:
3919 case X86::BI__builtin_ia32_rangess128_round_mask:
3920 case X86::BI__builtin_ia32_reducesd_mask:
3921 case X86::BI__builtin_ia32_reducess_mask:
3922 case X86::BI__builtin_ia32_rndscalesd_round_mask:
3923 case X86::BI__builtin_ia32_rndscaless_round_mask:
3924 ArgNum = 5;
3925 break;
3926 case X86::BI__builtin_ia32_vcvtsd2si64:
3927 case X86::BI__builtin_ia32_vcvtsd2si32:
3928 case X86::BI__builtin_ia32_vcvtsd2usi32:
3929 case X86::BI__builtin_ia32_vcvtsd2usi64:
3930 case X86::BI__builtin_ia32_vcvtss2si32:
3931 case X86::BI__builtin_ia32_vcvtss2si64:
3932 case X86::BI__builtin_ia32_vcvtss2usi32:
3933 case X86::BI__builtin_ia32_vcvtss2usi64:
3934 case X86::BI__builtin_ia32_sqrtpd512:
3935 case X86::BI__builtin_ia32_sqrtps512:
3936 ArgNum = 1;
3937 HasRC = true;
3938 break;
3939 case X86::BI__builtin_ia32_addpd512:
3940 case X86::BI__builtin_ia32_addps512:
3941 case X86::BI__builtin_ia32_divpd512:
3942 case X86::BI__builtin_ia32_divps512:
3943 case X86::BI__builtin_ia32_mulpd512:
3944 case X86::BI__builtin_ia32_mulps512:
3945 case X86::BI__builtin_ia32_subpd512:
3946 case X86::BI__builtin_ia32_subps512:
3947 case X86::BI__builtin_ia32_cvtsi2sd64:
3948 case X86::BI__builtin_ia32_cvtsi2ss32:
3949 case X86::BI__builtin_ia32_cvtsi2ss64:
3950 case X86::BI__builtin_ia32_cvtusi2sd64:
3951 case X86::BI__builtin_ia32_cvtusi2ss32:
3952 case X86::BI__builtin_ia32_cvtusi2ss64:
3953 ArgNum = 2;
3954 HasRC = true;
3955 break;
3956 case X86::BI__builtin_ia32_cvtdq2ps512_mask:
3957 case X86::BI__builtin_ia32_cvtudq2ps512_mask:
3958 case X86::BI__builtin_ia32_cvtpd2ps512_mask:
3959 case X86::BI__builtin_ia32_cvtpd2dq512_mask:
3960 case X86::BI__builtin_ia32_cvtpd2qq512_mask:
3961 case X86::BI__builtin_ia32_cvtpd2udq512_mask:
3962 case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
3963 case X86::BI__builtin_ia32_cvtps2dq512_mask:
3964 case X86::BI__builtin_ia32_cvtps2qq512_mask:
3965 case X86::BI__builtin_ia32_cvtps2udq512_mask:
3966 case X86::BI__builtin_ia32_cvtps2uqq512_mask:
3967 case X86::BI__builtin_ia32_cvtqq2pd512_mask:
3968 case X86::BI__builtin_ia32_cvtqq2ps512_mask:
3969 case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
3970 case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
3971 ArgNum = 3;
3972 HasRC = true;
3973 break;
3974 case X86::BI__builtin_ia32_addss_round_mask:
3975 case X86::BI__builtin_ia32_addsd_round_mask:
3976 case X86::BI__builtin_ia32_divss_round_mask:
3977 case X86::BI__builtin_ia32_divsd_round_mask:
3978 case X86::BI__builtin_ia32_mulss_round_mask:
3979 case X86::BI__builtin_ia32_mulsd_round_mask:
3980 case X86::BI__builtin_ia32_subss_round_mask:
3981 case X86::BI__builtin_ia32_subsd_round_mask:
3982 case X86::BI__builtin_ia32_scalefpd512_mask:
3983 case X86::BI__builtin_ia32_scalefps512_mask:
3984 case X86::BI__builtin_ia32_scalefsd_round_mask:
3985 case X86::BI__builtin_ia32_scalefss_round_mask:
3986 case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
3987 case X86::BI__builtin_ia32_sqrtsd_round_mask:
3988 case X86::BI__builtin_ia32_sqrtss_round_mask:
3989 case X86::BI__builtin_ia32_vfmaddsd3_mask:
3990 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
3991 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
3992 case X86::BI__builtin_ia32_vfmaddss3_mask:
3993 case X86::BI__builtin_ia32_vfmaddss3_maskz:
3994 case X86::BI__builtin_ia32_vfmaddss3_mask3:
3995 case X86::BI__builtin_ia32_vfmaddpd512_mask:
3996 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
3997 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
3998 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
3999 case X86::BI__builtin_ia32_vfmaddps512_mask:
4000 case X86::BI__builtin_ia32_vfmaddps512_maskz:
4001 case X86::BI__builtin_ia32_vfmaddps512_mask3:
4002 case X86::BI__builtin_ia32_vfmsubps512_mask3:
4003 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
4004 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
4005 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
4006 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
4007 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
4008 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
4009 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
4010 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
4011 ArgNum = 4;
4012 HasRC = true;
4013 break;
4014 }
4015
4016 llvm::APSInt Result;
4017
4018 // We can't check the value of a dependent argument.
4019 Expr *Arg = TheCall->getArg(ArgNum);
4020 if (Arg->isTypeDependent() || Arg->isValueDependent())
4021 return false;
4022
4023 // Check constant-ness first.
4024 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4025 return true;
4026
4027 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit
4028 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only
4029 // combined with ROUND_NO_EXC. If the intrinsic does not have rounding
4030 // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
4031 if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
4032 Result == 8/*ROUND_NO_EXC*/ ||
4033 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
4034 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
4035 return false;
4036
4037 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
4038 << Arg->getSourceRange();
4039}
4040
// Check if the gather/scatter scale is legal.
//
// For the builtins listed below, a scale operand is an immediate that must be
// exactly 1, 2, 4 or 8. Returns true (after emitting a diagnostic) when the
// scale is non-constant (and non-dependent) or has an illegal value; returns
// false for builtins without a scale operand or when the scale is valid.
bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
                                             CallExpr *TheCall) {
  // Index of the scale argument for this builtin; 0 means "no scale operand"
  // only via the default case, which returns before ArgNum is used.
  unsigned ArgNum = 0;
  switch (BuiltinID) {
  default:
    return false;
  // Gather/scatter prefetch forms: scale is operand 3.
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    ArgNum = 3;
    break;
  // Data gather/scatter forms: scale is operand 4.
  case X86::BI__builtin_ia32_gatherd_pd:
  case X86::BI__builtin_ia32_gatherd_pd256:
  case X86::BI__builtin_ia32_gatherq_pd:
  case X86::BI__builtin_ia32_gatherq_pd256:
  case X86::BI__builtin_ia32_gatherd_ps:
  case X86::BI__builtin_ia32_gatherd_ps256:
  case X86::BI__builtin_ia32_gatherq_ps:
  case X86::BI__builtin_ia32_gatherq_ps256:
  case X86::BI__builtin_ia32_gatherd_q:
  case X86::BI__builtin_ia32_gatherd_q256:
  case X86::BI__builtin_ia32_gatherq_q:
  case X86::BI__builtin_ia32_gatherq_q256:
  case X86::BI__builtin_ia32_gatherd_d:
  case X86::BI__builtin_ia32_gatherd_d256:
  case X86::BI__builtin_ia32_gatherq_d:
  case X86::BI__builtin_ia32_gatherq_d256:
  case X86::BI__builtin_ia32_gather3div2df:
  case X86::BI__builtin_ia32_gather3div2di:
  case X86::BI__builtin_ia32_gather3div4df:
  case X86::BI__builtin_ia32_gather3div4di:
  case X86::BI__builtin_ia32_gather3div4sf:
  case X86::BI__builtin_ia32_gather3div4si:
  case X86::BI__builtin_ia32_gather3div8sf:
  case X86::BI__builtin_ia32_gather3div8si:
  case X86::BI__builtin_ia32_gather3siv2df:
  case X86::BI__builtin_ia32_gather3siv2di:
  case X86::BI__builtin_ia32_gather3siv4df:
  case X86::BI__builtin_ia32_gather3siv4di:
  case X86::BI__builtin_ia32_gather3siv4sf:
  case X86::BI__builtin_ia32_gather3siv4si:
  case X86::BI__builtin_ia32_gather3siv8sf:
  case X86::BI__builtin_ia32_gather3siv8si:
  case X86::BI__builtin_ia32_gathersiv8df:
  case X86::BI__builtin_ia32_gathersiv16sf:
  case X86::BI__builtin_ia32_gatherdiv8df:
  case X86::BI__builtin_ia32_gatherdiv16sf:
  case X86::BI__builtin_ia32_gathersiv8di:
  case X86::BI__builtin_ia32_gathersiv16si:
  case X86::BI__builtin_ia32_gatherdiv8di:
  case X86::BI__builtin_ia32_gatherdiv16si:
  case X86::BI__builtin_ia32_scatterdiv2df:
  case X86::BI__builtin_ia32_scatterdiv2di:
  case X86::BI__builtin_ia32_scatterdiv4df:
  case X86::BI__builtin_ia32_scatterdiv4di:
  case X86::BI__builtin_ia32_scatterdiv4sf:
  case X86::BI__builtin_ia32_scatterdiv4si:
  case X86::BI__builtin_ia32_scatterdiv8sf:
  case X86::BI__builtin_ia32_scatterdiv8si:
  case X86::BI__builtin_ia32_scattersiv2df:
  case X86::BI__builtin_ia32_scattersiv2di:
  case X86::BI__builtin_ia32_scattersiv4df:
  case X86::BI__builtin_ia32_scattersiv4di:
  case X86::BI__builtin_ia32_scattersiv4sf:
  case X86::BI__builtin_ia32_scattersiv4si:
  case X86::BI__builtin_ia32_scattersiv8sf:
  case X86::BI__builtin_ia32_scattersiv8si:
  case X86::BI__builtin_ia32_scattersiv8df:
  case X86::BI__builtin_ia32_scattersiv16sf:
  case X86::BI__builtin_ia32_scatterdiv8df:
  case X86::BI__builtin_ia32_scatterdiv16sf:
  case X86::BI__builtin_ia32_scattersiv8di:
  case X86::BI__builtin_ia32_scattersiv16si:
  case X86::BI__builtin_ia32_scatterdiv8di:
  case X86::BI__builtin_ia32_scatterdiv16si:
    ArgNum = 4;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // The hardware encodes the scale as a power of two; only 1/2/4/8 are legal.
  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
         << Arg->getSourceRange();
}
4143
4144enum { TileRegLow = 0, TileRegHigh = 7 };
4145
4146bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
4147 ArrayRef<int> ArgNums) {
4148 for (int ArgNum : ArgNums) {
4149 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
4150 return true;
4151 }
4152 return false;
4153}
4154
4155bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
4156 ArrayRef<int> ArgNums) {
4157 // Because the max number of tile register is TileRegHigh + 1, so here we use
4158 // each bit to represent the usage of them in bitset.
4159 std::bitset<TileRegHigh + 1> ArgValues;
4160 for (int ArgNum : ArgNums) {
4161 Expr *Arg = TheCall->getArg(ArgNum);
4162 if (Arg->isTypeDependent() || Arg->isValueDependent())
4163 continue;
4164
4165 llvm::APSInt Result;
4166 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4167 return true;
4168 int ArgExtValue = Result.getExtValue();
4169 assert((ArgExtValue >= TileRegLow || ArgExtValue <= TileRegHigh) &&((void)0)
4170 "Incorrect tile register num.")((void)0);
4171 if (ArgValues.test(ArgExtValue))
4172 return Diag(TheCall->getBeginLoc(),
4173 diag::err_x86_builtin_tile_arg_duplicate)
4174 << TheCall->getArg(ArgNum)->getSourceRange();
4175 ArgValues.set(ArgExtValue);
4176 }
4177 return false;
4178}
4179
4180bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
4181 ArrayRef<int> ArgNums) {
4182 return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
4183 CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
4184}
4185
// Validate the tile-register immediates of an AMX builtin call. Returns true
// (after diagnosing) when an operand is out of the tmm0..tmm7 range or, for
// the dot-product forms, when two operands name the same tile register.
bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
  // Load/store/zero forms: only operand 0 is a tile register.
  case X86::BI__builtin_ia32_tileloadd64:
  case X86::BI__builtin_ia32_tileloaddt164:
  case X86::BI__builtin_ia32_tilestored64:
  case X86::BI__builtin_ia32_tilezero:
    return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
  // Dot-product forms: operands 0-2 are tile registers and must be distinct.
  case X86::BI__builtin_ia32_tdpbssd:
  case X86::BI__builtin_ia32_tdpbsud:
  case X86::BI__builtin_ia32_tdpbusd:
  case X86::BI__builtin_ia32_tdpbuud:
  case X86::BI__builtin_ia32_tdpbf16ps:
    return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
  }
}
4203static bool isX86_32Builtin(unsigned BuiltinID) {
4204 // These builtins only work on x86-32 targets.
4205 switch (BuiltinID) {
4206 case X86::BI__builtin_ia32_readeflags_u32:
4207 case X86::BI__builtin_ia32_writeeflags_u32:
4208 return true;
4209 }
4210
4211 return false;
4212}
4213
// Semantic checking for all X86 builtin calls: validates cpu_supports/cpu_is
// feature strings, 32-bit-only builtins on 64-bit targets, rounding/SAE
// immediates, gather/scatter scales, AMX tile operands, and finally the
// generic immediate-range table below. Returns true when a hard error was
// emitted (range violations are only a warning-defaults-to-error; see the
// note at the bottom).
bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TI, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TI, TheCall);

  // Check for 32-bit only builtins on a 64-bit target.
  const llvm::Triple &TT = TI.getTriple();
  if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
    return Diag(TheCall->getCallee()->getBeginLoc(),
                diag::err_32_bit_builtin_64_bit_tgt);

  // If the intrinsic has rounding or SAE make sure its valid.
  if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a gather/scatter scale immediate make sure its valid.
  if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a tile arguments, make sure they are valid.
  if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // i = index of the immediate operand; [l, u] = its inclusive legal range.
  int i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
  // 1-bit immediates (lane selects for 128-bit halves, etc.).
  case X86::BI__builtin_ia32_vec_ext_v2si:
  case X86::BI__builtin_ia32_vec_ext_v2di:
  case X86::BI__builtin_ia32_vextractf128_pd256:
  case X86::BI__builtin_ia32_vextractf128_ps256:
  case X86::BI__builtin_ia32_vextractf128_si256:
  case X86::BI__builtin_ia32_extract128i256:
  case X86::BI__builtin_ia32_extractf64x4_mask:
  case X86::BI__builtin_ia32_extracti64x4_mask:
  case X86::BI__builtin_ia32_extractf32x8_mask:
  case X86::BI__builtin_ia32_extracti32x8_mask:
  case X86::BI__builtin_ia32_extractf64x2_256_mask:
  case X86::BI__builtin_ia32_extracti64x2_256_mask:
  case X86::BI__builtin_ia32_extractf32x4_256_mask:
  case X86::BI__builtin_ia32_extracti32x4_256_mask:
    i = 1; l = 0; u = 1;
    break;
  case X86::BI__builtin_ia32_vec_set_v2di:
  case X86::BI__builtin_ia32_vinsertf128_pd256:
  case X86::BI__builtin_ia32_vinsertf128_ps256:
  case X86::BI__builtin_ia32_vinsertf128_si256:
  case X86::BI__builtin_ia32_insert128i256:
  case X86::BI__builtin_ia32_insertf32x8:
  case X86::BI__builtin_ia32_inserti32x8:
  case X86::BI__builtin_ia32_insertf64x4:
  case X86::BI__builtin_ia32_inserti64x4:
  case X86::BI__builtin_ia32_insertf64x2_256:
  case X86::BI__builtin_ia32_inserti64x2_256:
  case X86::BI__builtin_ia32_insertf32x4_256:
  case X86::BI__builtin_ia32_inserti32x4_256:
    i = 2; l = 0; u = 1;
    break;
  // 2-bit immediates.
  case X86::BI__builtin_ia32_vpermilpd:
  case X86::BI__builtin_ia32_vec_ext_v4hi:
  case X86::BI__builtin_ia32_vec_ext_v4si:
  case X86::BI__builtin_ia32_vec_ext_v4sf:
  case X86::BI__builtin_ia32_vec_ext_v4di:
  case X86::BI__builtin_ia32_extractf32x4_mask:
  case X86::BI__builtin_ia32_extracti32x4_mask:
  case X86::BI__builtin_ia32_extractf64x2_512_mask:
  case X86::BI__builtin_ia32_extracti64x2_512_mask:
    i = 1; l = 0; u = 3;
    break;
  // 3-bit immediates (prefetch hints, 8-element lane indices).
  case X86::BI_mm_prefetch:
  case X86::BI__builtin_ia32_vec_ext_v8hi:
  case X86::BI__builtin_ia32_vec_ext_v8si:
    i = 1; l = 0; u = 7;
    break;
  case X86::BI__builtin_ia32_sha1rnds4:
  case X86::BI__builtin_ia32_blendpd:
  case X86::BI__builtin_ia32_shufpd:
  case X86::BI__builtin_ia32_vec_set_v4hi:
  case X86::BI__builtin_ia32_vec_set_v4si:
  case X86::BI__builtin_ia32_vec_set_v4di:
  case X86::BI__builtin_ia32_shuf_f32x4_256:
  case X86::BI__builtin_ia32_shuf_f64x2_256:
  case X86::BI__builtin_ia32_shuf_i32x4_256:
  case X86::BI__builtin_ia32_shuf_i64x2_256:
  case X86::BI__builtin_ia32_insertf64x2_512:
  case X86::BI__builtin_ia32_inserti64x2_512:
  case X86::BI__builtin_ia32_insertf32x4:
  case X86::BI__builtin_ia32_inserti32x4:
    i = 2; l = 0; u = 3;
    break;
  case X86::BI__builtin_ia32_vpermil2pd:
  case X86::BI__builtin_ia32_vpermil2pd256:
  case X86::BI__builtin_ia32_vpermil2ps:
  case X86::BI__builtin_ia32_vpermil2ps256:
    i = 3; l = 0; u = 3;
    break;
  // 3-bit comparison predicates (AVX-512 integer compares, XOP vpcom*).
  case X86::BI__builtin_ia32_cmpb128_mask:
  case X86::BI__builtin_ia32_cmpw128_mask:
  case X86::BI__builtin_ia32_cmpd128_mask:
  case X86::BI__builtin_ia32_cmpq128_mask:
  case X86::BI__builtin_ia32_cmpb256_mask:
  case X86::BI__builtin_ia32_cmpw256_mask:
  case X86::BI__builtin_ia32_cmpd256_mask:
  case X86::BI__builtin_ia32_cmpq256_mask:
  case X86::BI__builtin_ia32_cmpb512_mask:
  case X86::BI__builtin_ia32_cmpw512_mask:
  case X86::BI__builtin_ia32_cmpd512_mask:
  case X86::BI__builtin_ia32_cmpq512_mask:
  case X86::BI__builtin_ia32_ucmpb128_mask:
  case X86::BI__builtin_ia32_ucmpw128_mask:
  case X86::BI__builtin_ia32_ucmpd128_mask:
  case X86::BI__builtin_ia32_ucmpq128_mask:
  case X86::BI__builtin_ia32_ucmpb256_mask:
  case X86::BI__builtin_ia32_ucmpw256_mask:
  case X86::BI__builtin_ia32_ucmpd256_mask:
  case X86::BI__builtin_ia32_ucmpq256_mask:
  case X86::BI__builtin_ia32_ucmpb512_mask:
  case X86::BI__builtin_ia32_ucmpw512_mask:
  case X86::BI__builtin_ia32_ucmpd512_mask:
  case X86::BI__builtin_ia32_ucmpq512_mask:
  case X86::BI__builtin_ia32_vpcomub:
  case X86::BI__builtin_ia32_vpcomuw:
  case X86::BI__builtin_ia32_vpcomud:
  case X86::BI__builtin_ia32_vpcomuq:
  case X86::BI__builtin_ia32_vpcomb:
  case X86::BI__builtin_ia32_vpcomw:
  case X86::BI__builtin_ia32_vpcomd:
  case X86::BI__builtin_ia32_vpcomq:
  case X86::BI__builtin_ia32_vec_set_v8hi:
  case X86::BI__builtin_ia32_vec_set_v8si:
    i = 2; l = 0; u = 7;
    break;
  // 4-bit immediates.
  case X86::BI__builtin_ia32_vpermilpd256:
  case X86::BI__builtin_ia32_roundps:
  case X86::BI__builtin_ia32_roundpd:
  case X86::BI__builtin_ia32_roundps256:
  case X86::BI__builtin_ia32_roundpd256:
  case X86::BI__builtin_ia32_getmantpd128_mask:
  case X86::BI__builtin_ia32_getmantpd256_mask:
  case X86::BI__builtin_ia32_getmantps128_mask:
  case X86::BI__builtin_ia32_getmantps256_mask:
  case X86::BI__builtin_ia32_getmantpd512_mask:
  case X86::BI__builtin_ia32_getmantps512_mask:
  case X86::BI__builtin_ia32_vec_ext_v16qi:
  case X86::BI__builtin_ia32_vec_ext_v16hi:
    i = 1; l = 0; u = 15;
    break;
  case X86::BI__builtin_ia32_pblendd128:
  case X86::BI__builtin_ia32_blendps:
  case X86::BI__builtin_ia32_blendpd256:
  case X86::BI__builtin_ia32_shufpd256:
  case X86::BI__builtin_ia32_roundss:
  case X86::BI__builtin_ia32_roundsd:
  case X86::BI__builtin_ia32_rangepd128_mask:
  case X86::BI__builtin_ia32_rangepd256_mask:
  case X86::BI__builtin_ia32_rangepd512_mask:
  case X86::BI__builtin_ia32_rangeps128_mask:
  case X86::BI__builtin_ia32_rangeps256_mask:
  case X86::BI__builtin_ia32_rangeps512_mask:
  case X86::BI__builtin_ia32_getmantsd_round_mask:
  case X86::BI__builtin_ia32_getmantss_round_mask:
  case X86::BI__builtin_ia32_vec_set_v16qi:
  case X86::BI__builtin_ia32_vec_set_v16hi:
    i = 2; l = 0; u = 15;
    break;
  // 5-bit immediates (32-element lane indices, FP comparison predicates).
  case X86::BI__builtin_ia32_vec_ext_v32qi:
    i = 1; l = 0; u = 31;
    break;
  case X86::BI__builtin_ia32_cmpps:
  case X86::BI__builtin_ia32_cmpss:
  case X86::BI__builtin_ia32_cmppd:
  case X86::BI__builtin_ia32_cmpsd:
  case X86::BI__builtin_ia32_cmpps256:
  case X86::BI__builtin_ia32_cmppd256:
  case X86::BI__builtin_ia32_cmpps128_mask:
  case X86::BI__builtin_ia32_cmppd128_mask:
  case X86::BI__builtin_ia32_cmpps256_mask:
  case X86::BI__builtin_ia32_cmppd256_mask:
  case X86::BI__builtin_ia32_cmpps512_mask:
  case X86::BI__builtin_ia32_cmppd512_mask:
  case X86::BI__builtin_ia32_cmpsd_mask:
  case X86::BI__builtin_ia32_cmpss_mask:
  case X86::BI__builtin_ia32_vec_set_v32qi:
    i = 2; l = 0; u = 31;
    break;
  // Full 8-bit immediates.
  case X86::BI__builtin_ia32_permdf256:
  case X86::BI__builtin_ia32_permdi256:
  case X86::BI__builtin_ia32_permdf512:
  case X86::BI__builtin_ia32_permdi512:
  case X86::BI__builtin_ia32_vpermilps:
  case X86::BI__builtin_ia32_vpermilps256:
  case X86::BI__builtin_ia32_vpermilpd512:
  case X86::BI__builtin_ia32_vpermilps512:
  case X86::BI__builtin_ia32_pshufd:
  case X86::BI__builtin_ia32_pshufd256:
  case X86::BI__builtin_ia32_pshufd512:
  case X86::BI__builtin_ia32_pshufhw:
  case X86::BI__builtin_ia32_pshufhw256:
  case X86::BI__builtin_ia32_pshufhw512:
  case X86::BI__builtin_ia32_pshuflw:
  case X86::BI__builtin_ia32_pshuflw256:
  case X86::BI__builtin_ia32_pshuflw512:
  case X86::BI__builtin_ia32_vcvtps2ph:
  case X86::BI__builtin_ia32_vcvtps2ph_mask:
  case X86::BI__builtin_ia32_vcvtps2ph256:
  case X86::BI__builtin_ia32_vcvtps2ph256_mask:
  case X86::BI__builtin_ia32_vcvtps2ph512_mask:
  case X86::BI__builtin_ia32_rndscaleps_128_mask:
  case X86::BI__builtin_ia32_rndscalepd_128_mask:
  case X86::BI__builtin_ia32_rndscaleps_256_mask:
  case X86::BI__builtin_ia32_rndscalepd_256_mask:
  case X86::BI__builtin_ia32_rndscaleps_mask:
  case X86::BI__builtin_ia32_rndscalepd_mask:
  case X86::BI__builtin_ia32_reducepd128_mask:
  case X86::BI__builtin_ia32_reducepd256_mask:
  case X86::BI__builtin_ia32_reducepd512_mask:
  case X86::BI__builtin_ia32_reduceps128_mask:
  case X86::BI__builtin_ia32_reduceps256_mask:
  case X86::BI__builtin_ia32_reduceps512_mask:
  case X86::BI__builtin_ia32_prold512:
  case X86::BI__builtin_ia32_prolq512:
  case X86::BI__builtin_ia32_prold128:
  case X86::BI__builtin_ia32_prold256:
  case X86::BI__builtin_ia32_prolq128:
  case X86::BI__builtin_ia32_prolq256:
  case X86::BI__builtin_ia32_prord512:
  case X86::BI__builtin_ia32_prorq512:
  case X86::BI__builtin_ia32_prord128:
  case X86::BI__builtin_ia32_prord256:
  case X86::BI__builtin_ia32_prorq128:
  case X86::BI__builtin_ia32_prorq256:
  case X86::BI__builtin_ia32_fpclasspd128_mask:
  case X86::BI__builtin_ia32_fpclasspd256_mask:
  case X86::BI__builtin_ia32_fpclassps128_mask:
  case X86::BI__builtin_ia32_fpclassps256_mask:
  case X86::BI__builtin_ia32_fpclassps512_mask:
  case X86::BI__builtin_ia32_fpclasspd512_mask:
  case X86::BI__builtin_ia32_fpclasssd_mask:
  case X86::BI__builtin_ia32_fpclassss_mask:
  case X86::BI__builtin_ia32_pslldqi128_byteshift:
  case X86::BI__builtin_ia32_pslldqi256_byteshift:
  case X86::BI__builtin_ia32_pslldqi512_byteshift:
  case X86::BI__builtin_ia32_psrldqi128_byteshift:
  case X86::BI__builtin_ia32_psrldqi256_byteshift:
  case X86::BI__builtin_ia32_psrldqi512_byteshift:
  case X86::BI__builtin_ia32_kshiftliqi:
  case X86::BI__builtin_ia32_kshiftlihi:
  case X86::BI__builtin_ia32_kshiftlisi:
  case X86::BI__builtin_ia32_kshiftlidi:
  case X86::BI__builtin_ia32_kshiftriqi:
  case X86::BI__builtin_ia32_kshiftrihi:
  case X86::BI__builtin_ia32_kshiftrisi:
  case X86::BI__builtin_ia32_kshiftridi:
    i = 1; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_vperm2f128_pd256:
  case X86::BI__builtin_ia32_vperm2f128_ps256:
  case X86::BI__builtin_ia32_vperm2f128_si256:
  case X86::BI__builtin_ia32_permti256:
  case X86::BI__builtin_ia32_pblendw128:
  case X86::BI__builtin_ia32_pblendw256:
  case X86::BI__builtin_ia32_blendps256:
  case X86::BI__builtin_ia32_pblendd256:
  case X86::BI__builtin_ia32_palignr128:
  case X86::BI__builtin_ia32_palignr256:
  case X86::BI__builtin_ia32_palignr512:
  case X86::BI__builtin_ia32_alignq512:
  case X86::BI__builtin_ia32_alignd512:
  case X86::BI__builtin_ia32_alignd128:
  case X86::BI__builtin_ia32_alignd256:
  case X86::BI__builtin_ia32_alignq128:
  case X86::BI__builtin_ia32_alignq256:
  case X86::BI__builtin_ia32_vcomisd:
  case X86::BI__builtin_ia32_vcomiss:
  case X86::BI__builtin_ia32_shuf_f32x4:
  case X86::BI__builtin_ia32_shuf_f64x2:
  case X86::BI__builtin_ia32_shuf_i32x4:
  case X86::BI__builtin_ia32_shuf_i64x2:
  case X86::BI__builtin_ia32_shufpd512:
  case X86::BI__builtin_ia32_shufps:
  case X86::BI__builtin_ia32_shufps256:
  case X86::BI__builtin_ia32_shufps512:
  case X86::BI__builtin_ia32_dbpsadbw128:
  case X86::BI__builtin_ia32_dbpsadbw256:
  case X86::BI__builtin_ia32_dbpsadbw512:
  case X86::BI__builtin_ia32_vpshldd128:
  case X86::BI__builtin_ia32_vpshldd256:
  case X86::BI__builtin_ia32_vpshldd512:
  case X86::BI__builtin_ia32_vpshldq128:
  case X86::BI__builtin_ia32_vpshldq256:
  case X86::BI__builtin_ia32_vpshldq512:
  case X86::BI__builtin_ia32_vpshldw128:
  case X86::BI__builtin_ia32_vpshldw256:
  case X86::BI__builtin_ia32_vpshldw512:
  case X86::BI__builtin_ia32_vpshrdd128:
  case X86::BI__builtin_ia32_vpshrdd256:
  case X86::BI__builtin_ia32_vpshrdd512:
  case X86::BI__builtin_ia32_vpshrdq128:
  case X86::BI__builtin_ia32_vpshrdq256:
  case X86::BI__builtin_ia32_vpshrdq512:
  case X86::BI__builtin_ia32_vpshrdw128:
  case X86::BI__builtin_ia32_vpshrdw256:
  case X86::BI__builtin_ia32_vpshrdw512:
    i = 2; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_fixupimmpd512_mask:
  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
  case X86::BI__builtin_ia32_fixupimmps512_mask:
  case X86::BI__builtin_ia32_fixupimmps512_maskz:
  case X86::BI__builtin_ia32_fixupimmsd_mask:
  case X86::BI__builtin_ia32_fixupimmsd_maskz:
  case X86::BI__builtin_ia32_fixupimmss_mask:
  case X86::BI__builtin_ia32_fixupimmss_maskz:
  case X86::BI__builtin_ia32_fixupimmpd128_mask:
  case X86::BI__builtin_ia32_fixupimmpd128_maskz:
  case X86::BI__builtin_ia32_fixupimmpd256_mask:
  case X86::BI__builtin_ia32_fixupimmpd256_maskz:
  case X86::BI__builtin_ia32_fixupimmps128_mask:
  case X86::BI__builtin_ia32_fixupimmps128_maskz:
  case X86::BI__builtin_ia32_fixupimmps256_mask:
  case X86::BI__builtin_ia32_fixupimmps256_maskz:
  case X86::BI__builtin_ia32_pternlogd512_mask:
  case X86::BI__builtin_ia32_pternlogd512_maskz:
  case X86::BI__builtin_ia32_pternlogq512_mask:
  case X86::BI__builtin_ia32_pternlogq512_maskz:
  case X86::BI__builtin_ia32_pternlogd128_mask:
  case X86::BI__builtin_ia32_pternlogd128_maskz:
  case X86::BI__builtin_ia32_pternlogd256_mask:
  case X86::BI__builtin_ia32_pternlogd256_maskz:
  case X86::BI__builtin_ia32_pternlogq128_mask:
  case X86::BI__builtin_ia32_pternlogq128_maskz:
  case X86::BI__builtin_ia32_pternlogq256_mask:
  case X86::BI__builtin_ia32_pternlogq256_maskz:
    i = 3; l = 0; u = 255;
    break;
  // Prefetch hint: only _MM_HINT_T0 (3) / _MM_HINT_T1 (2) encodings allowed.
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    i = 4; l = 2; u = 3;
    break;
  case X86::BI__builtin_ia32_reducesd_mask:
  case X86::BI__builtin_ia32_reducess_mask:
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
    i = 4; l = 0; u = 255;
    break;
  }

  // Note that we don't force a hard error on the range check here, allowing
  // template-generated or macro-generated dead code to potentially have out-of-
  // range values. These need to code generate, but don't need to necessarily
  // make any sense. We use a warning that defaults to an error.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}
4578
4579/// Given a FunctionDecl's FormatAttr, attempts to populate the FomatStringInfo
4580/// parameter with the FormatAttr's correct format_idx and firstDataArg.
4581/// Returns true when the format fits the function and the FormatStringInfo has
4582/// been populated.
4583bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
4584 FormatStringInfo *FSI) {
4585 FSI->HasVAListArg = Format->getFirstArg() == 0;
4586 FSI->FormatIdx = Format->getFormatIdx() - 1;
4587 FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;
4588
4589 // The way the format attribute works in GCC, the implicit this argument
4590 // of member functions is counted. However, it doesn't appear in our own
4591 // lists, so decrement format_idx in that case.
4592 if (IsCXXMember) {
4593 if(FSI->FormatIdx == 0)
4594 return false;
4595 --FSI->FormatIdx;
4596 if (FSI->FirstDataArg != 0)
4597 --FSI->FirstDataArg;
4598 }
4599 return true;
4600}
4601
4602/// Checks if a the given expression evaluates to null.
4603///
4604/// Returns true if the value evaluates to null.
4605static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
4606 // If the expression has non-null type, it doesn't evaluate to null.
4607 if (auto nullability
4608 = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
4609 if (*nullability == NullabilityKind::NonNull)
4610 return false;
4611 }
4612
4613 // As a special case, transparent unions initialized with zero are
4614 // considered null for the purposes of the nonnull attribute.
4615 if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
4616 if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
4617 if (const CompoundLiteralExpr *CLE =
4618 dyn_cast<CompoundLiteralExpr>(Expr))
4619 if (const InitListExpr *ILE =
4620 dyn_cast<InitListExpr>(CLE->getInitializer()))
4621 Expr = ILE->getInit(0);
4622 }
4623
4624 bool Result;
4625 return (!Expr->isValueDependent() &&
4626 Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
4627 !Result);
4628}
4629
4630static void CheckNonNullArgument(Sema &S,
4631 const Expr *ArgExpr,
4632 SourceLocation CallSiteLoc) {
4633 if (CheckNonNullExpr(S, ArgExpr))
4634 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
4635 S.PDiag(diag::warn_null_arg)
4636 << ArgExpr->getSourceRange());
4637}
4638
4639bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
4640 FormatStringInfo FSI;
4641 if ((GetFormatStringType(Format) == FST_NSString) &&
4642 getFormatStringInfo(Format, false, &FSI)) {
4643 Idx = FSI.FormatIdx;
4644 return true;
4645 }
4646 return false;
4647}
4648
4649/// Diagnose use of %s directive in an NSString which is being passed
4650/// as formatting string to formatting method.
4651static void
4652DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
4653 const NamedDecl *FDecl,
4654 Expr **Args,
4655 unsigned NumArgs) {
4656 unsigned Idx = 0;
4657 bool Format = false;
4658 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
4659 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
4660 Idx = 2;
4661 Format = true;
4662 }
4663 else
4664 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
4665 if (S.GetFormatNSStringIdx(I, Idx)) {
4666 Format = true;
4667 break;
4668 }
4669 }
4670 if (!Format || NumArgs <= Idx)
4671 return;
4672 const Expr *FormatExpr = Args[Idx];
4673 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
4674 FormatExpr = CSCE->getSubExpr();
4675 const StringLiteral *FormatString;
4676 if (const ObjCStringLiteral *OSL =
4677 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
4678 FormatString = OSL->getString();
4679 else
4680 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
4681 if (!FormatString)
4682 return;
4683 if (S.FormatStringHasSArg(FormatString)) {
4684 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
4685 << "%s" << 1 << 1;
4686 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
4687 << FDecl->getDeclName();
4688 }
4689}
4690
4691/// Determine whether the given type has a non-null nullability annotation.
4692static bool isNonNullType(ASTContext &ctx, QualType type) {
4693 if (auto nullability = type->getNullability(ctx))
4694 return *nullability == NullabilityKind::NonNull;
4695
4696 return false;
4697}
4698
4699static void CheckNonNullArguments(Sema &S,
4700 const NamedDecl *FDecl,
4701 const FunctionProtoType *Proto,
4702 ArrayRef<const Expr *> Args,
4703 SourceLocation CallSiteLoc) {
4704 assert((FDecl || Proto) && "Need a function declaration or prototype")((void)0);
4705
4706 // Already checked by by constant evaluator.
4707 if (S.isConstantEvaluated())
4708 return;
4709 // Check the attributes attached to the method/function itself.
4710 llvm::SmallBitVector NonNullArgs;
4711 if (FDecl) {
4712 // Handle the nonnull attribute on the function/method declaration itself.
4713 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
4714 if (!NonNull->args_size()) {
4715 // Easy case: all pointer arguments are nonnull.
4716 for (const auto *Arg : Args)
4717 if (S.isValidPointerAttrType(Arg->getType()))
4718 CheckNonNullArgument(S, Arg, CallSiteLoc);
4719 return;
4720 }
4721
4722 for (const ParamIdx &Idx : NonNull->args()) {
4723 unsigned IdxAST = Idx.getASTIndex();
4724 if (IdxAST >= Args.size())
4725 continue;
4726 if (NonNullArgs.empty())
4727 NonNullArgs.resize(Args.size());
4728 NonNullArgs.set(IdxAST);
4729 }
4730 }
4731 }
4732
4733 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
4734 // Handle the nonnull attribute on the parameters of the
4735 // function/method.
4736 ArrayRef<ParmVarDecl*> parms;
4737 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
4738 parms = FD->parameters();
4739 else
4740 parms = cast<ObjCMethodDecl>(FDecl)->parameters();
4741
4742 unsigned ParamIndex = 0;
4743 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
4744 I != E; ++I, ++ParamIndex) {
4745 const ParmVarDecl *PVD = *I;
4746 if (PVD->hasAttr<NonNullAttr>() ||
4747 isNonNullType(S.Context, PVD->getType())) {
4748 if (NonNullArgs.empty())
4749 NonNullArgs.resize(Args.size());
4750
4751 NonNullArgs.set(ParamIndex);
4752 }
4753 }
4754 } else {
4755 // If we have a non-function, non-method declaration but no
4756 // function prototype, try to dig out the function prototype.
4757 if (!Proto) {
4758 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
4759 QualType type = VD->getType().getNonReferenceType();
4760 if (auto pointerType = type->getAs<PointerType>())
4761 type = pointerType->getPointeeType();
4762 else if (auto blockType = type->getAs<BlockPointerType>())
4763 type = blockType->getPointeeType();
4764 // FIXME: data member pointers?
4765
4766 // Dig out the function prototype, if there is one.
4767 Proto = type->getAs<FunctionProtoType>();
4768 }
4769 }
4770
4771 // Fill in non-null argument information from the nullability
4772 // information on the parameter types (if we have them).
4773 if (Proto) {
4774 unsigned Index = 0;
4775 for (auto paramType : Proto->getParamTypes()) {
4776 if (isNonNullType(S.Context, paramType)) {
4777 if (NonNullArgs.empty())
4778 NonNullArgs.resize(Args.size());
4779
4780 NonNullArgs.set(Index);
4781 }
4782
4783 ++Index;
4784 }
4785 }
4786 }
4787
4788 // Check for non-null arguments.
4789 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
4790 ArgIndex != ArgIndexEnd; ++ArgIndex) {
4791 if (NonNullArgs[ArgIndex])
4792 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc);
4793 }
4794}
4795
4796/// Warn if a pointer or reference argument passed to a function points to an
4797/// object that is less aligned than the parameter. This can happen when
4798/// creating a typedef with a lower alignment than the original type and then
4799/// calling functions defined in terms of the original type.
4800void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
4801 StringRef ParamName, QualType ArgTy,
4802 QualType ParamTy) {
4803
4804 // If a function accepts a pointer or reference type
4805 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType())
4806 return;
4807
4808 // If the parameter is a pointer type, get the pointee type for the
4809 // argument too. If the parameter is a reference type, don't try to get
4810 // the pointee type for the argument.
4811 if (ParamTy->isPointerType())
4812 ArgTy = ArgTy->getPointeeType();
4813
4814 // Remove reference or pointer
4815 ParamTy = ParamTy->getPointeeType();
4816
4817 // Find expected alignment, and the actual alignment of the passed object.
4818 // getTypeAlignInChars requires complete types
4819 if (ArgTy.isNull() || ParamTy->isIncompleteType() ||
4820 ArgTy->isIncompleteType() || ParamTy->isUndeducedType() ||
4821 ArgTy->isUndeducedType())
4822 return;
4823
4824 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy);
4825 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy);
4826
4827 // If the argument is less aligned than the parameter, there is a
4828 // potential alignment issue.
4829 if (ArgAlign < ParamAlign)
4830 Diag(Loc, diag::warn_param_mismatched_alignment)
4831 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity()
4832 << ParamName << FDecl;
4833}
4834
/// Handles the checks for format strings, non-POD arguments to vararg
/// functions, NULL arguments passed to non-NULL parameters, and diagnose_if
/// attributes.
///
/// \param FDecl   callee declaration; may be null (indirect calls).
/// \param Proto   callee prototype; may be null (K&R / unknown callees).
/// \param ThisArg implicit object argument for member calls, else null.
void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
                     const Expr *ThisArg, ArrayRef<const Expr *> Args,
                     bool IsMemberFunction, SourceLocation Loc,
                     SourceRange Range, VariadicCallType CallType) {
  // FIXME: We should check as much as we can in the template definition.
  if (CurContext->isDependentContext())
    return;

  // Printf and scanf checking.
  llvm::SmallBitVector CheckedVarArgs;
  if (FDecl) {
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      // Only create vector if there are format attributes.
      CheckedVarArgs.resize(Args.size());

      CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
                           CheckedVarArgs);
    }
  }

  // Refuse POD arguments that weren't caught by the format string
  // checks above.
  auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
  if (CallType != VariadicDoesNotApply &&
      (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
    // Determine the declared parameter count so only the variadic tail of
    // the argument list is checked below.
    unsigned NumParams = Proto ? Proto->getNumParams()
                       : FDecl && isa<FunctionDecl>(FDecl)
                           ? cast<FunctionDecl>(FDecl)->getNumParams()
                       : FDecl && isa<ObjCMethodDecl>(FDecl)
                           ? cast<ObjCMethodDecl>(FDecl)->param_size()
                       : 0;

    for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        // Skip arguments already validated by format-string checking.
        if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
          checkVariadicArgument(Arg, CallType);
      }
    }
  }

  if (FDecl || Proto) {
    CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);

    // Type safety checking.
    if (FDecl) {
      for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
        CheckArgumentWithTypeTag(I, Args, Loc);
    }
  }

  // Check that passed arguments match the alignment of original arguments.
  // Try to get the missing prototype from the declaration.
  if (!Proto && FDecl) {
    const auto *FT = FDecl->getFunctionType();
    if (isa_and_nonnull<FunctionProtoType>(FT))
      Proto = cast<FunctionProtoType>(FDecl->getFunctionType());
  }
  if (Proto) {
    // For variadic functions, we may have more args than parameters.
    // For some K&R functions, we may have less args than parameters.
    const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size());
    for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        if (Arg->containsErrors())
          continue;

        QualType ParamTy = Proto->getParamType(ArgIdx);
        QualType ArgTy = Arg->getType();
        // Parameter positions are reported 1-based in the diagnostic.
        CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1),
                          ArgTy, ParamTy);
      }
    }
  }

  if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
    auto *AA = FDecl->getAttr<AllocAlignAttr>();
    // NOTE(review): the attribute's parameter index is not range-checked
    // against Args here -- presumably the call has already been checked for
    // the declared arity; confirm for malformed-code paths.
    const Expr *Arg = Args[AA->getParamIndex().getASTIndex()];
    if (!Arg->isValueDependent()) {
      Expr::EvalResult Align;
      if (Arg->EvaluateAsInt(Align, Context)) {
        const llvm::APSInt &I = Align.Val.getInt();
        // The requested alignment must be a power of two...
        if (!I.isPowerOf2())
          Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two)
              << Arg->getSourceRange();

        // ...and no larger than the maximum supported alignment.
        if (I > Sema::MaximumAlignment)
          Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great)
              << Arg->getSourceRange() << Sema::MaximumAlignment;
      }
    }
  }

  if (FD)
    diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
}
4935
/// CheckConstructorCall - Check a constructor call for correctness and safety
/// properties not enforced by the C type system.
///
/// \param ThisType the type of the object being constructed.
void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
                                ArrayRef<const Expr *> Args,
                                const FunctionProtoType *Proto,
                                SourceLocation Loc) {
  // NOTE(review): Proto is dereferenced unconditionally -- callers are
  // presumed to always supply a prototype for a constructor; confirm.
  VariadicCallType CallType =
      Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;

  // Verify that the constructed object is at least as aligned as the
  // constructor expects its 'this' object to be.
  auto *Ctor = cast<CXXConstructorDecl>(FDecl);
  CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType),
                    Context.getPointerType(Ctor->getThisObjectType()));

  checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
            Loc, SourceRange(), CallType);
}
4952
/// CheckFunctionCall - Check a direct function call for various correctness
/// and safety properties not strictly enforced by the C type system.
bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
                             const FunctionProtoType *Proto) {
  bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
                              isa<CXXMethodDecl>(FDecl);
  bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
                          IsMemberOperatorCall;
  VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
                                                  TheCall->getCallee());
  Expr **Args = TheCall->getArgs();
  unsigned NumArgs = TheCall->getNumArgs();

  Expr *ImplicitThis = nullptr;
  if (IsMemberOperatorCall) {
    // If this is a call to a member operator, hide the first argument
    // from checkCall.
    // FIXME: Our choice of AST representation here is less than ideal.
    ImplicitThis = Args[0];
    ++Args;
    --NumArgs;
  } else if (IsMemberFunction)
    ImplicitThis =
        cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();

  if (ImplicitThis) {
    // ImplicitThis may or may not be a pointer, depending on whether . or ->
    // is used; normalize to a pointer so the alignment check compares
    // pointee against pointee.
    QualType ThisType = ImplicitThis->getType();
    if (!ThisType->isPointerType()) {
      assert(!ThisType->isReferenceType());
      ThisType = Context.getPointerType(ThisType);
    }

    QualType ThisTypeFromDecl =
        Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType());

    CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType,
                      ThisTypeFromDecl);
  }

  checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs),
            IsMemberFunction, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  IdentifierInfo *FnInfo = FDecl->getIdentifier();
  // None of the checks below are needed for functions that don't have
  // simple names (e.g., C++ conversion functions).
  if (!FnInfo)
    return false;

  // Diagnose calls that violate the enforce_tcb constraints on the caller.
  CheckTCBEnforcement(TheCall, FDecl);

  CheckAbsoluteValueFunction(TheCall, FDecl);
  CheckMaxUnsignedZero(TheCall, FDecl);

  if (getLangOpts().ObjC)
    DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);

  unsigned CMId = FDecl->getMemoryFunctionKind();

  // Handle memory setting and copying functions. CMId == 0 means the callee
  // is not a recognized memory function.
  switch (CMId) {
  case 0:
    return false;
  case Builtin::BIstrlcpy: // fallthrough
  case Builtin::BIstrlcat:
    CheckStrlcpycatArguments(TheCall, FnInfo);
    break;
  case Builtin::BIstrncat:
    CheckStrncatArguments(TheCall, FnInfo);
    break;
  case Builtin::BIfree:
    CheckFreeArguments(TheCall);
    break;
  default:
    CheckMemaccessArguments(TheCall, CMId, FnInfo);
  }

  return false;
}
5034
5035bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
5036 ArrayRef<const Expr *> Args) {
5037 VariadicCallType CallType =
5038 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;
5039
5040 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args,
5041 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
5042 CallType);
5043
5044 return false;
5045}
5046
5047bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
5048 const FunctionProtoType *Proto) {
5049 QualType Ty;
5050 if (const auto *V = dyn_cast<VarDecl>(NDecl))
5051 Ty = V->getType().getNonReferenceType();
5052 else if (const auto *F = dyn_cast<FieldDecl>(NDecl))
5053 Ty = F->getType().getNonReferenceType();
5054 else
5055 return false;
5056
5057 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
5058 !Ty->isFunctionProtoType())
5059 return false;
5060
5061 VariadicCallType CallType;
5062 if (!Proto || !Proto->isVariadic()) {
5063 CallType = VariadicDoesNotApply;
5064 } else if (Ty->isBlockPointerType()) {
5065 CallType = VariadicBlock;
5066 } else { // Ty->isFunctionPointerType()
5067 CallType = VariadicFunction;
5068 }
5069
5070 checkCall(NDecl, Proto, /*ThisArg=*/nullptr,
5071 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
5072 /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
5073 TheCall->getCallee()->getSourceRange(), CallType);
5074
5075 return false;
5076}
5077
5078/// Checks function calls when a FunctionDecl or a NamedDecl is not available,
5079/// such as function pointers returned from functions.
5080bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
5081 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
5082 TheCall->getCallee());
5083 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
5084 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
5085 /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
5086 TheCall->getCallee()->getSourceRange(), CallType);
5087
5088 return false;
5089}
5090
5091static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
5092 if (!llvm::isValidAtomicOrderingCABI(Ordering))
5093 return false;
5094
5095 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
5096 switch (Op) {
5097 case AtomicExpr::AO__c11_atomic_init:
5098 case AtomicExpr::AO__opencl_atomic_init:
5099 llvm_unreachable("There is no ordering argument for an init")__builtin_unreachable();
5100
5101 case AtomicExpr::AO__c11_atomic_load:
5102 case AtomicExpr::AO__opencl_atomic_load:
5103 case AtomicExpr::AO__atomic_load_n:
5104 case AtomicExpr::AO__atomic_load:
5105 return OrderingCABI != llvm::AtomicOrderingCABI::release &&
5106 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
5107
5108 case AtomicExpr::AO__c11_atomic_store:
5109 case AtomicExpr::AO__opencl_atomic_store:
5110 case AtomicExpr::AO__atomic_store:
5111 case AtomicExpr::AO__atomic_store_n:
5112 return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
5113 OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
5114 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
5115
5116 default:
5117 return true;
5118 }
5119}
5120
5121ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
5122 AtomicExpr::AtomicOp Op) {
5123 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
5124 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
5125 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()};
5126 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()},
5127 DRE->getSourceRange(), TheCall->getRParenLoc(), Args,
5128 Op);
5129}
5130
/// Type-check an atomic builtin (__c11_*, __opencl_*, or GNU __atomic_*)
/// and build the corresponding AtomicExpr.
///
/// \param ExprRange  source range of the callee, used for diagnostics.
/// \param Args       the call arguments, in \p ArgOrder order.
/// \param ArgOrder   whether \p Args are in AST (source) order or in the
///                   API order documented below.
ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
                                 SourceLocation RParenLoc, MultiExprArg Args,
                                 AtomicExpr::AtomicOp Op,
                                 AtomicArgumentOrder ArgOrder) {
  // All the non-OpenCL operations take one of the following forms.
  // The OpenCL operations take the __c11 forms with one extra argument for
  // synchronization scope.
  enum {
    // C    __c11_atomic_init(A *, C)
    Init,

    // C    __c11_atomic_load(A *, int)
    Load,

    // void __atomic_load(A *, CP, int)
    LoadCopy,

    // void __atomic_store(A *, CP, int)
    Copy,

    // C    __c11_atomic_add(A *, M, int)
    Arithmetic,

    // C    __atomic_exchange_n(A *, CP, int)
    Xchg,

    // void __atomic_exchange(A *, C *, CP, int)
    GNUXchg,

    // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
    C11CmpXchg,

    // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
    GNUCmpXchg
  } Form = Init;

  // Per-form argument counts and value-operand counts, indexed by Form.
  const unsigned NumForm = GNUCmpXchg + 1;
  const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
  const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
  // where:
  //   C is an appropriate type,
  //   A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
  //   CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
  //   M is C if C is an integer, and ptrdiff_t if C is a pointer, and
  //   the int parameters are for orderings.

  static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm
      && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
      "need to update code for modified forms");
  static_assert(AtomicExpr::AO__c11_atomic_init == 0 &&
                    AtomicExpr::AO__c11_atomic_fetch_min + 1 ==
                        AtomicExpr::AO__atomic_load,
                "need to update code for modified C11 atomics");
  // Classify the operation: OpenCL and C11 builtins require _Atomic types;
  // the _n variants take the value operand by value rather than by address.
  bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init &&
                  Op <= AtomicExpr::AO__opencl_atomic_fetch_max;
  bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init &&
                Op <= AtomicExpr::AO__c11_atomic_fetch_min) ||
               IsOpenCL;
  bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
             Op == AtomicExpr::AO__atomic_store_n ||
             Op == AtomicExpr::AO__atomic_exchange_n ||
             Op == AtomicExpr::AO__atomic_compare_exchange_n;
  bool IsAddSub = false;

  // Map the operation to its argument form.
  switch (Op) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    Form = Init;
    break;

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    Form = Load;
    break;

  case AtomicExpr::AO__atomic_load:
    Form = LoadCopy;
    break;

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
    Form = Copy;
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
    IsAddSub = true;
    Form = Arithmetic;
    break;
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Form = Arithmetic;
    break;
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_max:
    Form = Arithmetic;
    break;

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
    Form = Xchg;
    break;

  case AtomicExpr::AO__atomic_exchange:
    Form = GNUXchg;
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    Form = C11CmpXchg;
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
    Form = GNUCmpXchg;
    break;
  }

  unsigned AdjustedNumArgs = NumArgs[Form];
  // OpenCL operations (other than init) carry an extra trailing
  // synchronization-scope argument.
  if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init)
    ++AdjustedNumArgs;
  // Check we have the right number of arguments.
  if (Args.size() < AdjustedNumArgs) {
    Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args)
        << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
        << ExprRange;
    return ExprError();
  } else if (Args.size() > AdjustedNumArgs) {
    Diag(Args[AdjustedNumArgs]->getBeginLoc(),
         diag::err_typecheck_call_too_many_args)
        << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
        << ExprRange;
    return ExprError();
  }

  // Inspect the first argument of the atomic operation.
  Expr *Ptr = Args[0];
  ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr);
  if (ConvertedPtr.isInvalid())
    return ExprError();

  Ptr = ConvertedPtr.get();
  const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
        << Ptr->getType() << Ptr->getSourceRange();
    return ExprError();
  }

  // For a __c11 builtin, this should be a pointer to an _Atomic type.
  QualType AtomTy = pointerType->getPointeeType(); // 'A'
  QualType ValType = AtomTy; // 'C'
  if (IsC11) {
    if (!AtomTy->isAtomicType()) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic)
          << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
    // Only loads may operate on a const (or OpenCL constant-address-space)
    // atomic object; everything else writes through the pointer.
    if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) ||
        AtomTy.getAddressSpace() == LangAS::opencl_constant) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic)
          << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType()
          << Ptr->getSourceRange();
      return ExprError();
    }
    ValType = AtomTy->castAs<AtomicType>()->getValueType();
  } else if (Form != Load && Form != LoadCopy) {
    if (ValType.isConstQualified()) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer)
          << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
  }

  // For an arithmetic operation, the implied arithmetic must be well-formed.
  if (Form == Arithmetic) {
    // gcc does not enforce these rules for GNU atomics, but we do so for
    // sanity.
    auto IsAllowedValueType = [&](QualType ValType) {
      if (ValType->isIntegerType())
        return true;
      if (ValType->isPointerType())
        return true;
      if (!ValType->isFloatingType())
        return false;
      // LLVM Parser does not allow atomicrmw with x86_fp80 type.
      if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) &&
          &Context.getTargetInfo().getLongDoubleFormat() ==
              &llvm::APFloat::x87DoubleExtended())
        return false;
      return true;
    };
    // add/sub also permit pointers and floats; the other arithmetic ops
    // (and/or/xor/nand/min/max) require integers.
    if (IsAddSub && !IsAllowedValueType(ValType)) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp)
          << IsC11 << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
    if (!IsAddSub && !ValType->isIntegerType()) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int)
          << IsC11 << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
    if (IsC11 && ValType->isPointerType() &&
        RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(),
                            diag::err_incomplete_type)) {
      return ExprError();
    }
  } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
    // For __atomic_*_n operations, the value type must be a scalar integral or
    // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
    Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr)
        << IsC11 << Ptr->getType() << Ptr->getSourceRange();
    return ExprError();
  }

  if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) &&
      !AtomTy->isScalarType()) {
    // For GNU atomics, require a trivially-copyable type. This is not part of
    // the GNU atomics specification, but we enforce it for sanity.
    Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy)
        << Ptr->getType() << Ptr->getSourceRange();
    return ExprError();
  }

  // Reject ARC-managed object types; atomics must not implicitly retain or
  // release.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    // FIXME: Can this happen? By this point, ValType should be known
    // to be trivially copyable.
    Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership)
        << ValType << Ptr->getSourceRange();
    return ExprError();
  }

  // All atomic operations have an overload which takes a pointer to a volatile
  // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself
  // into the result or the other operands. Similarly atomic_load takes a
  // pointer to a const 'A'.
  ValType.removeLocalVolatile();
  ValType.removeLocalConst();
  QualType ResultType = ValType;
  if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
      Form == Init)
    ResultType = Context.VoidTy;
  else if (Form == C11CmpXchg || Form == GNUCmpXchg)
    ResultType = Context.BoolTy;

  // The type of a parameter passed 'by value'. In the GNU atomics, such
  // arguments are actually passed as pointers.
  QualType ByValType = ValType; // 'CP'
  bool IsPassedByAddress = false;
  if (!IsC11 && !IsN) {
    ByValType = Ptr->getType();
    IsPassedByAddress = true;
  }

  // Normalize the arguments into API order: pointer, value operand(s),
  // weak flag, then ordering(s). In the AST the orderings come earlier.
  SmallVector<Expr *, 5> APIOrderedArgs;
  if (ArgOrder == Sema::AtomicArgumentOrder::AST) {
    APIOrderedArgs.push_back(Args[0]);
    switch (Form) {
    case Init:
    case Load:
      APIOrderedArgs.push_back(Args[1]); // Val1/Order
      break;
    case LoadCopy:
    case Copy:
    case Arithmetic:
    case Xchg:
      APIOrderedArgs.push_back(Args[2]); // Val1
      APIOrderedArgs.push_back(Args[1]); // Order
      break;
    case GNUXchg:
      APIOrderedArgs.push_back(Args[2]); // Val1
      APIOrderedArgs.push_back(Args[3]); // Val2
      APIOrderedArgs.push_back(Args[1]); // Order
      break;
    case C11CmpXchg:
      APIOrderedArgs.push_back(Args[2]); // Val1
      APIOrderedArgs.push_back(Args[4]); // Val2
      APIOrderedArgs.push_back(Args[1]); // Order
      APIOrderedArgs.push_back(Args[3]); // OrderFail
      break;
    case GNUCmpXchg:
      APIOrderedArgs.push_back(Args[2]); // Val1
      APIOrderedArgs.push_back(Args[4]); // Val2
      APIOrderedArgs.push_back(Args[5]); // Weak
      APIOrderedArgs.push_back(Args[1]); // Order
      APIOrderedArgs.push_back(Args[3]); // OrderFail
      break;
    }
  } else
    APIOrderedArgs.append(Args.begin(), Args.end());

  // The first argument's non-CV pointer type is used to deduce the type of
  // subsequent arguments, except for:
  //   - weak flag (always converted to bool)
  //   - memory order (always converted to int)
  //   - scope (always converted to int)
  for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) {
    QualType Ty;
    if (i < NumVals[Form] + 1) {
      switch (i) {
      case 0:
        // The first argument is always a pointer. It has a fixed type.
        // It is always dereferenced, a nullptr is undefined.
        CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
        // Nothing else to do: we already know all we want about this pointer.
        continue;
      case 1:
        // The second argument is the non-atomic operand. For arithmetic, this
        // is always passed by value, and for a compare_exchange it is always
        // passed by address. For the rest, GNU uses by-address and C11 uses
        // by-value.
        assert(Form != Load);
        if (Form == Arithmetic && ValType->isPointerType())
          Ty = Context.getPointerDiffType();
        else if (Form == Init || Form == Arithmetic)
          Ty = ValType;
        else if (Form == Copy || Form == Xchg) {
          if (IsPassedByAddress) {
            // The value pointer is always dereferenced, a nullptr is
            // undefined.
            CheckNonNullArgument(*this, APIOrderedArgs[i],
                                 ExprRange.getBegin());
          }
          Ty = ByValType;
        } else {
          Expr *ValArg = APIOrderedArgs[i];
          // The value pointer is always dereferenced, a nullptr is undefined.
          CheckNonNullArgument(*this, ValArg, ExprRange.getBegin());
          LangAS AS = LangAS::Default;
          // Keep address space of non-atomic pointer type.
          if (const PointerType *PtrTy =
                  ValArg->getType()->getAs<PointerType>()) {
            AS = PtrTy->getPointeeType().getAddressSpace();
          }
          Ty = Context.getPointerType(
              Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS));
        }
        break;
      case 2:
        // The third argument to compare_exchange / GNU exchange is the desired
        // value, either by-value (for the C11 and *_n variant) or as a
        // pointer.
        if (IsPassedByAddress)
          CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
        Ty = ByValType;
        break;
      case 3:
        // The fourth argument to GNU compare_exchange is a 'weak' flag.
        Ty = Context.BoolTy;
        break;
      }
    } else {
      // The order(s) and scope are always converted to int.
      Ty = Context.IntTy;
    }

    // Convert the argument as if initializing a parameter of type Ty.
    InitializedEntity Entity =
        InitializedEntity::InitializeParameter(Context, Ty, false);
    ExprResult Arg = APIOrderedArgs[i];
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      // ExprResult's bool constructor: 'true' yields an invalid result.
      return true;
    APIOrderedArgs[i] = Arg.get();
  }

  // Permute the arguments into a 'consistent' order.
  SmallVector<Expr*, 5> SubExprs;
  SubExprs.push_back(Ptr);
  switch (Form) {
  case Init:
    // Note, AtomicExpr::getVal1() has a special case for this atomic.
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    break;
  case Load:
    SubExprs.push_back(APIOrderedArgs[1]); // Order
    break;
  case LoadCopy:
  case Copy:
  case Arithmetic:
  case Xchg:
    SubExprs.push_back(APIOrderedArgs[2]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    break;
  case GNUXchg:
    // Note, AtomicExpr::getVal2() has a special case for this atomic.
    SubExprs.push_back(APIOrderedArgs[3]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    SubExprs.push_back(APIOrderedArgs[2]); // Val2
    break;
  case C11CmpXchg:
    SubExprs.push_back(APIOrderedArgs[3]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    SubExprs.push_back(APIOrderedArgs[4]); // OrderFail
    SubExprs.push_back(APIOrderedArgs[2]); // Val2
    break;
  case GNUCmpXchg:
    SubExprs.push_back(APIOrderedArgs[4]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    SubExprs.push_back(APIOrderedArgs[5]); // OrderFail
    SubExprs.push_back(APIOrderedArgs[2]); // Val2
    SubExprs.push_back(APIOrderedArgs[3]); // Weak
    break;
  }

  // If the ordering is a constant, check it against the operation's allowed
  // orderings (loads can't release, stores can't acquire, etc.).
  if (SubExprs.size() >= 2 && Form != Init) {
    if (Optional<llvm::APSInt> Result =
            SubExprs[1]->getIntegerConstantExpr(Context))
      if (!isValidOrderingForOp(Result->getSExtValue(), Op))
        Diag(SubExprs[1]->getBeginLoc(),
             diag::warn_atomic_op_has_invalid_memory_order)
            << SubExprs[1]->getSourceRange();
  }

  // Validate a constant synchronization scope against the op's scope model
  // (OpenCL atomics); the scope is always the last argument.
  if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) {
    auto *Scope = Args[Args.size() - 1];
    if (Optional<llvm::APSInt> Result =
            Scope->getIntegerConstantExpr(Context)) {
      if (!ScopeModel->isValid(Result->getZExtValue()))
        Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope)
            << Scope->getSourceRange();
    }
    SubExprs.push_back(Scope);
  }

  AtomicExpr *AE = new (Context)
      AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc);

  // C11 and OpenCL load/store must not lower to an unsupported library call.
  if ((Op == AtomicExpr::AO__c11_atomic_load ||
       Op == AtomicExpr::AO__c11_atomic_store ||
       Op == AtomicExpr::AO__opencl_atomic_load ||
       Op == AtomicExpr::AO__opencl_atomic_store ) &&
      Context.AtomicUsesUnsupportedLibcall(AE))
    Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib)
        << ((Op == AtomicExpr::AO__c11_atomic_load ||
             Op == AtomicExpr::AO__opencl_atomic_load)
                ? 0
                : 1);

  // _ExtInt operands are not supported by the atomic builtins.
  if (ValType->isExtIntType()) {
    Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_ext_int_prohibit);
    return ExprError();
  }

  return AE;
}
5612
5613/// checkBuiltinArgument - Given a call to a builtin function, perform
5614/// normal type-checking on the given argument, updating the call in
5615/// place. This is useful when a builtin function requires custom
5616/// type-checking for some of its arguments but not necessarily all of
5617/// them.
5618///
5619/// Returns true on error.
5620static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
5621 FunctionDecl *Fn = E->getDirectCallee();
5622 assert(Fn && "builtin call without direct callee!")((void)0);
5623
5624 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex);
5625 InitializedEntity Entity =
5626 InitializedEntity::InitializeParameter(S.Context, Param);
5627
5628 ExprResult Arg = E->getArg(0);
5629 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
5630 if (Arg.isInvalid())
5631 return true;
5632
5633 E->setArg(ArgIndex, Arg.get());
5634 return false;
5635}
5636
5637/// We have a call to a function like __sync_fetch_and_add, which is an
5638/// overloaded function based on the pointer type of its first argument.
5639/// The main BuildCallExpr routines have already promoted the types of
5640/// arguments because all of these calls are prototyped as void(...).
5641///
5642/// This function goes through and does final semantic checking for these
5643/// builtins, as well as generating any warnings.
ExprResult
Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get());
  Expr *Callee = TheCall->getCallee();
  // The caller routes only __sync_* builtin calls here, so the callee is a
  // direct reference to a builtin FunctionDecl and these casts cannot fail.
  DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());

  // Ensure that we have at least one argument to do type inference from.
  if (TheCall->getNumArgs() < 1) {
    Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange();
    return ExprError();
  }

  // Inspect the first argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  // FIXME: We don't allow floating point scalars as input.
  Expr *FirstArg = TheCall->getArg(0);
  ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
  if (FirstArgResult.isInvalid())
    return ExprError();
  FirstArg = FirstArgResult.get();
  TheCall->setArg(0, FirstArg);

  const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // Only integer, object-pointer and block-pointer pointees are accepted;
  // anything else (floats, aggregates, ...) is diagnosed here.
  QualType ValType = pointerType->getPointeeType();
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // These builtins store through the pointer, so a const pointee is an error.
  if (ValType.isConstQualified()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // Reject ARC-managed pointees: a raw atomic store would bypass the
  // ownership semantics of __weak/__strong/__autoreleasing.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << FirstArg->getSourceRange();
    return ExprError();
  }

  // Strip any qualifiers off ValType.
  ValType = ValType.getUnqualifiedType();

  // The majority of builtins return a value, but a few have special return
  // types, so allow them to override appropriately below.
  QualType ResultType = ValType;

  // We need to figure out which concrete builtin this maps onto. For example,
  // __sync_fetch_and_add with a 2 byte object turns into
  // __sync_fetch_and_add_2.
#define BUILTIN_ROW(x) \
  { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
    Builtin::BI##x##_8, Builtin::BI##x##_16 }

  // Rows are selected by BuiltinIndex (set in the switch below); columns by
  // SizeIndex (operand size of 1, 2, 4, 8 or 16 bytes).
  static const unsigned BuiltinIndices[][5] = {
    BUILTIN_ROW(__sync_fetch_and_add),
    BUILTIN_ROW(__sync_fetch_and_sub),
    BUILTIN_ROW(__sync_fetch_and_or),
    BUILTIN_ROW(__sync_fetch_and_and),
    BUILTIN_ROW(__sync_fetch_and_xor),
    BUILTIN_ROW(__sync_fetch_and_nand),

    BUILTIN_ROW(__sync_add_and_fetch),
    BUILTIN_ROW(__sync_sub_and_fetch),
    BUILTIN_ROW(__sync_and_and_fetch),
    BUILTIN_ROW(__sync_or_and_fetch),
    BUILTIN_ROW(__sync_xor_and_fetch),
    BUILTIN_ROW(__sync_nand_and_fetch),

    BUILTIN_ROW(__sync_val_compare_and_swap),
    BUILTIN_ROW(__sync_bool_compare_and_swap),
    BUILTIN_ROW(__sync_lock_test_and_set),
    BUILTIN_ROW(__sync_lock_release),
    BUILTIN_ROW(__sync_swap)
  };
#undef BUILTIN_ROW

  // Determine the index of the size.
  unsigned SizeIndex;
  switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
  case 1: SizeIndex = 0; break;
  case 2: SizeIndex = 1; break;
  case 4: SizeIndex = 2; break;
  case 8: SizeIndex = 3; break;
  case 16: SizeIndex = 4; break;
  default:
    // No _N variant exists for this operand size.
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // Each of these builtins has one pointer argument, followed by some number of
  // values (0, 1 or 2) followed by a potentially empty varags list of stuff
  // that we ignore. Find out which row of BuiltinIndices to read from as well
  // as the number of fixed args.
  unsigned BuiltinID = FDecl->getBuiltinID();
  unsigned BuiltinIndex, NumFixed = 1;
  bool WarnAboutSemanticsChange = false;
  switch (BuiltinID) {
  default: llvm_unreachable("Unknown overloaded atomic builtin!");
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    BuiltinIndex = 0;
    break;

  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    BuiltinIndex = 1;
    break;

  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    BuiltinIndex = 2;
    break;

  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    BuiltinIndex = 3;
    break;

  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    BuiltinIndex = 4;
    break;

  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
    BuiltinIndex = 5;
    // The nand builtins changed meaning between compiler releases; flag it.
    WarnAboutSemanticsChange = true;
    break;

  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    BuiltinIndex = 6;
    break;

  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    BuiltinIndex = 7;
    break;

  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    BuiltinIndex = 8;
    break;

  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    BuiltinIndex = 9;
    break;

  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    BuiltinIndex = 10;
    break;

  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
    BuiltinIndex = 11;
    // Same semantics-change warning as the fetch_and_nand family above.
    WarnAboutSemanticsChange = true;
    break;

  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
    BuiltinIndex = 12;
    // compare-and-swap takes both an expected and a desired value.
    NumFixed = 2;
    break;

  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
    BuiltinIndex = 13;
    NumFixed = 2;
    // Returns success/failure rather than the old value.
    ResultType = Context.BoolTy;
    break;

  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    BuiltinIndex = 14;
    break;

  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16:
    BuiltinIndex = 15;
    // lock_release takes only the pointer and returns nothing.
    NumFixed = 0;
    ResultType = Context.VoidTy;
    break;

  case Builtin::BI__sync_swap:
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    BuiltinIndex = 16;
    break;
  }

  // Now that we know how many fixed arguments we expect, first check that we
  // have at least that many.
  if (TheCall->getNumArgs() < 1+NumFixed) {
    Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 + NumFixed << TheCall->getNumArgs()
        << Callee->getSourceRange();
    return ExprError();
  }

  // The __sync_* builtins carry an implicit sequentially-consistent ordering;
  // warn unconditionally so users can consider the __atomic_* forms.
  Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst)
      << Callee->getSourceRange();

  if (WarnAboutSemanticsChange) {
    Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change)
        << Callee->getSourceRange();
  }

  // Get the decl for the concrete builtin from this, we can tell what the
  // concrete integer type we should convert to is.
  unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
  const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID);
  FunctionDecl *NewBuiltinDecl;
  if (NewBuiltinID == BuiltinID)
    // The user already called the size-specific form directly.
    NewBuiltinDecl = FDecl;
  else {
    // Perform builtin lookup to avoid redeclaring it.
    DeclarationName DN(&Context.Idents.get(NewBuiltinName));
    LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
    LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
    assert(Res.getFoundDecl());
    NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
    if (!NewBuiltinDecl)
      return ExprError();
  }

  // The first argument --- the pointer --- has a fixed type; we
  // deduce the types of the rest of the arguments accordingly. Walk
  // the remaining arguments, converting them to the deduced value type.
  for (unsigned i = 0; i != NumFixed; ++i) {
    ExprResult Arg = TheCall->getArg(i+1);

    // GCC does an implicit conversion to the pointer or integer ValType. This
    // can fail in some cases (1i -> int**), check for this error case now.
    // Initialize the argument.
    InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
                                                   ValType, /*consume*/ false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return ExprError();

    // Okay, we have something that *can* be converted to the right type. Check
    // to see if there is a potentially weird extension going on here. This can
    // happen when you do an atomic operation on something like an char* and
    // pass in 42. The 42 gets converted to char. This is even more strange
    // for things like 45.123 -> char, etc.
    // FIXME: Do this check.
    TheCall->setArg(i+1, Arg.get());
  }

  // Create a new DeclRefExpr to refer to the new decl.
  DeclRefExpr *NewDRE = DeclRefExpr::Create(
      Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
      /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
      DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());

  // Set the callee in the CallExpr.
  // FIXME: This loses syntactic information.
  QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
  ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
                                              CK_BuiltinFnToFnPtr);
  TheCall->setCallee(PromotedCall.get());

  // Change the result type of the call to match the original value type. This
  // is arbitrary, but the codegen for these builtins is designed to handle it
  // gracefully.
  TheCall->setType(ResultType);

  // Prohibit use of _ExtInt with atomic builtins.
  // The arguments would have already been converted to the first argument's
  // type, so only need to check the first argument.
  const auto *ExtIntValType = ValType->getAs<ExtIntType>();
  if (ExtIntValType && !llvm::isPowerOf2_64(ExtIntValType->getNumBits())) {
    Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
    return ExprError();
  }

  return TheCallResult;
}
6014
6015/// SemaBuiltinNontemporalOverloaded - We have a call to
6016/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
6017/// overloaded function based on the pointer type of its last argument.
6018///
6019/// This function goes through and does final semantic checking for these
6020/// builtins.
6021ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
6022 CallExpr *TheCall = (CallExpr *)TheCallResult.get();
6023 DeclRefExpr *DRE =
6024 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());