Bug Summary

File: src/gnu/usr.bin/clang/libclangSema/../../../llvm/clang/lib/Sema/SemaChecking.cpp
Warning: line 10829, column 7
Called C++ object pointer is null
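
The analyzer flags a C++ member call made through a pointer that is null on the path it explored. The flagged statement (SemaChecking.cpp:10829) is outside the excerpt reproduced below; the following is a minimal, hypothetical sketch of the pattern this diagnostic describes, not the actual code at that line:

    struct S { void f(); };

    void g(S *p) {
      if (p)
        return;
      // On this path the analyzer knows p == nullptr.
      p->f(); // warning: Called C++ object pointer is null
    }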

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name SemaChecking.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangSema/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libclangSema/obj/../include/clang/Sema -I /usr/src/gnu/usr.bin/clang/libclangSema/../../../llvm/clang/include -I /usr/src/gnu/usr.bin/clang/libclangSema/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libclangSema/../include -I /usr/src/gnu/usr.bin/clang/libclangSema/obj -I /usr/src/gnu/usr.bin/clang/libclangSema/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangSema/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libclangSema/../../../llvm/clang/lib/Sema/SemaChecking.cpp
1//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements extra semantic analysis beyond what is enforced
10// by the C type system.
11//
12//===----------------------------------------------------------------------===//
13
14#include "clang/AST/APValue.h"
15#include "clang/AST/ASTContext.h"
16#include "clang/AST/Attr.h"
17#include "clang/AST/AttrIterator.h"
18#include "clang/AST/CharUnits.h"
19#include "clang/AST/Decl.h"
20#include "clang/AST/DeclBase.h"
21#include "clang/AST/DeclCXX.h"
22#include "clang/AST/DeclObjC.h"
23#include "clang/AST/DeclarationName.h"
24#include "clang/AST/EvaluatedExprVisitor.h"
25#include "clang/AST/Expr.h"
26#include "clang/AST/ExprCXX.h"
27#include "clang/AST/ExprObjC.h"
28#include "clang/AST/ExprOpenMP.h"
29#include "clang/AST/FormatString.h"
30#include "clang/AST/NSAPI.h"
31#include "clang/AST/NonTrivialTypeVisitor.h"
32#include "clang/AST/OperationKinds.h"
33#include "clang/AST/RecordLayout.h"
34#include "clang/AST/Stmt.h"
35#include "clang/AST/TemplateBase.h"
36#include "clang/AST/Type.h"
37#include "clang/AST/TypeLoc.h"
38#include "clang/AST/UnresolvedSet.h"
39#include "clang/Basic/AddressSpaces.h"
40#include "clang/Basic/CharInfo.h"
41#include "clang/Basic/Diagnostic.h"
42#include "clang/Basic/IdentifierTable.h"
43#include "clang/Basic/LLVM.h"
44#include "clang/Basic/LangOptions.h"
45#include "clang/Basic/OpenCLOptions.h"
46#include "clang/Basic/OperatorKinds.h"
47#include "clang/Basic/PartialDiagnostic.h"
48#include "clang/Basic/SourceLocation.h"
49#include "clang/Basic/SourceManager.h"
50#include "clang/Basic/Specifiers.h"
51#include "clang/Basic/SyncScope.h"
52#include "clang/Basic/TargetBuiltins.h"
53#include "clang/Basic/TargetCXXABI.h"
54#include "clang/Basic/TargetInfo.h"
55#include "clang/Basic/TypeTraits.h"
56#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
57#include "clang/Sema/Initialization.h"
58#include "clang/Sema/Lookup.h"
59#include "clang/Sema/Ownership.h"
60#include "clang/Sema/Scope.h"
61#include "clang/Sema/ScopeInfo.h"
62#include "clang/Sema/Sema.h"
63#include "clang/Sema/SemaInternal.h"
64#include "llvm/ADT/APFloat.h"
65#include "llvm/ADT/APInt.h"
66#include "llvm/ADT/APSInt.h"
67#include "llvm/ADT/ArrayRef.h"
68#include "llvm/ADT/DenseMap.h"
69#include "llvm/ADT/FoldingSet.h"
70#include "llvm/ADT/None.h"
71#include "llvm/ADT/Optional.h"
72#include "llvm/ADT/STLExtras.h"
73#include "llvm/ADT/SmallBitVector.h"
74#include "llvm/ADT/SmallPtrSet.h"
75#include "llvm/ADT/SmallString.h"
76#include "llvm/ADT/SmallVector.h"
77#include "llvm/ADT/StringRef.h"
78#include "llvm/ADT/StringSet.h"
79#include "llvm/ADT/StringSwitch.h"
80#include "llvm/ADT/Triple.h"
81#include "llvm/Support/AtomicOrdering.h"
82#include "llvm/Support/Casting.h"
83#include "llvm/Support/Compiler.h"
84#include "llvm/Support/ConvertUTF.h"
85#include "llvm/Support/ErrorHandling.h"
86#include "llvm/Support/Format.h"
87#include "llvm/Support/Locale.h"
88#include "llvm/Support/MathExtras.h"
89#include "llvm/Support/SaveAndRestore.h"
90#include "llvm/Support/raw_ostream.h"
91#include <algorithm>
92#include <bitset>
93#include <cassert>
94#include <cctype>
95#include <cstddef>
96#include <cstdint>
97#include <functional>
98#include <limits>
99#include <string>
100#include <tuple>
101#include <utility>
102
103using namespace clang;
104using namespace sema;
105
106SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
107 unsigned ByteNo) const {
108 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
109 Context.getTargetInfo());
110}
111
112/// Checks that a call expression's argument count is the desired number.
113/// This is useful when doing custom type-checking. Returns true on error.
114static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
115 unsigned argCount = call->getNumArgs();
116 if (argCount == desiredArgCount) return false;
117
118 if (argCount < desiredArgCount)
119 return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
120 << 0 /*function call*/ << desiredArgCount << argCount
121 << call->getSourceRange();
122
123 // Highlight all the excess arguments.
124 SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
125 call->getArg(argCount - 1)->getEndLoc());
126
127 return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
128 << 0 /*function call*/ << desiredArgCount << argCount
129 << call->getArg(1)->getSourceRange();
130}
131
132/// Check that the first argument to __builtin_annotation is an integer
133/// and the second argument is a non-wide string literal.
134static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
135 if (checkArgCount(S, TheCall, 2))
136 return true;
137
138 // First argument should be an integer.
139 Expr *ValArg = TheCall->getArg(0);
140 QualType Ty = ValArg->getType();
141 if (!Ty->isIntegerType()) {
142 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
143 << ValArg->getSourceRange();
144 return true;
145 }
146
147 // Second argument should be a constant string.
148 Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
149 StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
150 if (!Literal || !Literal->isAscii()) {
151 S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
152 << StrArg->getSourceRange();
153 return true;
154 }
155
156 TheCall->setType(Ty);
157 return false;
158}
159
160static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
161 // We need at least one argument.
162 if (TheCall->getNumArgs() < 1) {
163 S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
164 << 0 << 1 << TheCall->getNumArgs()
165 << TheCall->getCallee()->getSourceRange();
166 return true;
167 }
168
169 // All arguments should be wide string literals.
170 for (Expr *Arg : TheCall->arguments()) {
171 auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
172 if (!Literal || !Literal->isWide()) {
173 S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
174 << Arg->getSourceRange();
175 return true;
176 }
177 }
178
179 return false;
180}
181
182/// Check that the argument to __builtin_addressof is a glvalue, and set the
183/// result type to the corresponding pointer type.
184static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
185 if (checkArgCount(S, TheCall, 1))
186 return true;
187
188 ExprResult Arg(TheCall->getArg(0));
189 QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
190 if (ResultType.isNull())
191 return true;
192
193 TheCall->setArg(0, Arg.get());
194 TheCall->setType(ResultType);
195 return false;
196}
197
198/// Check the number of arguments and set the result type to
199/// the argument type.
200static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
201 if (checkArgCount(S, TheCall, 1))
202 return true;
203
204 TheCall->setType(TheCall->getArg(0)->getType());
205 return false;
206}
207
208/// Check that the value argument for __builtin_is_aligned(value, alignment) and
209/// __builtin_align_{up,down}(value, alignment) is an integer or a pointer
210/// type (but not a function pointer) and that the alignment is a power-of-two.
211static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
212 if (checkArgCount(S, TheCall, 2))
213 return true;
214
215 clang::Expr *Source = TheCall->getArg(0);
216 bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;
217
218 auto IsValidIntegerType = [](QualType Ty) {
219 return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
220 };
221 QualType SrcTy = Source->getType();
222 // We should also be able to use it with arrays (but not functions!).
223 if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
224 SrcTy = S.Context.getDecayedType(SrcTy);
225 }
226 if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
227 SrcTy->isFunctionPointerType()) {
228 // FIXME: this is not quite the right error message since we don't allow
229 // floating point types, or member pointers.
230 S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
231 << SrcTy;
232 return true;
233 }
234
235 clang::Expr *AlignOp = TheCall->getArg(1);
236 if (!IsValidIntegerType(AlignOp->getType())) {
237 S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
238 << AlignOp->getType();
239 return true;
240 }
241 Expr::EvalResult AlignResult;
242 unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
243 // We can't check validity of alignment if it is value dependent.
244 if (!AlignOp->isValueDependent() &&
245 AlignOp->EvaluateAsInt(AlignResult, S.Context,
246 Expr::SE_AllowSideEffects)) {
247 llvm::APSInt AlignValue = AlignResult.Val.getInt();
248 llvm::APSInt MaxValue(
249 llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
250 if (AlignValue < 1) {
251 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
252 return true;
253 }
254 if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
255 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
256 << toString(MaxValue, 10);
257 return true;
258 }
259 if (!AlignValue.isPowerOf2()) {
260 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
261 return true;
262 }
263 if (AlignValue == 1) {
264 S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
265 << IsBooleanAlignBuiltin;
266 }
267 }
268
269 ExprResult SrcArg = S.PerformCopyInitialization(
270 InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
271 SourceLocation(), Source);
272 if (SrcArg.isInvalid())
273 return true;
274 TheCall->setArg(0, SrcArg.get());
275 ExprResult AlignArg =
276 S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
277 S.Context, AlignOp->getType(), false),
278 SourceLocation(), AlignOp);
279 if (AlignArg.isInvalid())
280 return true;
281 TheCall->setArg(1, AlignArg.get());
282 // For align_up/align_down, the return type is the same as the (potentially
283 // decayed) argument type including qualifiers. For is_aligned(), the result
284 // is always bool.
285 TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
286 return false;
287}
288
289static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
290 unsigned BuiltinID) {
291 if (checkArgCount(S, TheCall, 3))
292 return true;
293
294 // First two arguments should be integers.
295 for (unsigned I = 0; I < 2; ++I) {
296 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
297 if (Arg.isInvalid()) return true;
298 TheCall->setArg(I, Arg.get());
299
300 QualType Ty = Arg.get()->getType();
301 if (!Ty->isIntegerType()) {
302 S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
303 << Ty << Arg.get()->getSourceRange();
304 return true;
305 }
306 }
307
308 // Third argument should be a pointer to a non-const integer.
309 // IRGen correctly handles volatile, restrict, and address spaces, and
310 // the other qualifiers aren't possible.
311 {
312 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
313 if (Arg.isInvalid()) return true;
314 TheCall->setArg(2, Arg.get());
315
316 QualType Ty = Arg.get()->getType();
317 const auto *PtrTy = Ty->getAs<PointerType>();
318 if (!PtrTy ||
319 !PtrTy->getPointeeType()->isIntegerType() ||
320 PtrTy->getPointeeType().isConstQualified()) {
321 S.Diag(Arg.get()->getBeginLoc(),
322 diag::err_overflow_builtin_must_be_ptr_int)
323 << Ty << Arg.get()->getSourceRange();
324 return true;
325 }
326 }
327
328 // Disallow signed ExtIntType args larger than 128 bits to mul function until
329 // we improve backend support.
330 if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
331 for (unsigned I = 0; I < 3; ++I) {
332 const auto Arg = TheCall->getArg(I);
333 // Third argument will be a pointer.
334 auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
335 if (Ty->isExtIntType() && Ty->isSignedIntegerType() &&
336 S.getASTContext().getIntWidth(Ty) > 128)
337 return S.Diag(Arg->getBeginLoc(),
338 diag::err_overflow_builtin_ext_int_max_size)
339 << 128;
340 }
341 }
342
343 return false;
344}
345
346static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
347 if (checkArgCount(S, BuiltinCall, 2))
348 return true;
349
350 SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
351 Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
352 Expr *Call = BuiltinCall->getArg(0);
353 Expr *Chain = BuiltinCall->getArg(1);
354
355 if (Call->getStmtClass() != Stmt::CallExprClass) {
356 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
357 << Call->getSourceRange();
358 return true;
359 }
360
361 auto CE = cast<CallExpr>(Call);
362 if (CE->getCallee()->getType()->isBlockPointerType()) {
363 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
364 << Call->getSourceRange();
365 return true;
366 }
367
368 const Decl *TargetDecl = CE->getCalleeDecl();
369 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
370 if (FD->getBuiltinID()) {
371 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
372 << Call->getSourceRange();
373 return true;
374 }
375
376 if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
377 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
378 << Call->getSourceRange();
379 return true;
380 }
381
382 ExprResult ChainResult = S.UsualUnaryConversions(Chain);
383 if (ChainResult.isInvalid())
384 return true;
385 if (!ChainResult.get()->getType()->isPointerType()) {
386 S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
387 << Chain->getSourceRange();
388 return true;
389 }
390
391 QualType ReturnTy = CE->getCallReturnType(S.Context);
392 QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
393 QualType BuiltinTy = S.Context.getFunctionType(
394 ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
395 QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);
396
397 Builtin =
398 S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();
399
400 BuiltinCall->setType(CE->getType());
401 BuiltinCall->setValueKind(CE->getValueKind());
402 BuiltinCall->setObjectKind(CE->getObjectKind());
403 BuiltinCall->setCallee(Builtin);
404 BuiltinCall->setArg(1, ChainResult.get());
405
406 return false;
407}
408
409namespace {
410
411class EstimateSizeFormatHandler
412 : public analyze_format_string::FormatStringHandler {
413 size_t Size;
414
415public:
416 EstimateSizeFormatHandler(StringRef Format)
417 : Size(std::min(Format.find(0), Format.size()) +
418 1 /* null byte always written by sprintf */) {}
419
420 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
421 const char *, unsigned SpecifierLen) override {
422
423 const size_t FieldWidth = computeFieldWidth(FS);
424 const size_t Precision = computePrecision(FS);
425
426 // The actual format.
427 switch (FS.getConversionSpecifier().getKind()) {
428 // Just a char.
429 case analyze_format_string::ConversionSpecifier::cArg:
430 case analyze_format_string::ConversionSpecifier::CArg:
431 Size += std::max(FieldWidth, (size_t)1);
432 break;
433 // Just an integer.
434 case analyze_format_string::ConversionSpecifier::dArg:
435 case analyze_format_string::ConversionSpecifier::DArg:
436 case analyze_format_string::ConversionSpecifier::iArg:
437 case analyze_format_string::ConversionSpecifier::oArg:
438 case analyze_format_string::ConversionSpecifier::OArg:
439 case analyze_format_string::ConversionSpecifier::uArg:
440 case analyze_format_string::ConversionSpecifier::UArg:
441 case analyze_format_string::ConversionSpecifier::xArg:
442 case analyze_format_string::ConversionSpecifier::XArg:
443 Size += std::max(FieldWidth, Precision);
444 break;
445
446 // %g style conversion switches between %f or %e style dynamically.
447 // %f always takes less space, so default to it.
448 case analyze_format_string::ConversionSpecifier::gArg:
449 case analyze_format_string::ConversionSpecifier::GArg:
450
451 // Floating point number in the form '[+]ddd.ddd'.
452 case analyze_format_string::ConversionSpecifier::fArg:
453 case analyze_format_string::ConversionSpecifier::FArg:
454 Size += std::max(FieldWidth, 1 /* integer part */ +
455 (Precision ? 1 + Precision
456 : 0) /* period + decimal */);
457 break;
458
459 // Floating point number in the form '[-]d.ddde[+-]dd'.
460 case analyze_format_string::ConversionSpecifier::eArg:
461 case analyze_format_string::ConversionSpecifier::EArg:
462 Size +=
463 std::max(FieldWidth,
464 1 /* integer part */ +
465 (Precision ? 1 + Precision : 0) /* period + decimal */ +
466 1 /* e or E letter */ + 2 /* exponent */);
467 break;
468
469 // Floating point number in the form '[-]0xh.hhhhp±dd'.
470 case analyze_format_string::ConversionSpecifier::aArg:
471 case analyze_format_string::ConversionSpecifier::AArg:
472 Size +=
473 std::max(FieldWidth,
474 2 /* 0x */ + 1 /* integer part */ +
475 (Precision ? 1 + Precision : 0) /* period + decimal */ +
476 1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
477 break;
478
479 // Just a string.
480 case analyze_format_string::ConversionSpecifier::sArg:
481 case analyze_format_string::ConversionSpecifier::SArg:
482 Size += FieldWidth;
483 break;
484
485 // Just a pointer in the form '0xddd'.
486 case analyze_format_string::ConversionSpecifier::pArg:
487 Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
488 break;
489
490 // A plain percent.
491 case analyze_format_string::ConversionSpecifier::PercentArg:
492 Size += 1;
493 break;
494
495 default:
496 break;
497 }
498
499 Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();
500
501 if (FS.hasAlternativeForm()) {
502 switch (FS.getConversionSpecifier().getKind()) {
503 default:
504 break;
505 // Force a leading '0'.
506 case analyze_format_string::ConversionSpecifier::oArg:
507 Size += 1;
508 break;
509 // Force a leading '0x'.
510 case analyze_format_string::ConversionSpecifier::xArg:
511 case analyze_format_string::ConversionSpecifier::XArg:
512 Size += 2;
513 break;
514 // Force a period '.' before decimal, even if precision is 0.
515 case analyze_format_string::ConversionSpecifier::aArg:
516 case analyze_format_string::ConversionSpecifier::AArg:
517 case analyze_format_string::ConversionSpecifier::eArg:
518 case analyze_format_string::ConversionSpecifier::EArg:
519 case analyze_format_string::ConversionSpecifier::fArg:
520 case analyze_format_string::ConversionSpecifier::FArg:
521 case analyze_format_string::ConversionSpecifier::gArg:
522 case analyze_format_string::ConversionSpecifier::GArg:
523 Size += (Precision ? 0 : 1);
524 break;
525 }
526 }
527 assert(SpecifierLen <= Size && "no underflow");
528 Size -= SpecifierLen;
529 return true;
530 }
531
532 size_t getSizeLowerBound() const { return Size; }
533
534private:
535 static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
536 const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
537 size_t FieldWidth = 0;
538 if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
539 FieldWidth = FW.getConstantAmount();
540 return FieldWidth;
541 }
542
543 static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
544 const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
545 size_t Precision = 0;
546
547 // See man 3 printf for default precision value based on the specifier.
548 switch (FW.getHowSpecified()) {
549 case analyze_format_string::OptionalAmount::NotSpecified:
550 switch (FS.getConversionSpecifier().getKind()) {
551 default:
552 break;
553 case analyze_format_string::ConversionSpecifier::dArg: // %d
554 case analyze_format_string::ConversionSpecifier::DArg: // %D
555 case analyze_format_string::ConversionSpecifier::iArg: // %i
556 Precision = 1;
557 break;
558 case analyze_format_string::ConversionSpecifier::oArg: // %o
559 case analyze_format_string::ConversionSpecifier::OArg: // %O
560 case analyze_format_string::ConversionSpecifier::uArg: // %u
561 case analyze_format_string::ConversionSpecifier::UArg: // %U
562 case analyze_format_string::ConversionSpecifier::xArg: // %x
563 case analyze_format_string::ConversionSpecifier::XArg: // %X
564 Precision = 1;
565 break;
566 case analyze_format_string::ConversionSpecifier::fArg: // %f
567 case analyze_format_string::ConversionSpecifier::FArg: // %F
568 case analyze_format_string::ConversionSpecifier::eArg: // %e
569 case analyze_format_string::ConversionSpecifier::EArg: // %E
570 case analyze_format_string::ConversionSpecifier::gArg: // %g
571 case analyze_format_string::ConversionSpecifier::GArg: // %G
572 Precision = 6;
573 break;
574 case analyze_format_string::ConversionSpecifier::pArg: // %p
575 Precision = 1;
576 break;
577 }
578 break;
579 case analyze_format_string::OptionalAmount::Constant:
580 Precision = FW.getConstantAmount();
581 break;
582 default:
583 break;
584 }
585 return Precision;
586 }
587};
588
589} // namespace
590
591/// Check a call to BuiltinID for buffer overflows. If BuiltinID is a
592/// __builtin_*_chk function, then use the object size argument specified in the
593/// source. Otherwise, infer the object size using __builtin_object_size.
594void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
595 CallExpr *TheCall) {
596 // FIXME: There are some more useful checks we could be doing here:
597 // - Evaluate strlen of strcpy arguments, use as object size.
598
599 if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
600 isConstantEvaluated())
601 return;
602
603 unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
604 if (!BuiltinID)
605 return;
606
607 const TargetInfo &TI = getASTContext().getTargetInfo();
608 unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
609
610 unsigned DiagID = 0;
611 bool IsChkVariant = false;
612 Optional<llvm::APSInt> UsedSize;
613 unsigned SizeIndex, ObjectIndex;
614 switch (BuiltinID) {
615 default:
616 return;
617 case Builtin::BIsprintf:
618 case Builtin::BI__builtin___sprintf_chk: {
619 size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
620 auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();
621
622 if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {
623
624 if (!Format->isAscii() && !Format->isUTF8())
625 return;
626
627 StringRef FormatStrRef = Format->getString();
628 EstimateSizeFormatHandler H(FormatStrRef);
629 const char *FormatBytes = FormatStrRef.data();
630 const ConstantArrayType *T =
631 Context.getAsConstantArrayType(Format->getType());
632 assert(T && "String literal not of constant array type!");
633 size_t TypeSize = T->getSize().getZExtValue();
634
635 // In case there's a null byte somewhere.
636 size_t StrLen =
637 std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
638 if (!analyze_format_string::ParsePrintfString(
639 H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
640 Context.getTargetInfo(), false)) {
641 DiagID = diag::warn_fortify_source_format_overflow;
642 UsedSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
643 .extOrTrunc(SizeTypeWidth);
644 if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
645 IsChkVariant = true;
646 ObjectIndex = 2;
647 } else {
648 IsChkVariant = false;
649 ObjectIndex = 0;
650 }
651 break;
652 }
653 }
654 return;
655 }
656 case Builtin::BI__builtin___memcpy_chk:
657 case Builtin::BI__builtin___memmove_chk:
658 case Builtin::BI__builtin___memset_chk:
659 case Builtin::BI__builtin___strlcat_chk:
660 case Builtin::BI__builtin___strlcpy_chk:
661 case Builtin::BI__builtin___strncat_chk:
662 case Builtin::BI__builtin___strncpy_chk:
663 case Builtin::BI__builtin___stpncpy_chk:
664 case Builtin::BI__builtin___memccpy_chk:
665 case Builtin::BI__builtin___mempcpy_chk: {
666 DiagID = diag::warn_builtin_chk_overflow;
667 IsChkVariant = true;
668 SizeIndex = TheCall->getNumArgs() - 2;
669 ObjectIndex = TheCall->getNumArgs() - 1;
670 break;
671 }
672
673 case Builtin::BI__builtin___snprintf_chk:
674 case Builtin::BI__builtin___vsnprintf_chk: {
675 DiagID = diag::warn_builtin_chk_overflow;
676 IsChkVariant = true;
677 SizeIndex = 1;
678 ObjectIndex = 3;
679 break;
680 }
681
682 case Builtin::BIstrncat:
683 case Builtin::BI__builtin_strncat:
684 case Builtin::BIstrncpy:
685 case Builtin::BI__builtin_strncpy:
686 case Builtin::BIstpncpy:
687 case Builtin::BI__builtin_stpncpy: {
688 // Whether these functions overflow depends on the runtime strlen of the
689 // string, not just the buffer size, so emitting the "always overflow"
690 // diagnostic isn't quite right. We should still diagnose passing a buffer
691 // size larger than the destination buffer though; this is a runtime abort
692 // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
693 DiagID = diag::warn_fortify_source_size_mismatch;
694 SizeIndex = TheCall->getNumArgs() - 1;
695 ObjectIndex = 0;
696 break;
697 }
698
699 case Builtin::BImemcpy:
700 case Builtin::BI__builtin_memcpy:
701 case Builtin::BImemmove:
702 case Builtin::BI__builtin_memmove:
703 case Builtin::BImemset:
704 case Builtin::BI__builtin_memset:
705 case Builtin::BImempcpy:
706 case Builtin::BI__builtin_mempcpy: {
707 DiagID = diag::warn_fortify_source_overflow;
708 SizeIndex = TheCall->getNumArgs() - 1;
709 ObjectIndex = 0;
710 break;
711 }
712 case Builtin::BIsnprintf:
713 case Builtin::BI__builtin_snprintf:
714 case Builtin::BIvsnprintf:
715 case Builtin::BI__builtin_vsnprintf: {
716 DiagID = diag::warn_fortify_source_size_mismatch;
717 SizeIndex = 1;
718 ObjectIndex = 0;
719 break;
720 }
721 }
722
723 llvm::APSInt ObjectSize;
724 // For __builtin___*_chk, the object size is explicitly provided by the caller
725 // (usually using __builtin_object_size). Use that value to check this call.
726 if (IsChkVariant) {
727 Expr::EvalResult Result;
728 Expr *SizeArg = TheCall->getArg(ObjectIndex);
729 if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
730 return;
731 ObjectSize = Result.Val.getInt();
732
733 // Otherwise, try to evaluate an imaginary call to __builtin_object_size.
734 } else {
735 // If the parameter has a pass_object_size attribute, then we should use its
736 // (potentially) more strict checking mode. Otherwise, conservatively assume
737 // type 0.
738 int BOSType = 0;
739 if (const auto *POS =
740 FD->getParamDecl(ObjectIndex)->getAttr<PassObjectSizeAttr>())
741 BOSType = POS->getType();
742
743 Expr *ObjArg = TheCall->getArg(ObjectIndex);
744 uint64_t Result;
745 if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
746 return;
747 // Get the object size in the target's size_t width.
748 ObjectSize = llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
749 }
750
751 // Evaluate the number of bytes of the object that this call will use.
752 if (!UsedSize) {
753 Expr::EvalResult Result;
754 Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
755 if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
756 return;
757 UsedSize = Result.Val.getInt().extOrTrunc(SizeTypeWidth);
758 }
759
760 if (UsedSize.getValue().ule(ObjectSize))
761 return;
762
763 StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
764 // Skim off the details of whichever builtin was called to produce a better
765// diagnostic, as it's unlikely that the user wrote the __builtin explicitly.
766 if (IsChkVariant) {
767 FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
768 FunctionName = FunctionName.drop_back(std::strlen("_chk"));
769 } else if (FunctionName.startswith("__builtin_")) {
770 FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
771 }
772
773 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
774 PDiag(DiagID)
775 << FunctionName << toString(ObjectSize, /*Radix=*/10)
776 << toString(UsedSize.getValue(), /*Radix=*/10));
777}
778
779static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
780 Scope::ScopeFlags NeededScopeFlags,
781 unsigned DiagID) {
782 // Scopes aren't available during instantiation. Fortunately, builtin
783 // functions cannot be template args so they cannot be formed through template
784 // instantiation. Therefore checking once during the parse is sufficient.
785 if (SemaRef.inTemplateInstantiation())
786 return false;
787
788 Scope *S = SemaRef.getCurScope();
789 while (S && !S->isSEHExceptScope())
790 S = S->getParent();
791 if (!S || !(S->getFlags() & NeededScopeFlags)) {
792 auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
793 SemaRef.Diag(TheCall->getExprLoc(), DiagID)
794 << DRE->getDecl()->getIdentifier();
795 return true;
796 }
797
798 return false;
799}
800
801static inline bool isBlockPointer(Expr *Arg) {
802 return Arg->getType()->isBlockPointerType();
803}
804
805/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
806/// void*, which is a requirement of device side enqueue.
807static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
808 const BlockPointerType *BPT =
809 cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
810 ArrayRef<QualType> Params =
811 BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
812 unsigned ArgCounter = 0;
813 bool IllegalParams = false;
814 // Iterate through the block parameters until either one is found that is not
815 // a local void*, or the block is valid.
816 for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
817 I != E; ++I, ++ArgCounter) {
818 if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
819 (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
820 LangAS::opencl_local) {
821 // Get the location of the error. If a block literal has been passed
822 // (BlockExpr) then we can point straight to the offending argument,
823 // else we just point to the variable reference.
824 SourceLocation ErrorLoc;
825 if (isa<BlockExpr>(BlockArg)) {
826 BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
827 ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
828 } else if (isa<DeclRefExpr>(BlockArg)) {
829 ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
830 }
831 S.Diag(ErrorLoc,
832 diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
833 IllegalParams = true;
834 }
835 }
836
837 return IllegalParams;
838}
839
840static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
841 if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts())) {
842 S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
843 << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
844 return true;
845 }
846 return false;
847}
848
849static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
850 if (checkArgCount(S, TheCall, 2))
851 return true;
852
853 if (checkOpenCLSubgroupExt(S, TheCall))
854 return true;
855
856 // First argument is an ndrange_t type.
857 Expr *NDRangeArg = TheCall->getArg(0);
858 if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
859 S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
860 << TheCall->getDirectCallee() << "'ndrange_t'";
861 return true;
862 }
863
864 Expr *BlockArg = TheCall->getArg(1);
865 if (!isBlockPointer(BlockArg)) {
866 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
867 << TheCall->getDirectCallee() << "block";
868 return true;
869 }
870 return checkOpenCLBlockArgs(S, BlockArg);
871}
872
873/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
874/// get_kernel_work_group_size
875/// and get_kernel_preferred_work_group_size_multiple builtin functions.
876static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
877 if (checkArgCount(S, TheCall, 1))
878 return true;
879
880 Expr *BlockArg = TheCall->getArg(0);
881 if (!isBlockPointer(BlockArg)) {
882 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
883 << TheCall->getDirectCallee() << "block";
884 return true;
885 }
886 return checkOpenCLBlockArgs(S, BlockArg);
887}
888
889/// Diagnose integer type and any valid implicit conversion to it.
890static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
891 const QualType &IntType);
892
893static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
894 unsigned Start, unsigned End) {
895 bool IllegalParams = false;
896 for (unsigned I = Start; I <= End; ++I)
897 IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
898 S.Context.getSizeType());
899 return IllegalParams;
900}
901
902/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
903/// 'local void*' parameters of the passed block.
904static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
905 Expr *BlockArg,
906 unsigned NumNonVarArgs) {
907 const BlockPointerType *BPT =
908 cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
909 unsigned NumBlockParams =
910 BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
911 unsigned TotalNumArgs = TheCall->getNumArgs();
912
913 // For each argument passed to the block, a corresponding uint needs to
914 // be passed to describe the size of the local memory.
915 if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
916 S.Diag(TheCall->getBeginLoc(),
917 diag::err_opencl_enqueue_kernel_local_size_args);
918 return true;
919 }
920
921 // Check that the sizes of the local memory are specified by integers.
922 return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
923 TotalNumArgs - 1);
924}
925
926/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
927/// overload formats specified in Table 6.13.17.1.
928/// int enqueue_kernel(queue_t queue,
929/// kernel_enqueue_flags_t flags,
930/// const ndrange_t ndrange,
931/// void (^block)(void))
932/// int enqueue_kernel(queue_t queue,
933/// kernel_enqueue_flags_t flags,
934/// const ndrange_t ndrange,
935/// uint num_events_in_wait_list,
936/// clk_event_t *event_wait_list,
937/// clk_event_t *event_ret,
938/// void (^block)(void))
939/// int enqueue_kernel(queue_t queue,
940/// kernel_enqueue_flags_t flags,
941/// const ndrange_t ndrange,
942/// void (^block)(local void*, ...),
943/// uint size0, ...)
944/// int enqueue_kernel(queue_t queue,
945/// kernel_enqueue_flags_t flags,
946/// const ndrange_t ndrange,
947/// uint num_events_in_wait_list,
948/// clk_event_t *event_wait_list,
949/// clk_event_t *event_ret,
950/// void (^block)(local void*, ...),
951/// uint size0, ...)
952static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
953 unsigned NumArgs = TheCall->getNumArgs();
954
955 if (NumArgs < 4) {
956 S.Diag(TheCall->getBeginLoc(),
957 diag::err_typecheck_call_too_few_args_at_least)
958 << 0 << 4 << NumArgs;
959 return true;
960 }
961
962 Expr *Arg0 = TheCall->getArg(0);
963 Expr *Arg1 = TheCall->getArg(1);
964 Expr *Arg2 = TheCall->getArg(2);
965 Expr *Arg3 = TheCall->getArg(3);
966
967 // First argument always needs to be a queue_t type.
968 if (!Arg0->getType()->isQueueT()) {
969 S.Diag(TheCall->getArg(0)->getBeginLoc(),
970 diag::err_opencl_builtin_expected_type)
971 << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
972 return true;
973 }
974
975 // Second argument always needs to be a kernel_enqueue_flags_t enum value.
976 if (!Arg1->getType()->isIntegerType()) {
977 S.Diag(TheCall->getArg(1)->getBeginLoc(),
978 diag::err_opencl_builtin_expected_type)
979 << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
980 return true;
981 }
982
983 // Third argument is always an ndrange_t type.
984 if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
985 S.Diag(TheCall->getArg(2)->getBeginLoc(),
986 diag::err_opencl_builtin_expected_type)
987 << TheCall->getDirectCallee() << "'ndrange_t'";
988 return true;
989 }
990
991 // With four arguments, there is only one form that the function could be
992 // called in: no events and no variable arguments.
993 if (NumArgs == 4) {
994 // check that the last argument is the right block type.
995 if (!isBlockPointer(Arg3)) {
996 S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
997 << TheCall->getDirectCallee() << "block";
998 return true;
999 }
1000 // we have a block type, check the prototype
1001 const BlockPointerType *BPT =
1002 cast<BlockPointerType>(Arg3->getType().getCanonicalType());
1003 if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
1004 S.Diag(Arg3->getBeginLoc(),
1005 diag::err_opencl_enqueue_kernel_blocks_no_args);
1006 return true;
1007 }
1008 return false;
1009 }
1010 // we can have block + varargs.
1011 if (isBlockPointer(Arg3))
1012 return (checkOpenCLBlockArgs(S, Arg3) ||
1013 checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
1014 // last two cases with either exactly 7 args or 7 args and varargs.
1015 if (NumArgs >= 7) {
1016 // check common block argument.
1017 Expr *Arg6 = TheCall->getArg(6);
1018 if (!isBlockPointer(Arg6)) {
1019 S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1020 << TheCall->getDirectCallee() << "block";
1021 return true;
1022 }
1023 if (checkOpenCLBlockArgs(S, Arg6))
1024 return true;
1025
1026 // Fourth argument has to be an integer type.
1027 if (!Arg3->getType()->isIntegerType()) {
1028 S.Diag(TheCall->getArg(3)->getBeginLoc(),
1029 diag::err_opencl_builtin_expected_type)
1030 << TheCall->getDirectCallee() << "integer";
1031 return true;
1032 }
1033 // check remaining common arguments.
1034 Expr *Arg4 = TheCall->getArg(4);
1035 Expr *Arg5 = TheCall->getArg(5);
1036
1037 // Fifth argument is always passed as a pointer to clk_event_t.
1038 if (!Arg4->isNullPointerConstant(S.Context,
1039 Expr::NPC_ValueDependentIsNotNull) &&
1040 !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
1041 S.Diag(TheCall->getArg(4)->getBeginLoc(),
1042 diag::err_opencl_builtin_expected_type)
1043 << TheCall->getDirectCallee()
1044 << S.Context.getPointerType(S.Context.OCLClkEventTy);
1045 return true;
1046 }
1047
1048 // Sixth argument is always passed as a pointer to clk_event_t.
1049 if (!Arg5->isNullPointerConstant(S.Context,
1050 Expr::NPC_ValueDependentIsNotNull) &&
1051 !(Arg5->getType()->isPointerType() &&
1052 Arg5->getType()->getPointeeType()->isClkEventT())) {
1053 S.Diag(TheCall->getArg(5)->getBeginLoc(),
1054 diag::err_opencl_builtin_expected_type)
1055 << TheCall->getDirectCallee()
1056 << S.Context.getPointerType(S.Context.OCLClkEventTy);
1057 return true;
1058 }
1059
1060 if (NumArgs == 7)
1061 return false;
1062
1063 return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
1064 }
1065
1066 // None of the specific cases was detected; give a generic error.
1067 S.Diag(TheCall->getBeginLoc(),
1068 diag::err_opencl_enqueue_kernel_incorrect_args);
1069 return true;
1070}
1071
1072/// Returns OpenCL access qual.
1073static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
1074 return D->getAttr<OpenCLAccessAttr>();
1075}
1076
1077/// Returns true if the first argument is not a pipe, or if its access
1077/// qualifier does not match the builtin call.
1078static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
1079 const Expr *Arg0 = Call->getArg(0);
1080 // First argument type should always be pipe.
1081 if (!Arg0->getType()->isPipeType()) {
1082 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1083 << Call->getDirectCallee() << Arg0->getSourceRange();
1084 return true;
1085 }
1086 OpenCLAccessAttr *AccessQual =
1087 getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
1088 // Validates the access qualifier is compatible with the call.
1089 // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
1090 // read_only and write_only, and assumed to be read_only if no qualifier is
1091 // specified.
1092 switch (Call->getDirectCallee()->getBuiltinID()) {
1093 case Builtin::BIread_pipe:
1094 case Builtin::BIreserve_read_pipe:
1095 case Builtin::BIcommit_read_pipe:
1096 case Builtin::BIwork_group_reserve_read_pipe:
1097 case Builtin::BIsub_group_reserve_read_pipe:
1098 case Builtin::BIwork_group_commit_read_pipe:
1099 case Builtin::BIsub_group_commit_read_pipe:
1100 if (!(!AccessQual || AccessQual->isReadOnly())) {
1101 S.Diag(Arg0->getBeginLoc(),
1102 diag::err_opencl_builtin_pipe_invalid_access_modifier)
1103 << "read_only" << Arg0->getSourceRange();
1104 return true;
1105 }
1106 break;
1107 case Builtin::BIwrite_pipe:
1108 case Builtin::BIreserve_write_pipe:
1109 case Builtin::BIcommit_write_pipe:
1110 case Builtin::BIwork_group_reserve_write_pipe:
1111 case Builtin::BIsub_group_reserve_write_pipe:
1112 case Builtin::BIwork_group_commit_write_pipe:
1113 case Builtin::BIsub_group_commit_write_pipe:
1114 if (!(AccessQual && AccessQual->isWriteOnly())) {
1115 S.Diag(Arg0->getBeginLoc(),
1116 diag::err_opencl_builtin_pipe_invalid_access_modifier)
1117 << "write_only" << Arg0->getSourceRange();
1118 return true;
1119 }
1120 break;
1121 default:
1122 break;
1123 }
1124 return false;
1125}
1126
1127/// Returns true if pipe element type is different from the pointer.
1128static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
1129 const Expr *Arg0 = Call->getArg(0);
1130 const Expr *ArgIdx = Call->getArg(Idx);
1131 const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
1132 const QualType EltTy = PipeTy->getElementType();
1133 const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
1134 // The Idx argument should be a pointer and the type of the pointer and
1135 // the type of pipe element should also be the same.
1136 if (!ArgTy ||
1137 !S.Context.hasSameType(
1138 EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
1139 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1140 << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
1141 << ArgIdx->getType() << ArgIdx->getSourceRange();
1142 return true;
1143 }
1144 return false;
1145}
1146
1147// Performs semantic analysis for the read/write_pipe call.
1148// \param S Reference to the semantic analyzer.
1149// \param Call A pointer to the builtin call.
1150// \return True if a semantic error has been found, false otherwise.
1151static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
1152 // OpenCL v2.0 s6.13.16.2 - The built-in read/write
1153 // functions have two forms.
1154 switch (Call->getNumArgs()) {
1155 case 2:
1156 if (checkOpenCLPipeArg(S, Call))
1157 return true;
1158 // The call with 2 arguments should be
1159 // read/write_pipe(pipe T, T*).
1160 // Check packet type T.
1161 if (checkOpenCLPipePacketType(S, Call, 1))
1162 return true;
1163 break;
1164
1165 case 4: {
1166 if (checkOpenCLPipeArg(S, Call))
1167 return true;
1168 // The call with 4 arguments should be
1169 // read/write_pipe(pipe T, reserve_id_t, uint, T*).
1170 // Check reserve_id_t.
1171 if (!Call->getArg(1)->getType()->isReserveIDT()) {
1172 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1173 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1174 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1175 return true;
1176 }
1177
1178 // Check the index.
1179 const Expr *Arg2 = Call->getArg(2);
1180 if (!Arg2->getType()->isIntegerType() &&
1181 !Arg2->getType()->isUnsignedIntegerType()) {
1182 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1183 << Call->getDirectCallee() << S.Context.UnsignedIntTy
1184 << Arg2->getType() << Arg2->getSourceRange();
1185 return true;
1186 }
1187
1188 // Check packet type T.
1189 if (checkOpenCLPipePacketType(S, Call, 3))
1190 return true;
1191 } break;
1192 default:
1193 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
1194 << Call->getDirectCallee() << Call->getSourceRange();
1195 return true;
1196 }
1197
1198 return false;
1199}
1200
1201// Performs a semantic analysis on the {work_group_/sub_group_
1202// /_}reserve_{read/write}_pipe
1203// \param S Reference to the semantic analyzer.
1204// \param Call The call to the builtin function to be analyzed.
1205// \return True if a semantic error was found, false otherwise.
1206static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
1207 if (checkArgCount(S, Call, 2))
1208 return true;
1209
1210 if (checkOpenCLPipeArg(S, Call))
1211 return true;
1212
1213 // Check the reserve size.
1214 if (!Call->getArg(1)->getType()->isIntegerType() &&
1215 !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
1216 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1217 << Call->getDirectCallee() << S.Context.UnsignedIntTy
1218 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1219 return true;
1220 }
1221
1222 // Since the return type of the reserve_read/write_pipe built-in functions is
1223 // reserve_id_t, which is not defined in the builtin def file, we used int
1224 // as the return type and need to override the return type of these functions.
1225 Call->setType(S.Context.OCLReserveIDTy);
1226
1227 return false;
1228}
1229
1230// Performs a semantic analysis on {work_group_/sub_group_
1231// /_}commit_{read/write}_pipe
1232// \param S Reference to the semantic analyzer.
1233// \param Call The call to the builtin function to be analyzed.
1234// \return True if a semantic error was found, false otherwise.
1235static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
1236 if (checkArgCount(S, Call, 2))
1237 return true;
1238
1239 if (checkOpenCLPipeArg(S, Call))
1240 return true;
1241
1242 // Check reserve_id_t.
1243 if (!Call->getArg(1)->getType()->isReserveIDT()) {
1244 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1245 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1246 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1247 return true;
1248 }
1249
1250 return false;
1251}
1252
1253// Performs a semantic analysis on the call to built-in Pipe
1254// Query Functions.
1255// \param S Reference to the semantic analyzer.
1256// \param Call The call to the builtin function to be analyzed.
1257// \return True if a semantic error was found, false otherwise.
1258static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
1259 if (checkArgCount(S, Call, 1))
1260 return true;
1261
1262 if (!Call->getArg(0)->getType()->isPipeType()) {
1263 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1264 << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
1265 return true;
1266 }
1267
1268 return false;
1269}
1270
1271// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
1272// Performs semantic analysis for the to_global/local/private call.
1273// \param S Reference to the semantic analyzer.
1274// \param BuiltinID ID of the builtin function.
1275// \param Call A pointer to the builtin call.
1276// \return True if a semantic error has been found, false otherwise.
1277static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
1278 CallExpr *Call) {
1279 if (checkArgCount(S, Call, 1))
1280 return true;
1281
1282 auto RT = Call->getArg(0)->getType();
1283 if (!RT->isPointerType() || RT->getPointeeType()
1284 .getAddressSpace() == LangAS::opencl_constant) {
1285 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
1286 << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
1287 return true;
1288 }
1289
1290 if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
1291 S.Diag(Call->getArg(0)->getBeginLoc(),
1292 diag::warn_opencl_generic_address_space_arg)
1293 << Call->getDirectCallee()->getNameInfo().getAsString()
1294 << Call->getArg(0)->getSourceRange();
1295 }
1296
1297 RT = RT->getPointeeType();
1298 auto Qual = RT.getQualifiers();
1299 switch (BuiltinID) {
1300 case Builtin::BIto_global:
1301 Qual.setAddressSpace(LangAS::opencl_global);
1302 break;
1303 case Builtin::BIto_local:
1304 Qual.setAddressSpace(LangAS::opencl_local);
1305 break;
1306 case Builtin::BIto_private:
1307 Qual.setAddressSpace(LangAS::opencl_private);
1308 break;
1309 default:
1310 llvm_unreachable("Invalid builtin function");
1311 }
1312 Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
1313 RT.getUnqualifiedType(), Qual)));
1314
1315 return false;
1316}
1317
1318static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
1319 if (checkArgCount(S, TheCall, 1))
1320 return ExprError();
1321
1322 // Compute __builtin_launder's parameter type from the argument.
1323 // The parameter type is:
1324 // * The type of the argument if it's not an array or function type,
1325 // Otherwise,
1326 // * The decayed argument type.
1327 QualType ParamTy = [&]() {
1328 QualType ArgTy = TheCall->getArg(0)->getType();
1329 if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
1330 return S.Context.getPointerType(Ty->getElementType());
1331 if (ArgTy->isFunctionType()) {
1332 return S.Context.getPointerType(ArgTy);
1333 }
1334 return ArgTy;
1335 }();
1336
1337 TheCall->setType(ParamTy);
1338
1339 auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
1340 if (!ParamTy->isPointerType())
1341 return 0;
1342 if (ParamTy->isFunctionPointerType())
1343 return 1;
1344 if (ParamTy->isVoidPointerType())
1345 return 2;
1346 return llvm::Optional<unsigned>{};
1347 }();
1348 if (DiagSelect.hasValue()) {
1349 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
1350 << DiagSelect.getValue() << TheCall->getSourceRange();
1351 return ExprError();
1352 }
1353
1354 // We either have an incomplete class type, or we have a class template
1355 // whose instantiation has not been forced. Example:
1356 //
1357 // template <class T> struct Foo { T value; };
1358 // Foo<int> *p = nullptr;
1359 // auto *d = __builtin_launder(p);
1360 if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
1361 diag::err_incomplete_type))
1362 return ExprError();
1363
1364 assert(ParamTy->getPointeeType()->isObjectType() &&
1365 "Unhandled non-object pointer case");
1366
1367 InitializedEntity Entity =
1368 InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
1369 ExprResult Arg =
1370 S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
1371 if (Arg.isInvalid())
1372 return ExprError();
1373 TheCall->setArg(0, Arg.get());
1374
1375 return TheCall;
1376}
1377
1378// Emit an error and return true if the current architecture is not in the list
1379// of supported architectures.
1380static bool
1381CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
1382 ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
1383 llvm::Triple::ArchType CurArch =
1384 S.getASTContext().getTargetInfo().getTriple().getArch();
1385 if (llvm::is_contained(SupportedArchs, CurArch))
1386 return false;
1387 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
1388 << TheCall->getSourceRange();
1389 return true;
1390}
1391
1392static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
1393 SourceLocation CallSiteLoc);
1394
1395bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
1396 CallExpr *TheCall) {
1397 switch (TI.getTriple().getArch()) {
1398 default:
1399 // Some builtins don't require additional checking, so just consider these
1400 // acceptable.
1401 return false;
1402 case llvm::Triple::arm:
1403 case llvm::Triple::armeb:
1404 case llvm::Triple::thumb:
1405 case llvm::Triple::thumbeb:
1406 return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
1407 case llvm::Triple::aarch64:
1408 case llvm::Triple::aarch64_32:
1409 case llvm::Triple::aarch64_be:
1410 return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
1411 case llvm::Triple::bpfeb:
1412 case llvm::Triple::bpfel:
1413 return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
1414 case llvm::Triple::hexagon:
1415 return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
1416 case llvm::Triple::mips:
1417 case llvm::Triple::mipsel:
1418 case llvm::Triple::mips64:
1419 case llvm::Triple::mips64el:
1420 return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
1421 case llvm::Triple::systemz:
1422 return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
1423 case llvm::Triple::x86:
1424 case llvm::Triple::x86_64:
1425 return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
1426 case llvm::Triple::ppc:
1427 case llvm::Triple::ppcle:
1428 case llvm::Triple::ppc64:
1429 case llvm::Triple::ppc64le:
1430 return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
1431 case llvm::Triple::amdgcn:
1432 return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
1433 case llvm::Triple::riscv32:
1434 case llvm::Triple::riscv64:
1435 return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
1436 }
1437}
1438
1439ExprResult
1440Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
1441 CallExpr *TheCall) {
1442 ExprResult TheCallResult(TheCall);
1443
1444 // Find out if any arguments are required to be integer constant expressions.
1445 unsigned ICEArguments = 0;
1446 ASTContext::GetBuiltinTypeError Error;
1447 Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
1448 if (Error != ASTContext::GE_None)
1449 ICEArguments = 0; // Don't diagnose previously diagnosed errors.
1450
1451 // If any arguments are required to be ICE's, check and diagnose.
1452 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
1453 // Skip arguments not required to be ICE's.
1454 if ((ICEArguments & (1 << ArgNo)) == 0) continue;
1455
1456 llvm::APSInt Result;
1457 if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
1458 return true;
1459 ICEArguments &= ~(1 << ArgNo);
1460 }
1461
1462 switch (BuiltinID) {
1463 case Builtin::BI__builtin___CFStringMakeConstantString:
1464 assert(TheCall->getNumArgs() == 1 &&
1465 "Wrong # arguments to builtin CFStringMakeConstantString");
1466 if (CheckObjCString(TheCall->getArg(0)))
1467 return ExprError();
1468 break;
1469 case Builtin::BI__builtin_ms_va_start:
1470 case Builtin::BI__builtin_stdarg_start:
1471 case Builtin::BI__builtin_va_start:
1472 if (SemaBuiltinVAStart(BuiltinID, TheCall))
1473 return ExprError();
1474 break;
1475 case Builtin::BI__va_start: {
1476 switch (Context.getTargetInfo().getTriple().getArch()) {
1477 case llvm::Triple::aarch64:
1478 case llvm::Triple::arm:
1479 case llvm::Triple::thumb:
1480 if (SemaBuiltinVAStartARMMicrosoft(TheCall))
1481 return ExprError();
1482 break;
1483 default:
1484 if (SemaBuiltinVAStart(BuiltinID, TheCall))
1485 return ExprError();
1486 break;
1487 }
1488 break;
1489 }
1490
1491 // The acquire, release, and no fence variants are ARM and AArch64 only.
1492 case Builtin::BI_interlockedbittestandset_acq:
1493 case Builtin::BI_interlockedbittestandset_rel:
1494 case Builtin::BI_interlockedbittestandset_nf:
1495 case Builtin::BI_interlockedbittestandreset_acq:
1496 case Builtin::BI_interlockedbittestandreset_rel:
1497 case Builtin::BI_interlockedbittestandreset_nf:
1498 if (CheckBuiltinTargetSupport(
1499 *this, BuiltinID, TheCall,
1500 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
1501 return ExprError();
1502 break;
1503
1504 // The 64-bit bittest variants are x64, ARM, and AArch64 only.
1505 case Builtin::BI_bittest64:
1506 case Builtin::BI_bittestandcomplement64:
1507 case Builtin::BI_bittestandreset64:
1508 case Builtin::BI_bittestandset64:
1509 case Builtin::BI_interlockedbittestandreset64:
1510 case Builtin::BI_interlockedbittestandset64:
1511 if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
1512 {llvm::Triple::x86_64, llvm::Triple::arm,
1513 llvm::Triple::thumb, llvm::Triple::aarch64}))
1514 return ExprError();
1515 break;
1516
1517 case Builtin::BI__builtin_isgreater:
1518 case Builtin::BI__builtin_isgreaterequal:
1519 case Builtin::BI__builtin_isless:
1520 case Builtin::BI__builtin_islessequal:
1521 case Builtin::BI__builtin_islessgreater:
1522 case Builtin::BI__builtin_isunordered:
1523 if (SemaBuiltinUnorderedCompare(TheCall))
1524 return ExprError();
1525 break;
1526 case Builtin::BI__builtin_fpclassify:
1527 if (SemaBuiltinFPClassification(TheCall, 6))
1528 return ExprError();
1529 break;
1530 case Builtin::BI__builtin_isfinite:
1531 case Builtin::BI__builtin_isinf:
1532 case Builtin::BI__builtin_isinf_sign:
1533 case Builtin::BI__builtin_isnan:
1534 case Builtin::BI__builtin_isnormal:
1535 case Builtin::BI__builtin_signbit:
1536 case Builtin::BI__builtin_signbitf:
1537 case Builtin::BI__builtin_signbitl:
1538 if (SemaBuiltinFPClassification(TheCall, 1))
1539 return ExprError();
1540 break;
1541 case Builtin::BI__builtin_shufflevector:
1542 return SemaBuiltinShuffleVector(TheCall);
1543 // TheCall will be freed by the smart pointer here, but that's fine, since
1544 // SemaBuiltinShuffleVector guts it, but then doesn't release it.
1545 case Builtin::BI__builtin_prefetch:
1546 if (SemaBuiltinPrefetch(TheCall))
1547 return ExprError();
1548 break;
1549 case Builtin::BI__builtin_alloca_with_align:
1550 if (SemaBuiltinAllocaWithAlign(TheCall))
1551 return ExprError();
1552 LLVM_FALLTHROUGH;
1553 case Builtin::BI__builtin_alloca:
1554 Diag(TheCall->getBeginLoc(), diag::warn_alloca)
1555 << TheCall->getDirectCallee();
1556 break;
1557 case Builtin::BI__arithmetic_fence:
1558 if (SemaBuiltinArithmeticFence(TheCall))
1559 return ExprError();
1560 break;
1561 case Builtin::BI__assume:
1562 case Builtin::BI__builtin_assume:
1563 if (SemaBuiltinAssume(TheCall))
1564 return ExprError();
1565 break;
1566 case Builtin::BI__builtin_assume_aligned:
1567 if (SemaBuiltinAssumeAligned(TheCall))
1568 return ExprError();
1569 break;
1570 case Builtin::BI__builtin_dynamic_object_size:
1571 case Builtin::BI__builtin_object_size:
1572 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
1573 return ExprError();
1574 break;
1575 case Builtin::BI__builtin_longjmp:
1576 if (SemaBuiltinLongjmp(TheCall))
1577 return ExprError();
1578 break;
1579 case Builtin::BI__builtin_setjmp:
1580 if (SemaBuiltinSetjmp(TheCall))
1581 return ExprError();
1582 break;
1583 case Builtin::BI__builtin_classify_type:
1584 if (checkArgCount(*this, TheCall, 1)) return true;
1585 TheCall->setType(Context.IntTy);
1586 break;
1587 case Builtin::BI__builtin_complex:
1588 if (SemaBuiltinComplex(TheCall))
1589 return ExprError();
1590 break;
1591 case Builtin::BI__builtin_constant_p: {
1592 if (checkArgCount(*this, TheCall, 1)) return true;
1593 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
1594 if (Arg.isInvalid()) return true;
1595 TheCall->setArg(0, Arg.get());
1596 TheCall->setType(Context.IntTy);
1597 break;
1598 }
1599 case Builtin::BI__builtin_launder:
1600 return SemaBuiltinLaunder(*this, TheCall);
1601 case Builtin::BI__sync_fetch_and_add:
1602 case Builtin::BI__sync_fetch_and_add_1:
1603 case Builtin::BI__sync_fetch_and_add_2:
1604 case Builtin::BI__sync_fetch_and_add_4:
1605 case Builtin::BI__sync_fetch_and_add_8:
1606 case Builtin::BI__sync_fetch_and_add_16:
1607 case Builtin::BI__sync_fetch_and_sub:
1608 case Builtin::BI__sync_fetch_and_sub_1:
1609 case Builtin::BI__sync_fetch_and_sub_2:
1610 case Builtin::BI__sync_fetch_and_sub_4:
1611 case Builtin::BI__sync_fetch_and_sub_8:
1612 case Builtin::BI__sync_fetch_and_sub_16:
1613 case Builtin::BI__sync_fetch_and_or:
1614 case Builtin::BI__sync_fetch_and_or_1:
1615 case Builtin::BI__sync_fetch_and_or_2:
1616 case Builtin::BI__sync_fetch_and_or_4:
1617 case Builtin::BI__sync_fetch_and_or_8:
1618 case Builtin::BI__sync_fetch_and_or_16:
1619 case Builtin::BI__sync_fetch_and_and:
1620 case Builtin::BI__sync_fetch_and_and_1:
1621 case Builtin::BI__sync_fetch_and_and_2:
1622 case Builtin::BI__sync_fetch_and_and_4:
1623 case Builtin::BI__sync_fetch_and_and_8:
1624 case Builtin::BI__sync_fetch_and_and_16:
1625 case Builtin::BI__sync_fetch_and_xor:
1626 case Builtin::BI__sync_fetch_and_xor_1:
1627 case Builtin::BI__sync_fetch_and_xor_2:
1628 case Builtin::BI__sync_fetch_and_xor_4:
1629 case Builtin::BI__sync_fetch_and_xor_8:
1630 case Builtin::BI__sync_fetch_and_xor_16:
1631 case Builtin::BI__sync_fetch_and_nand:
1632 case Builtin::BI__sync_fetch_and_nand_1:
1633 case Builtin::BI__sync_fetch_and_nand_2:
1634 case Builtin::BI__sync_fetch_and_nand_4:
1635 case Builtin::BI__sync_fetch_and_nand_8:
1636 case Builtin::BI__sync_fetch_and_nand_16:
1637 case Builtin::BI__sync_add_and_fetch:
1638 case Builtin::BI__sync_add_and_fetch_1:
1639 case Builtin::BI__sync_add_and_fetch_2:
1640 case Builtin::BI__sync_add_and_fetch_4:
1641 case Builtin::BI__sync_add_and_fetch_8:
1642 case Builtin::BI__sync_add_and_fetch_16:
1643 case Builtin::BI__sync_sub_and_fetch:
1644 case Builtin::BI__sync_sub_and_fetch_1:
1645 case Builtin::BI__sync_sub_and_fetch_2:
1646 case Builtin::BI__sync_sub_and_fetch_4:
1647 case Builtin::BI__sync_sub_and_fetch_8:
1648 case Builtin::BI__sync_sub_and_fetch_16:
1649 case Builtin::BI__sync_and_and_fetch:
1650 case Builtin::BI__sync_and_and_fetch_1:
1651 case Builtin::BI__sync_and_and_fetch_2:
1652 case Builtin::BI__sync_and_and_fetch_4:
1653 case Builtin::BI__sync_and_and_fetch_8:
1654 case Builtin::BI__sync_and_and_fetch_16:
1655 case Builtin::BI__sync_or_and_fetch:
1656 case Builtin::BI__sync_or_and_fetch_1:
1657 case Builtin::BI__sync_or_and_fetch_2:
1658 case Builtin::BI__sync_or_and_fetch_4:
1659 case Builtin::BI__sync_or_and_fetch_8:
1660 case Builtin::BI__sync_or_and_fetch_16:
1661 case Builtin::BI__sync_xor_and_fetch:
1662 case Builtin::BI__sync_xor_and_fetch_1:
1663 case Builtin::BI__sync_xor_and_fetch_2:
1664 case Builtin::BI__sync_xor_and_fetch_4:
1665 case Builtin::BI__sync_xor_and_fetch_8:
1666 case Builtin::BI__sync_xor_and_fetch_16:
1667 case Builtin::BI__sync_nand_and_fetch:
1668 case Builtin::BI__sync_nand_and_fetch_1:
1669 case Builtin::BI__sync_nand_and_fetch_2:
1670 case Builtin::BI__sync_nand_and_fetch_4:
1671 case Builtin::BI__sync_nand_and_fetch_8:
1672 case Builtin::BI__sync_nand_and_fetch_16:
1673 case Builtin::BI__sync_val_compare_and_swap:
1674 case Builtin::BI__sync_val_compare_and_swap_1:
1675 case Builtin::BI__sync_val_compare_and_swap_2:
1676 case Builtin::BI__sync_val_compare_and_swap_4:
1677 case Builtin::BI__sync_val_compare_and_swap_8:
1678 case Builtin::BI__sync_val_compare_and_swap_16:
1679 case Builtin::BI__sync_bool_compare_and_swap:
1680 case Builtin::BI__sync_bool_compare_and_swap_1:
1681 case Builtin::BI__sync_bool_compare_and_swap_2:
1682 case Builtin::BI__sync_bool_compare_and_swap_4:
1683 case Builtin::BI__sync_bool_compare_and_swap_8:
1684 case Builtin::BI__sync_bool_compare_and_swap_16:
1685 case Builtin::BI__sync_lock_test_and_set:
1686 case Builtin::BI__sync_lock_test_and_set_1:
1687 case Builtin::BI__sync_lock_test_and_set_2:
1688 case Builtin::BI__sync_lock_test_and_set_4:
1689 case Builtin::BI__sync_lock_test_and_set_8:
1690 case Builtin::BI__sync_lock_test_and_set_16:
1691 case Builtin::BI__sync_lock_release:
1692 case Builtin::BI__sync_lock_release_1:
1693 case Builtin::BI__sync_lock_release_2:
1694 case Builtin::BI__sync_lock_release_4:
1695 case Builtin::BI__sync_lock_release_8:
1696 case Builtin::BI__sync_lock_release_16:
1697 case Builtin::BI__sync_swap:
1698 case Builtin::BI__sync_swap_1:
1699 case Builtin::BI__sync_swap_2:
1700 case Builtin::BI__sync_swap_4:
1701 case Builtin::BI__sync_swap_8:
1702 case Builtin::BI__sync_swap_16:
1703 return SemaBuiltinAtomicOverloaded(TheCallResult);
1704 case Builtin::BI__sync_synchronize:
1705 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
1706 << TheCall->getCallee()->getSourceRange();
1707 break;
1708 case Builtin::BI__builtin_nontemporal_load:
1709 case Builtin::BI__builtin_nontemporal_store:
1710 return SemaBuiltinNontemporalOverloaded(TheCallResult);
1711 case Builtin::BI__builtin_memcpy_inline: {
1712 clang::Expr *SizeOp = TheCall->getArg(2);
1713 // We warn about copying to or from `nullptr` pointers when `size` is
1714 // greater than 0. When `size` is value dependent we cannot evaluate its
1715 // value so we bail out.
1716 if (SizeOp->isValueDependent())
1717 break;
1718 if (!SizeOp->EvaluateKnownConstInt(Context).isNullValue()) {
1719 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
1720 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc());
1721 }
1722 break;
1723 }
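// Illustration of the check above (an assumed caller, not taken from this
// file): with a non-zero constant size both pointer operands are checked for
// null, so something like
//
//   char dst[8];
//   __builtin_memcpy_inline(dst, (const char *)nullptr, 8);  // diagnosed
//   __builtin_memcpy_inline(dst, (const char *)nullptr, 0);  // size 0: not checked
//
// would be expected to warn only on the first call.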
1724#define BUILTIN(ID, TYPE, ATTRS)
1725#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
1726 case Builtin::BI##ID: \
1727 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
1728#include "clang/Basic/Builtins.def"
1729 case Builtin::BI__annotation:
1730 if (SemaBuiltinMSVCAnnotation(*this, TheCall))
1731 return ExprError();
1732 break;
1733 case Builtin::BI__builtin_annotation:
1734 if (SemaBuiltinAnnotation(*this, TheCall))
1735 return ExprError();
1736 break;
1737 case Builtin::BI__builtin_addressof:
1738 if (SemaBuiltinAddressof(*this, TheCall))
1739 return ExprError();
1740 break;
1741 case Builtin::BI__builtin_is_aligned:
1742 case Builtin::BI__builtin_align_up:
1743 case Builtin::BI__builtin_align_down:
1744 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID))
1745 return ExprError();
1746 break;
1747 case Builtin::BI__builtin_add_overflow:
1748 case Builtin::BI__builtin_sub_overflow:
1749 case Builtin::BI__builtin_mul_overflow:
1750 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID))
1751 return ExprError();
1752 break;
1753 case Builtin::BI__builtin_operator_new:
1754 case Builtin::BI__builtin_operator_delete: {
1755 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
1756 ExprResult Res =
1757 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
1758 if (Res.isInvalid())
1759 CorrectDelayedTyposInExpr(TheCallResult.get());
1760 return Res;
1761 }
1762 case Builtin::BI__builtin_dump_struct: {
1763 // We first want to ensure we are called with 2 arguments
1764 if (checkArgCount(*this, TheCall, 2))
1765 return ExprError();
1766 // Ensure that the first argument is of type 'struct XX *'
1767 const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts();
1768 const QualType PtrArgType = PtrArg->getType();
1769 if (!PtrArgType->isPointerType() ||
1770 !PtrArgType->getPointeeType()->isRecordType()) {
1771 Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1772 << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType
1773 << "structure pointer";
1774 return ExprError();
1775 }
1776
1777 // Ensure that the second argument is of type 'FunctionType'
1778 const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts();
1779 const QualType FnPtrArgType = FnPtrArg->getType();
1780 if (!FnPtrArgType->isPointerType()) {
1781 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1782 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
1783 << FnPtrArgType << "'int (*)(const char *, ...)'";
1784 return ExprError();
1785 }
1786
1787 const auto *FuncType =
1788 FnPtrArgType->getPointeeType()->getAs<FunctionType>();
1789
1790 if (!FuncType) {
1791 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1792 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
1793 << FnPtrArgType << "'int (*)(const char *, ...)'";
1794 return ExprError();
1795 }
1796
1797 if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) {
1798 if (!FT->getNumParams()) {
1799 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1800 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
1801 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1802 return ExprError();
1803 }
1804 QualType PT = FT->getParamType(0);
1805 if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
1806 !PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
1807 !PT->getPointeeType().isConstQualified()) {
1808 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1809 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
1810 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1811 return ExprError();
1812 }
1813 }
1814
1815 TheCall->setType(Context.IntTy);
1816 break;
1817 }
1818 case Builtin::BI__builtin_expect_with_probability: {
1819 // We first want to ensure we are called with 3 arguments
1820 if (checkArgCount(*this, TheCall, 3))
1821 return ExprError();
1822 // then check probability is constant float in range [0.0, 1.0]
1823 const Expr *ProbArg = TheCall->getArg(2);
1824 SmallVector<PartialDiagnosticAt, 8> Notes;
1825 Expr::EvalResult Eval;
1826 Eval.Diag = &Notes;
1827 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) ||
1828 !Eval.Val.isFloat()) {
1829 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float)
1830 << ProbArg->getSourceRange();
1831 for (const PartialDiagnosticAt &PDiag : Notes)
1832 Diag(PDiag.first, PDiag.second);
1833 return ExprError();
1834 }
1835 llvm::APFloat Probability = Eval.Val.getFloat();
1836 bool LoseInfo = false;
1837 Probability.convert(llvm::APFloat::IEEEdouble(),
1838 llvm::RoundingMode::Dynamic, &LoseInfo);
1839 if (!(Probability >= llvm::APFloat(0.0) &&
1840 Probability <= llvm::APFloat(1.0))) {
1841 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range)
1842 << ProbArg->getSourceRange();
1843 return ExprError();
1844 }
1845 break;
1846 }
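// Sketch of what the probability check above accepts (assumed usage):
//
//   __builtin_expect_with_probability(x == 0, 1, 0.9);  // OK: constant in [0.0, 1.0]
//   __builtin_expect_with_probability(x == 0, 1, 1.5);  // rejected: out of range
//   __builtin_expect_with_probability(x == 0, 1, p);    // rejected unless p folds
//                                                       // to a constant float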
1847 case Builtin::BI__builtin_preserve_access_index:
1848 if (SemaBuiltinPreserveAI(*this, TheCall))
1849 return ExprError();
1850 break;
1851 case Builtin::BI__builtin_call_with_static_chain:
1852 if (SemaBuiltinCallWithStaticChain(*this, TheCall))
1853 return ExprError();
1854 break;
1855 case Builtin::BI__exception_code:
1856 case Builtin::BI_exception_code:
1857 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
1858 diag::err_seh___except_block))
1859 return ExprError();
1860 break;
1861 case Builtin::BI__exception_info:
1862 case Builtin::BI_exception_info:
1863 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
1864 diag::err_seh___except_filter))
1865 return ExprError();
1866 break;
1867 case Builtin::BI__GetExceptionInfo:
1868 if (checkArgCount(*this, TheCall, 1))
1869 return ExprError();
1870
1871 if (CheckCXXThrowOperand(
1872 TheCall->getBeginLoc(),
1873 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
1874 TheCall))
1875 return ExprError();
1876
1877 TheCall->setType(Context.VoidPtrTy);
1878 break;
1879 // OpenCL v2.0, s6.13.16 - Pipe functions
1880 case Builtin::BIread_pipe:
1881 case Builtin::BIwrite_pipe:
1882 // Since those two functions are declared with var args, we need a semantic
1883 // check for the argument.
1884 if (SemaBuiltinRWPipe(*this, TheCall))
1885 return ExprError();
1886 break;
1887 case Builtin::BIreserve_read_pipe:
1888 case Builtin::BIreserve_write_pipe:
1889 case Builtin::BIwork_group_reserve_read_pipe:
1890 case Builtin::BIwork_group_reserve_write_pipe:
1891 if (SemaBuiltinReserveRWPipe(*this, TheCall))
1892 return ExprError();
1893 break;
1894 case Builtin::BIsub_group_reserve_read_pipe:
1895 case Builtin::BIsub_group_reserve_write_pipe:
1896 if (checkOpenCLSubgroupExt(*this, TheCall) ||
1897 SemaBuiltinReserveRWPipe(*this, TheCall))
1898 return ExprError();
1899 break;
1900 case Builtin::BIcommit_read_pipe:
1901 case Builtin::BIcommit_write_pipe:
1902 case Builtin::BIwork_group_commit_read_pipe:
1903 case Builtin::BIwork_group_commit_write_pipe:
1904 if (SemaBuiltinCommitRWPipe(*this, TheCall))
1905 return ExprError();
1906 break;
1907 case Builtin::BIsub_group_commit_read_pipe:
1908 case Builtin::BIsub_group_commit_write_pipe:
1909 if (checkOpenCLSubgroupExt(*this, TheCall) ||
1910 SemaBuiltinCommitRWPipe(*this, TheCall))
1911 return ExprError();
1912 break;
1913 case Builtin::BIget_pipe_num_packets:
1914 case Builtin::BIget_pipe_max_packets:
1915 if (SemaBuiltinPipePackets(*this, TheCall))
1916 return ExprError();
1917 break;
1918 case Builtin::BIto_global:
1919 case Builtin::BIto_local:
1920 case Builtin::BIto_private:
1921 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
1922 return ExprError();
1923 break;
1924 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
1925 case Builtin::BIenqueue_kernel:
1926 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
1927 return ExprError();
1928 break;
1929 case Builtin::BIget_kernel_work_group_size:
1930 case Builtin::BIget_kernel_preferred_work_group_size_multiple:
1931 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
1932 return ExprError();
1933 break;
1934 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
1935 case Builtin::BIget_kernel_sub_group_count_for_ndrange:
1936 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
1937 return ExprError();
1938 break;
1939 case Builtin::BI__builtin_os_log_format:
1940 Cleanup.setExprNeedsCleanups(true);
1941 LLVM_FALLTHROUGH;
1942 case Builtin::BI__builtin_os_log_format_buffer_size:
1943 if (SemaBuiltinOSLogFormat(TheCall))
1944 return ExprError();
1945 break;
1946 case Builtin::BI__builtin_frame_address:
1947 case Builtin::BI__builtin_return_address: {
1948 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF))
1949 return ExprError();
1950
1951 // -Wframe-address warning if non-zero passed to builtin
1952 // return/frame address.
1953 Expr::EvalResult Result;
1954 if (!TheCall->getArg(0)->isValueDependent() &&
1955 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) &&
1956 Result.Val.getInt() != 0)
1957 Diag(TheCall->getBeginLoc(), diag::warn_frame_address)
1958 << ((BuiltinID == Builtin::BI__builtin_return_address)
1959 ? "__builtin_return_address"
1960 : "__builtin_frame_address")
1961 << TheCall->getSourceRange();
1962 break;
1963 }
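// Assumed example of the -Wframe-address diagnostic emitted above:
//
//   void *caller = __builtin_return_address(0);  // OK, level 0 is not flagged
//   void *grand  = __builtin_return_address(1);  // warns: a non-zero level is
//                                                // unreliable on many targets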
1964
1965 case Builtin::BI__builtin_matrix_transpose:
1966 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult);
1967
1968 case Builtin::BI__builtin_matrix_column_major_load:
1969 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult);
1970
1971 case Builtin::BI__builtin_matrix_column_major_store:
1972 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult);
1973
1974 case Builtin::BI__builtin_get_device_side_mangled_name: {
1975 auto Check = [](CallExpr *TheCall) {
1976 if (TheCall->getNumArgs() != 1)
1977 return false;
1978 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts());
1979 if (!DRE)
1980 return false;
1981 auto *D = DRE->getDecl();
1982 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D))
1983 return false;
1984 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() ||
1985 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>();
1986 };
1987 if (!Check(TheCall)) {
1988 Diag(TheCall->getBeginLoc(),
1989 diag::err_hip_invalid_args_builtin_mangled_name);
1990 return ExprError();
1991 }
1992 }
1993 }
1994
1995 // Since the target specific builtins for each arch overlap, only check those
1996 // of the arch we are compiling for.
1997 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
1998 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
1999 assert(Context.getAuxTargetInfo() &&
2000 "Aux Target Builtin, but not an aux target?");
2001
2002 if (CheckTSBuiltinFunctionCall(
2003 *Context.getAuxTargetInfo(),
2004 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall))
2005 return ExprError();
2006 } else {
2007 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID,
2008 TheCall))
2009 return ExprError();
2010 }
2011 }
2012
2013 return TheCallResult;
2014}
2015
2016// Get the valid immediate range for the specified NEON type code.
2017static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
2018 NeonTypeFlags Type(t);
2019 int IsQuad = ForceQuad ? true : Type.isQuad();
2020 switch (Type.getEltType()) {
2021 case NeonTypeFlags::Int8:
2022 case NeonTypeFlags::Poly8:
2023 return shift ? 7 : (8 << IsQuad) - 1;
2024 case NeonTypeFlags::Int16:
2025 case NeonTypeFlags::Poly16:
2026 return shift ? 15 : (4 << IsQuad) - 1;
2027 case NeonTypeFlags::Int32:
2028 return shift ? 31 : (2 << IsQuad) - 1;
2029 case NeonTypeFlags::Int64:
2030 case NeonTypeFlags::Poly64:
2031 return shift ? 63 : (1 << IsQuad) - 1;
2032 case NeonTypeFlags::Poly128:
2033 return shift ? 127 : (1 << IsQuad) - 1;
2034 case NeonTypeFlags::Float16:
2035 assert(!shift && "cannot shift float types!");
2036 return (4 << IsQuad) - 1;
2037 case NeonTypeFlags::Float32:
2038 assert(!shift && "cannot shift float types!");
2039 return (2 << IsQuad) - 1;
2040 case NeonTypeFlags::Float64:
2041 assert(!shift && "cannot shift float types!");
2042 return (1 << IsQuad) - 1;
2043 case NeonTypeFlags::BFloat16:
2044 assert(!shift && "cannot shift float types!");
2045 return (4 << IsQuad) - 1;
2046 }
2047 llvm_unreachable("Invalid NeonTypeFlag!");
2048}
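// Worked example of the bounds computed above (assuming the usual NEON vector
// shapes): for Int32 elements the last valid lane is (2 << IsQuad) - 1, i.e.
// 1 for a 64-bit int32x2_t and 3 for a 128-bit int32x4_t, while with
// shift == true the bound is the element width minus one (31 for 32 bits).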
2049
2050/// getNeonEltType - Return the QualType corresponding to the elements of
2051/// the vector type specified by the NeonTypeFlags. This is used to check
2052/// the pointer arguments for Neon load/store intrinsics.
2053static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
2054 bool IsPolyUnsigned, bool IsInt64Long) {
2055 switch (Flags.getEltType()) {
2056 case NeonTypeFlags::Int8:
2057 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
2058 case NeonTypeFlags::Int16:
2059 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
2060 case NeonTypeFlags::Int32:
2061 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
2062 case NeonTypeFlags::Int64:
2063 if (IsInt64Long)
2064 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
2065 else
2066 return Flags.isUnsigned() ? Context.UnsignedLongLongTy
2067 : Context.LongLongTy;
2068 case NeonTypeFlags::Poly8:
2069 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
2070 case NeonTypeFlags::Poly16:
2071 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
2072 case NeonTypeFlags::Poly64:
2073 if (IsInt64Long)
2074 return Context.UnsignedLongTy;
2075 else
2076 return Context.UnsignedLongLongTy;
2077 case NeonTypeFlags::Poly128:
2078 break;
2079 case NeonTypeFlags::Float16:
2080 return Context.HalfTy;
2081 case NeonTypeFlags::Float32:
2082 return Context.FloatTy;
2083 case NeonTypeFlags::Float64:
2084 return Context.DoubleTy;
2085 case NeonTypeFlags::BFloat16:
2086 return Context.BFloat16Ty;
2087 }
2088 llvm_unreachable("Invalid NeonTypeFlag!");
2089}
2090
2091bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
2092 // Range check SVE intrinsics that take immediate values.
2093 SmallVector<std::tuple<int,int,int>, 3> ImmChecks;
2094
2095 switch (BuiltinID) {
2096 default:
2097 return false;
2098#define GET_SVE_IMMEDIATE_CHECK
2099#include "clang/Basic/arm_sve_sema_rangechecks.inc"
2100#undef GET_SVE_IMMEDIATE_CHECK
2101 }
2102
2103 // Perform all the immediate checks for this builtin call.
2104 bool HasError = false;
2105 for (auto &I : ImmChecks) {
2106 int ArgNum, CheckTy, ElementSizeInBits;
2107 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;
2108
2109 typedef bool(*OptionSetCheckFnTy)(int64_t Value);
2110
2111 // Function that checks whether the operand (ArgNum) is an immediate
2112 // that is one of the predefined values.
2113 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
2114 int ErrDiag) -> bool {
2115 // We can't check the value of a dependent argument.
2116 Expr *Arg = TheCall->getArg(ArgNum);
2117 if (Arg->isTypeDependent() || Arg->isValueDependent())
2118 return false;
2119
2120 // Check constant-ness first.
2121 llvm::APSInt Imm;
2122 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm))
2123 return true;
2124
2125 if (!CheckImm(Imm.getSExtValue()))
2126 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
2127 return false;
2128 };
2129
2130 switch ((SVETypeFlags::ImmCheckType)CheckTy) {
2131 case SVETypeFlags::ImmCheck0_31:
2132 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
2133 HasError = true;
2134 break;
2135 case SVETypeFlags::ImmCheck0_13:
2136 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
2137 HasError = true;
2138 break;
2139 case SVETypeFlags::ImmCheck1_16:
2140 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
2141 HasError = true;
2142 break;
2143 case SVETypeFlags::ImmCheck0_7:
2144 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
2145 HasError = true;
2146 break;
2147 case SVETypeFlags::ImmCheckExtract:
2148 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2149 (2048 / ElementSizeInBits) - 1))
2150 HasError = true;
2151 break;
2152 case SVETypeFlags::ImmCheckShiftRight:
2153 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
2154 HasError = true;
2155 break;
2156 case SVETypeFlags::ImmCheckShiftRightNarrow:
2157 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1,
2158 ElementSizeInBits / 2))
2159 HasError = true;
2160 break;
2161 case SVETypeFlags::ImmCheckShiftLeft:
2162 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2163 ElementSizeInBits - 1))
2164 HasError = true;
2165 break;
2166 case SVETypeFlags::ImmCheckLaneIndex:
2167 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2168 (128 / (1 * ElementSizeInBits)) - 1))
2169 HasError = true;
2170 break;
2171 case SVETypeFlags::ImmCheckLaneIndexCompRotate:
2172 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2173 (128 / (2 * ElementSizeInBits)) - 1))
2174 HasError = true;
2175 break;
2176 case SVETypeFlags::ImmCheckLaneIndexDot:
2177 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2178 (128 / (4 * ElementSizeInBits)) - 1))
2179 HasError = true;
2180 break;
2181 case SVETypeFlags::ImmCheckComplexRot90_270:
2182 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
2183 diag::err_rotation_argument_to_cadd))
2184 HasError = true;
2185 break;
2186 case SVETypeFlags::ImmCheckComplexRotAll90:
2187 if (CheckImmediateInSet(
2188 [](int64_t V) {
2189 return V == 0 || V == 90 || V == 180 || V == 270;
2190 },
2191 diag::err_rotation_argument_to_cmla))
2192 HasError = true;
2193 break;
2194 case SVETypeFlags::ImmCheck0_1:
2195 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
2196 HasError = true;
2197 break;
2198 case SVETypeFlags::ImmCheck0_2:
2199 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
2200 HasError = true;
2201 break;
2202 case SVETypeFlags::ImmCheck0_3:
2203 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
2204 HasError = true;
2205 break;
2206 }
2207 }
2208
2209 return HasError;
2210}
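// Worked example of two of the ranges above (derived from the formulas, not
// from a specific intrinsic): ImmCheckExtract with 32-bit elements accepts
// immediates in 0 .. (2048 / 32) - 1 = 63, and ImmCheckLaneIndex with 16-bit
// elements accepts 0 .. (128 / 16) - 1 = 7.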
2211
2212bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
2213 unsigned BuiltinID, CallExpr *TheCall) {
2214 llvm::APSInt Result;
2215 uint64_t mask = 0;
2216 unsigned TV = 0;
2217 int PtrArgNum = -1;
2218 bool HasConstPtr = false;
2219 switch (BuiltinID) {
2220#define GET_NEON_OVERLOAD_CHECK
2221#include "clang/Basic/arm_neon.inc"
2222#include "clang/Basic/arm_fp16.inc"
2223#undef GET_NEON_OVERLOAD_CHECK
2224 }
2225
2226 // For NEON intrinsics which are overloaded on vector element type, validate
2227 // the immediate which specifies which variant to emit.
2228 unsigned ImmArg = TheCall->getNumArgs()-1;
2229 if (mask) {
2230 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
2231 return true;
2232
2233 TV = Result.getLimitedValue(64);
2234 if ((TV > 63) || (mask & (1ULL << TV)) == 0)
2235 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
2236 << TheCall->getArg(ImmArg)->getSourceRange();
2237 }
2238
2239 if (PtrArgNum >= 0) {
2240 // Check that pointer arguments have the specified type.
2241 Expr *Arg = TheCall->getArg(PtrArgNum);
2242 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
2243 Arg = ICE->getSubExpr();
2244 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
2245 QualType RHSTy = RHS.get()->getType();
2246
2247 llvm::Triple::ArchType Arch = TI.getTriple().getArch();
2248 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
2249 Arch == llvm::Triple::aarch64_32 ||
2250 Arch == llvm::Triple::aarch64_be;
2251 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
2252 QualType EltTy =
2253 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
2254 if (HasConstPtr)
2255 EltTy = EltTy.withConst();
2256 QualType LHSTy = Context.getPointerType(EltTy);
2257 AssignConvertType ConvTy;
2258 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
2259 if (RHS.isInvalid())
2260 return true;
2261 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
2262 RHS.get(), AA_Assigning))
2263 return true;
2264 }
2265
2266 // For NEON intrinsics which take an immediate value as part of the
2267 // instruction, range check them here.
2268 unsigned i = 0, l = 0, u = 0;
2269 switch (BuiltinID) {
2270 default:
2271 return false;
2272 #define GET_NEON_IMMEDIATE_CHECK
2273 #include "clang/Basic/arm_neon.inc"
2274 #include "clang/Basic/arm_fp16.inc"
2275 #undef GET_NEON_IMMEDIATE_CHECK
2276 }
2277
2278 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
2279}
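// Assumed illustration of the lane-index range check performed above, using a
// standard <arm_neon.h> intrinsic:
//
//   int32x4_t v = vdupq_n_s32(0);
//   int ok  = vgetq_lane_s32(v, 3);  // within 0..3, accepted
//   int bad = vgetq_lane_s32(v, 4);  // constant out of range, diagnosed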
2280
2281bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
2282 switch (BuiltinID) {
2283 default:
2284 return false;
2285 #include "clang/Basic/arm_mve_builtin_sema.inc"
2286 }
2287}
2288
2289bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
2290 CallExpr *TheCall) {
2291 bool Err = false;
2292 switch (BuiltinID) {
2293 default:
2294 return false;
2295#include "clang/Basic/arm_cde_builtin_sema.inc"
2296 }
2297
2298 if (Err)
2299 return true;
2300
2301 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
2302}
2303
2304bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI,
2305 const Expr *CoprocArg, bool WantCDE) {
2306 if (isConstantEvaluated())
2307 return false;
2308
2309 // We can't check the value of a dependent argument.
2310 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
2311 return false;
2312
2313 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
2314 int64_t CoprocNo = CoprocNoAP.getExtValue();
2315 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");
2316
2317 uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
2318 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
2319
2320 if (IsCDECoproc != WantCDE)
2321 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
2322 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
2323
2324 return false;
2325}
2326
2327bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
2328 unsigned MaxWidth) {
2329 assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
2330 BuiltinID == ARM::BI__builtin_arm_ldaex ||
2331 BuiltinID == ARM::BI__builtin_arm_strex ||
2332 BuiltinID == ARM::BI__builtin_arm_stlex ||
2333 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
2334 BuiltinID == AArch64::BI__builtin_arm_ldaex ||
2335 BuiltinID == AArch64::BI__builtin_arm_strex ||
2336 BuiltinID == AArch64::BI__builtin_arm_stlex) &&
2337 "unexpected ARM builtin");
2338 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
2339 BuiltinID == ARM::BI__builtin_arm_ldaex ||
2340 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
2341 BuiltinID == AArch64::BI__builtin_arm_ldaex;
2342
2343 DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
2344
2345 // Ensure that we have the proper number of arguments.
2346 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
2347 return true;
2348
2349 // Inspect the pointer argument of the atomic builtin. This should always be
2350 // a pointer type, whose element is an integral scalar or pointer type.
2351 // Because it is a pointer type, we don't have to worry about any implicit
2352 // casts here.
2353 Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
2354 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
2355 if (PointerArgRes.isInvalid())
2356 return true;
2357 PointerArg = PointerArgRes.get();
2358
2359 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
2360 if (!pointerType) {
2361 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
2362 << PointerArg->getType() << PointerArg->getSourceRange();
2363 return true;
2364 }
2365
2366 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
2367 // task is to insert the appropriate casts into the AST. First work out just
2368 // what the appropriate type is.
2369 QualType ValType = pointerType->getPointeeType();
2370 QualType AddrType = ValType.getUnqualifiedType().withVolatile();
2371 if (IsLdrex)
2372 AddrType.addConst();
2373
2374 // Issue a warning if the cast is dodgy.
2375 CastKind CastNeeded = CK_NoOp;
2376 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
2377 CastNeeded = CK_BitCast;
2378 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
2379 << PointerArg->getType() << Context.getPointerType(AddrType)
2380 << AA_Passing << PointerArg->getSourceRange();
2381 }
2382
2383 // Finally, do the cast and replace the argument with the corrected version.
2384 AddrType = Context.getPointerType(AddrType);
2385 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
2386 if (PointerArgRes.isInvalid())
2387 return true;
2388 PointerArg = PointerArgRes.get();
2389
2390 TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);
2391
2392 // In general, we allow ints, floats and pointers to be loaded and stored.
2393 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
2394 !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
2395 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
2396 << PointerArg->getType() << PointerArg->getSourceRange();
2397 return true;
2398 }
2399
2400 // But ARM doesn't have instructions to deal with 128-bit versions.
2401 if (Context.getTypeSize(ValType) > MaxWidth) {
2402 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
2403 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
2404 << PointerArg->getType() << PointerArg->getSourceRange();
2405 return true;
2406 }
2407
2408 switch (ValType.getObjCLifetime()) {
2409 case Qualifiers::OCL_None:
2410 case Qualifiers::OCL_ExplicitNone:
2411 // okay
2412 break;
2413
2414 case Qualifiers::OCL_Weak:
2415 case Qualifiers::OCL_Strong:
2416 case Qualifiers::OCL_Autoreleasing:
2417 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
2418 << ValType << PointerArg->getSourceRange();
2419 return true;
2420 }
2421
2422 if (IsLdrex) {
2423 TheCall->setType(ValType);
2424 return false;
2425 }
2426
2427 // Initialize the argument to be stored.
2428 ExprResult ValArg = TheCall->getArg(0);
2429 InitializedEntity Entity = InitializedEntity::InitializeParameter(
2430 Context, ValType, /*consume*/ false);
2431 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
2432 if (ValArg.isInvalid())
2433 return true;
2434 TheCall->setArg(0, ValArg.get());
2435
2436 // __builtin_arm_strex always returns an int. It's marked as such in the .def,
2437 // but the custom checker bypasses all default analysis.
2438 TheCall->setType(Context.IntTy);
2439 return false;
2440}
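// Minimal sketch of the builtins this routine rewrites (assumed usage;
// get_lock_word is a hypothetical helper):
//
//   volatile int *p = get_lock_word();
//   int old    = __builtin_arm_ldrex(p);           // result typed from the pointee
//   int failed = __builtin_arm_strex(old + 1, p);  // always returns int, 0 on success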
2441
2442bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
2443 CallExpr *TheCall) {
2444 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
2445 BuiltinID == ARM::BI__builtin_arm_ldaex ||
2446 BuiltinID == ARM::BI__builtin_arm_strex ||
2447 BuiltinID == ARM::BI__builtin_arm_stlex) {
2448 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
2449 }
2450
2451 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
2452 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
2453 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
2454 }
2455
2456 if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
2457 BuiltinID == ARM::BI__builtin_arm_wsr64)
2458 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);
2459
2460 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
2461 BuiltinID == ARM::BI__builtin_arm_rsrp ||
2462 BuiltinID == ARM::BI__builtin_arm_wsr ||
2463 BuiltinID == ARM::BI__builtin_arm_wsrp)
2464 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
2465
2466 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
2467 return true;
2468 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
2469 return true;
2470 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
2471 return true;
2472
2473 // For intrinsics which take an immediate value as part of the instruction,
2474 // range check them here.
2475 // FIXME: VFP Intrinsics should error if VFP not present.
2476 switch (BuiltinID) {
2477 default: return false;
2478 case ARM::BI__builtin_arm_ssat:
2479 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
2480 case ARM::BI__builtin_arm_usat:
2481 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
2482 case ARM::BI__builtin_arm_ssat16:
2483 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
2484 case ARM::BI__builtin_arm_usat16:
2485 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
2486 case ARM::BI__builtin_arm_vcvtr_f:
2487 case ARM::BI__builtin_arm_vcvtr_d:
2488 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
2489 case ARM::BI__builtin_arm_dmb:
2490 case ARM::BI__builtin_arm_dsb:
2491 case ARM::BI__builtin_arm_isb:
2492 case ARM::BI__builtin_arm_dbg:
2493 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
2494 case ARM::BI__builtin_arm_cdp:
2495 case ARM::BI__builtin_arm_cdp2:
2496 case ARM::BI__builtin_arm_mcr:
2497 case ARM::BI__builtin_arm_mcr2:
2498 case ARM::BI__builtin_arm_mrc:
2499 case ARM::BI__builtin_arm_mrc2:
2500 case ARM::BI__builtin_arm_mcrr:
2501 case ARM::BI__builtin_arm_mcrr2:
2502 case ARM::BI__builtin_arm_mrrc:
2503 case ARM::BI__builtin_arm_mrrc2:
2504 case ARM::BI__builtin_arm_ldc:
2505 case ARM::BI__builtin_arm_ldcl:
2506 case ARM::BI__builtin_arm_ldc2:
2507 case ARM::BI__builtin_arm_ldc2l:
2508 case ARM::BI__builtin_arm_stc:
2509 case ARM::BI__builtin_arm_stcl:
2510 case ARM::BI__builtin_arm_stc2:
2511 case ARM::BI__builtin_arm_stc2l:
2512 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) ||
2513 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
2514 /*WantCDE*/ false);
2515 }
2516}
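// Assumed example of the saturation range checks listed above:
//
//   int a = __builtin_arm_ssat(x, 8);   // OK: signed saturation width in 1..32
//   int b = __builtin_arm_ssat(x, 0);   // rejected: 0 is outside 1..32
//   int c = __builtin_arm_usat(x, 31);  // OK: unsigned width in 0..31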
2517
2518bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
2519 unsigned BuiltinID,
2520 CallExpr *TheCall) {
2521 if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
2522 BuiltinID == AArch64::BI__builtin_arm_ldaex ||
2523 BuiltinID == AArch64::BI__builtin_arm_strex ||
2524 BuiltinID == AArch64::BI__builtin_arm_stlex) {
2525 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
2526 }
2527
2528 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
2529 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
2530 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) ||
2531 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
2532 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
2533 }
2534
2535 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
2536 BuiltinID == AArch64::BI__builtin_arm_wsr64)
2537 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
2538
2539 // Memory Tagging Extensions (MTE) Intrinsics
2540 if (BuiltinID == AArch64::BI__builtin_arm_irg ||
2541 BuiltinID == AArch64::BI__builtin_arm_addg ||
2542 BuiltinID == AArch64::BI__builtin_arm_gmi ||
2543 BuiltinID == AArch64::BI__builtin_arm_ldg ||
2544 BuiltinID == AArch64::BI__builtin_arm_stg ||
2545 BuiltinID == AArch64::BI__builtin_arm_subp) {
2546 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
2547 }
2548
2549 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
2550 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
2551 BuiltinID == AArch64::BI__builtin_arm_wsr ||
2552 BuiltinID == AArch64::BI__builtin_arm_wsrp)
2553 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
2554
2555 // Only check the valid encoding range. Any constant in this range would be
2556 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
2557 // an exception for incorrect registers. This matches MSVC behavior.
2558 if (BuiltinID == AArch64::BI_ReadStatusReg ||
2559 BuiltinID == AArch64::BI_WriteStatusReg)
2560 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);
2561
2562 if (BuiltinID == AArch64::BI__getReg)
2563 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
2564
2565 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
2566 return true;
2567
2568 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
2569 return true;
2570
2571 // For intrinsics which take an immediate value as part of the instruction,
2572 // range check them here.
2573 unsigned i = 0, l = 0, u = 0;
2574 switch (BuiltinID) {
2575 default: return false;
2576 case AArch64::BI__builtin_arm_dmb:
2577 case AArch64::BI__builtin_arm_dsb:
2578 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
2579 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
2580 }
2581
2582 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
2583}
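// Assumed example of the barrier immediate range checked above:
//
//   __builtin_arm_dmb(0xB);  // OK: immediate within 0..15 (0xB encodes "ish")
//   __builtin_arm_dmb(16);   // rejected: outside 0..15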
2584
2585static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) {
2586 if (Arg->getType()->getAsPlaceholderType())
2587 return false;
2588
2589 // The first argument needs to be a record field access.
2590 // If it is an array element access, we delay decision
2591 // to BPF backend to check whether the access is a
2592 // field access or not.
2593 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField ||
2594 dyn_cast<MemberExpr>(Arg->IgnoreParens()) ||
2595 dyn_cast<ArraySubscriptExpr>(Arg->IgnoreParens()));
2596}
2597
2598static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S,
2599 QualType VectorTy, QualType EltTy) {
2600 QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType();
2601 if (!Context.hasSameType(VectorEltTy, EltTy)) {
2602 S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types)
2603 << Call->getSourceRange() << VectorEltTy << EltTy;
2604 return false;
2605 }
2606 return true;
2607}
2608
2609static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) {
2610 QualType ArgType = Arg->getType();
2611 if (ArgType->getAsPlaceholderType())
2612 return false;
2613
2614 // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type
2615 // format:
2616 // 1. __builtin_preserve_type_info(*(<type> *)0, flag);
2617 // 2. <type> var;
2618 // __builtin_preserve_type_info(var, flag);
2619 if (!dyn_cast<DeclRefExpr>(Arg->IgnoreParens()) &&
2620 !dyn_cast<UnaryOperator>(Arg->IgnoreParens()))
2621 return false;
2622
2623 // Typedef type.
2624 if (ArgType->getAs<TypedefType>())
2625 return true;
2626
2627 // Record type or Enum type.
2628 const Type *Ty = ArgType->getUnqualifiedDesugaredType();
2629 if (const auto *RT = Ty->getAs<RecordType>()) {
2630 if (!RT->getDecl()->getDeclName().isEmpty())
2631 return true;
2632 } else if (const auto *ET = Ty->getAs<EnumType>()) {
2633 if (!ET->getDecl()->getDeclName().isEmpty())
2634 return true;
2635 }
2636
2637 return false;
2638}
2639
2640static bool isValidBPFPreserveEnumValueArg(Expr *Arg) {
2641 QualType ArgType = Arg->getType();
2642 if (ArgType->getAsPlaceholderType())
2643 return false;
2644
2645 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type
2646 // format:
2647 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>,
2648 // flag);
2649 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens());
2650 if (!UO)
2651 return false;
2652
2653 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
2654 if (!CE)
2655 return false;
2656 if (CE->getCastKind() != CK_IntegralToPointer &&
2657 CE->getCastKind() != CK_NullToPointer)
2658 return false;
2659
2660 // The integer must be from an EnumConstantDecl.
2661 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr());
2662 if (!DR)
2663 return false;
2664
2665 const EnumConstantDecl *Enumerator =
2666 dyn_cast<EnumConstantDecl>(DR->getDecl());
2667 if (!Enumerator)
2668 return false;
2669
2670 // The type must be EnumType.
2671 const Type *Ty = ArgType->getUnqualifiedDesugaredType();
2672 const auto *ET = Ty->getAs<EnumType>();
2673 if (!ET)
2674 return false;
2675
2676 // The enum value must be supported.
2677 for (auto *EDI : ET->getDecl()->enumerators()) {
2678 if (EDI == Enumerator)
2679 return true;
2680 }
2681
2682 return false;
2683}
2684
2685bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
2686 CallExpr *TheCall) {
2687 assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
2688 BuiltinID == BPF::BI__builtin_btf_type_id ||
2689 BuiltinID == BPF::BI__builtin_preserve_type_info ||
2690 BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
2691 "unexpected BPF builtin");
2692
2693 if (checkArgCount(*this, TheCall, 2))
2694 return true;
2695
2696 // The second argument needs to be a constant int
2697 Expr *Arg = TheCall->getArg(1);
2698 Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context);
2699 diag::kind kind;
2700 if (!Value) {
2701 if (BuiltinID == BPF::BI__builtin_preserve_field_info)
2702 kind = diag::err_preserve_field_info_not_const;
2703 else if (BuiltinID == BPF::BI__builtin_btf_type_id)
2704 kind = diag::err_btf_type_id_not_const;
2705 else if (BuiltinID == BPF::BI__builtin_preserve_type_info)
2706 kind = diag::err_preserve_type_info_not_const;
2707 else
2708 kind = diag::err_preserve_enum_value_not_const;
2709 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange();
2710 return true;
2711 }
2712
2713 // The first argument
2714 Arg = TheCall->getArg(0);
2715 bool InvalidArg = false;
2716 bool ReturnUnsignedInt = true;
2717 if (BuiltinID == BPF::BI__builtin_preserve_field_info) {
2718 if (!isValidBPFPreserveFieldInfoArg(Arg)) {
2719 InvalidArg = true;
2720 kind = diag::err_preserve_field_info_not_field;
2721 }
2722 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) {
2723 if (!isValidBPFPreserveTypeInfoArg(Arg)) {
2724 InvalidArg = true;
2725 kind = diag::err_preserve_type_info_invalid;
2726 }
2727 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) {
2728 if (!isValidBPFPreserveEnumValueArg(Arg)) {
2729 InvalidArg = true;
2730 kind = diag::err_preserve_enum_value_invalid;
2731 }
2732 ReturnUnsignedInt = false;
2733 } else if (BuiltinID == BPF::BI__builtin_btf_type_id) {
2734 ReturnUnsignedInt = false;
2735 }
2736
2737 if (InvalidArg) {
2738 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange();
2739 return true;
2740 }
2741
2742 if (ReturnUnsignedInt)
2743 TheCall->setType(Context.UnsignedIntTy);
2744 else
2745 TheCall->setType(Context.UnsignedLongTy);
2746 return false;
2747}
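// Minimal sketch of a call accepted by the checks above (the struct, the
// helper get_pkt, and the flag value are assumed for illustration):
//
//   struct pkt { int len; };
//   struct pkt *p = get_pkt();
//   unsigned off = __builtin_preserve_field_info(p->len, 0);
//   // first argument: a record field access; second: an integer constant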
2748
2749bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
2750 struct ArgInfo {
2751 uint8_t OpNum;
2752 bool IsSigned;
2753 uint8_t BitWidth;
2754 uint8_t Align;
2755 };
2756 struct BuiltinInfo {
2757 unsigned BuiltinID;
2758 ArgInfo Infos[2];
2759 };
2760
2761 static BuiltinInfo Infos[] = {
2762 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} },
2763 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} },
2764 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} },
2765 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} },
2766 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} },
2767 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} },
2768 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} },
2769 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} },
2770 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} },
2771 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} },
2772 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} },
2773
2774 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} },
2775 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} },
2776 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} },
2777 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} },
2778 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} },
2779 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} },
2780 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} },
2781 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} },
2782 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} },
2783 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} },
2784 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} },
2785
2786 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} },
2787 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} },
2788 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} },
2789 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} },
2790 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} },
2791 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} },
2792 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} },
2793 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} },
2794 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} },
2795 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} },
2796 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} },
2797 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} },
2798 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} },
2799 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} },
2800 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} },
2801 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} },
2802 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} },
2803 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} },
2804 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} },
2805 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} },
2806 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} },
2807 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} },
2808 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} },
2809 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} },
2810 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} },
2811 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} },
2812 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} },
2813 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} },
2814 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} },
2815 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} },
2816 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} },
2817 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} },
2818 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} },
2819 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} },
2820 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} },
2821 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} },
2822 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} },
2823 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} },
2824 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} },
2825 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} },
2826 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} },
2827 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} },
2828 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} },
2829 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} },
2830 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} },
2831 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} },
2832 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} },
2833 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} },
2834 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} },
2835 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} },
2836 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} },
2837 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
2838 {{ 1, false, 6, 0 }} },
2839 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} },
2840 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} },
2841 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} },
2842 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} },
2843 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} },
2844 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} },
2845 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
2846 {{ 1, false, 5, 0 }} },
2847 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} },
2848 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} },
2849 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} },
2850 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} },
2851 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} },
2852 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 },
2853 { 2, false, 5, 0 }} },
2854 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 },
2855 { 2, false, 6, 0 }} },
2856 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 },
2857 { 3, false, 5, 0 }} },
2858 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 },
2859 { 3, false, 6, 0 }} },
2860 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} },
2861 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} },
2862 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} },
2863 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} },
2864 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} },
2865 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} },
2866 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} },
2867 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} },
2868 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} },
2869 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} },
2870 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} },
2871 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} },
2872 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} },
2873 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} },
2874 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} },
2875 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
2876 {{ 2, false, 4, 0 },
2877 { 3, false, 5, 0 }} },
2878 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
2879 {{ 2, false, 4, 0 },
2880 { 3, false, 5, 0 }} },
2881 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
2882 {{ 2, false, 4, 0 },
2883 { 3, false, 5, 0 }} },
2884 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
2885 {{ 2, false, 4, 0 },
2886 { 3, false, 5, 0 }} },
2887 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} },
2888 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} },
2889 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} },
2890 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} },
2891 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} },
2892 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} },
2893 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} },
2894 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} },
2895 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} },
2896 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} },
2897 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 },
2898 { 2, false, 5, 0 }} },
2899 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 },
2900 { 2, false, 6, 0 }} },
2901 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} },
2902 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} },
2903 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} },
2904 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} },
2905 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} },
2906 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} },
2907 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} },
2908 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} },
2909 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
2910 {{ 1, false, 4, 0 }} },
2911 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} },
2912 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
2913 {{ 1, false, 4, 0 }} },
2914 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} },
2915 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} },
2916 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} },
2917 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} },
2918 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} },
2919 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} },
2920 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} },
2921 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} },
2922 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} },
2923 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} },
2924 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} },
2925 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} },
2926 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} },
2927 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} },
2928 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} },
2929 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} },
2930 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} },
2931 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} },
2932 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} },
2933 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
2934 {{ 3, false, 1, 0 }} },
2935 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} },
2936 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} },
2937 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} },
2938 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
2939 {{ 3, false, 1, 0 }} },
2940 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} },
2941 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} },
2942 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} },
2943 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
2944 {{ 3, false, 1, 0 }} },
2945 };
2946
2947 // Use a dynamically initialized static to sort the table exactly once on
2948 // first run.
2949 static const bool SortOnce =
2950 (llvm::sort(Infos,
2951 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
2952 return LHS.BuiltinID < RHS.BuiltinID;
2953 }),
2954 true);
2955 (void)SortOnce;
2956
2957 const BuiltinInfo *F = llvm::partition_point(
2958 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; });
2959 if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
2960 return false;
2961
2962 bool Error = false;
2963
2964 for (const ArgInfo &A : F->Infos) {
2965 // Ignore empty ArgInfo elements.
2966 if (A.BitWidth == 0)
2967 continue;
2968
2969 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0;
2970 int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1;
2971 if (!A.Align) {
2972 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
2973 } else {
2974 unsigned M = 1 << A.Align;
2975 Min *= M;
2976 Max *= M;
2977 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) |
2978 SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
2979 }
2980 }
2981 return Error;
2982}
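The lookup just above relies on a small idiom: a dynamically initialized function-local static sorts the table exactly once, and llvm::partition_point then binary-searches it. A minimal self-contained sketch of the same idiom, using the std:: equivalents and made-up names:

  #include <algorithm>
  #include <iterator>

  struct Entry { unsigned ID; int Data; };
  static Entry Table[] = {{7, 1}, {3, 2}, {5, 3}}; // deliberately unsorted

  static const Entry *lookupEntry(unsigned ID) {
    // The comma expression runs the sort only on the first call.
    static const bool SortOnce =
        (std::sort(std::begin(Table), std::end(Table),
                   [](const Entry &L, const Entry &R) { return L.ID < R.ID; }),
         true);
    (void)SortOnce;
    // partition_point returns the first entry whose ID is not less than ID.
    const Entry *F = std::partition_point(
        std::begin(Table), std::end(Table),
        [=](const Entry &E) { return E.ID < ID; });
    return (F != std::end(Table) && F->ID == ID) ? F : nullptr;
  }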
2983
2984bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
2985 CallExpr *TheCall) {
2986 return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
2987}
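Seen from user code, the table drives a per-operand immediate check. A hedged example based on the S2_valignib entry above ({ 2, false, 3, 0 }: operand 2, unsigned, 3 bits wide, no extra alignment); the prototype shown is an assumption and the snippet only compiles for a Hexagon target:

  long long align_bytes(long long a, long long b) {
    return __builtin_HEXAGON_S2_valignib(a, b, 5); // OK: 5 is in the unsigned 3-bit range 0..7
    // __builtin_HEXAGON_S2_valignib(a, b, 9);     // rejected by SemaBuiltinConstantArgRange
  }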
2988
2989bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
2990 unsigned BuiltinID, CallExpr *TheCall) {
2991 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
2992 CheckMipsBuiltinArgument(BuiltinID, TheCall);
2993}
2994
2995bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
2996 CallExpr *TheCall) {
2997
2998 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
2999 BuiltinID <= Mips::BI__builtin_mips_lwx) {
3000 if (!TI.hasFeature("dsp"))
3001 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
3002 }
3003
3004 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
3005 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
3006 if (!TI.hasFeature("dspr2"))
3007 return Diag(TheCall->getBeginLoc(),
3008 diag::err_mips_builtin_requires_dspr2);
3009 }
3010
3011 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
3012 BuiltinID <= Mips::BI__builtin_msa_xori_b) {
3013 if (!TI.hasFeature("msa"))
3014 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
3015 }
3016
3017 return false;
3018}
3019
3020// CheckMipsBuiltinArgument - Checks that the constant value passed to the
3021// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
3022// ordering for DSP is unspecified. MSA is ordered by the data format used
3023// by the underlying instruction i.e., df/m, df/n and then by size.
3024//
3025// FIXME: The size tests here should instead be tablegen'd along with the
3026// definitions from include/clang/Basic/BuiltinsMips.def.
3027// FIXME: GCC is strict on signedness for some of these intrinsics, we should
3028// be too.
3029bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
3030 unsigned i = 0, l = 0, u = 0, m = 0;
3031 switch (BuiltinID) {
3032 default: return false;
3033 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
3034 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
3035 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
3036 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
3037 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
3038 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
3039 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
3040  // MSA intrinsics. Instructions (which the intrinsics map to) that use the
3041 // df/m field.
3042 // These intrinsics take an unsigned 3 bit immediate.
3043 case Mips::BI__builtin_msa_bclri_b:
3044 case Mips::BI__builtin_msa_bnegi_b:
3045 case Mips::BI__builtin_msa_bseti_b:
3046 case Mips::BI__builtin_msa_sat_s_b:
3047 case Mips::BI__builtin_msa_sat_u_b:
3048 case Mips::BI__builtin_msa_slli_b:
3049 case Mips::BI__builtin_msa_srai_b:
3050 case Mips::BI__builtin_msa_srari_b:
3051 case Mips::BI__builtin_msa_srli_b:
3052 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
3053 case Mips::BI__builtin_msa_binsli_b:
3054 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
3055 // These intrinsics take an unsigned 4 bit immediate.
3056 case Mips::BI__builtin_msa_bclri_h:
3057 case Mips::BI__builtin_msa_bnegi_h:
3058 case Mips::BI__builtin_msa_bseti_h:
3059 case Mips::BI__builtin_msa_sat_s_h:
3060 case Mips::BI__builtin_msa_sat_u_h:
3061 case Mips::BI__builtin_msa_slli_h:
3062 case Mips::BI__builtin_msa_srai_h:
3063 case Mips::BI__builtin_msa_srari_h:
3064 case Mips::BI__builtin_msa_srli_h:
3065 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
3066 case Mips::BI__builtin_msa_binsli_h:
3067 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
3068 // These intrinsics take an unsigned 5 bit immediate.
3069 // The first block of intrinsics actually have an unsigned 5 bit field,
3070 // not a df/n field.
3071 case Mips::BI__builtin_msa_cfcmsa:
3072 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
3073 case Mips::BI__builtin_msa_clei_u_b:
3074 case Mips::BI__builtin_msa_clei_u_h:
3075 case Mips::BI__builtin_msa_clei_u_w:
3076 case Mips::BI__builtin_msa_clei_u_d:
3077 case Mips::BI__builtin_msa_clti_u_b:
3078 case Mips::BI__builtin_msa_clti_u_h:
3079 case Mips::BI__builtin_msa_clti_u_w:
3080 case Mips::BI__builtin_msa_clti_u_d:
3081 case Mips::BI__builtin_msa_maxi_u_b:
3082 case Mips::BI__builtin_msa_maxi_u_h:
3083 case Mips::BI__builtin_msa_maxi_u_w:
3084 case Mips::BI__builtin_msa_maxi_u_d:
3085 case Mips::BI__builtin_msa_mini_u_b:
3086 case Mips::BI__builtin_msa_mini_u_h:
3087 case Mips::BI__builtin_msa_mini_u_w:
3088 case Mips::BI__builtin_msa_mini_u_d:
3089 case Mips::BI__builtin_msa_addvi_b:
3090 case Mips::BI__builtin_msa_addvi_h:
3091 case Mips::BI__builtin_msa_addvi_w:
3092 case Mips::BI__builtin_msa_addvi_d:
3093 case Mips::BI__builtin_msa_bclri_w:
3094 case Mips::BI__builtin_msa_bnegi_w:
3095 case Mips::BI__builtin_msa_bseti_w:
3096 case Mips::BI__builtin_msa_sat_s_w:
3097 case Mips::BI__builtin_msa_sat_u_w:
3098 case Mips::BI__builtin_msa_slli_w:
3099 case Mips::BI__builtin_msa_srai_w:
3100 case Mips::BI__builtin_msa_srari_w:
3101 case Mips::BI__builtin_msa_srli_w:
3102 case Mips::BI__builtin_msa_srlri_w:
3103 case Mips::BI__builtin_msa_subvi_b:
3104 case Mips::BI__builtin_msa_subvi_h:
3105 case Mips::BI__builtin_msa_subvi_w:
3106 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
3107 case Mips::BI__builtin_msa_binsli_w:
3108 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
3109 // These intrinsics take an unsigned 6 bit immediate.
3110 case Mips::BI__builtin_msa_bclri_d:
3111 case Mips::BI__builtin_msa_bnegi_d:
3112 case Mips::BI__builtin_msa_bseti_d:
3113 case Mips::BI__builtin_msa_sat_s_d:
3114 case Mips::BI__builtin_msa_sat_u_d:
3115 case Mips::BI__builtin_msa_slli_d:
3116 case Mips::BI__builtin_msa_srai_d:
3117 case Mips::BI__builtin_msa_srari_d:
3118 case Mips::BI__builtin_msa_srli_d:
3119 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
3120 case Mips::BI__builtin_msa_binsli_d:
3121 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
3122 // These intrinsics take a signed 5 bit immediate.
3123 case Mips::BI__builtin_msa_ceqi_b:
3124 case Mips::BI__builtin_msa_ceqi_h:
3125 case Mips::BI__builtin_msa_ceqi_w:
3126 case Mips::BI__builtin_msa_ceqi_d:
3127 case Mips::BI__builtin_msa_clti_s_b:
3128 case Mips::BI__builtin_msa_clti_s_h:
3129 case Mips::BI__builtin_msa_clti_s_w:
3130 case Mips::BI__builtin_msa_clti_s_d:
3131 case Mips::BI__builtin_msa_clei_s_b:
3132 case Mips::BI__builtin_msa_clei_s_h:
3133 case Mips::BI__builtin_msa_clei_s_w:
3134 case Mips::BI__builtin_msa_clei_s_d:
3135 case Mips::BI__builtin_msa_maxi_s_b:
3136 case Mips::BI__builtin_msa_maxi_s_h:
3137 case Mips::BI__builtin_msa_maxi_s_w:
3138 case Mips::BI__builtin_msa_maxi_s_d:
3139 case Mips::BI__builtin_msa_mini_s_b:
3140 case Mips::BI__builtin_msa_mini_s_h:
3141 case Mips::BI__builtin_msa_mini_s_w:
3142 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
3143 // These intrinsics take an unsigned 8 bit immediate.
3144 case Mips::BI__builtin_msa_andi_b:
3145 case Mips::BI__builtin_msa_nori_b:
3146 case Mips::BI__builtin_msa_ori_b:
3147 case Mips::BI__builtin_msa_shf_b:
3148 case Mips::BI__builtin_msa_shf_h:
3149 case Mips::BI__builtin_msa_shf_w:
3150 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
3151 case Mips::BI__builtin_msa_bseli_b:
3152 case Mips::BI__builtin_msa_bmnzi_b:
3153 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
3154 // df/n format
3155 // These intrinsics take an unsigned 4 bit immediate.
3156 case Mips::BI__builtin_msa_copy_s_b:
3157 case Mips::BI__builtin_msa_copy_u_b:
3158 case Mips::BI__builtin_msa_insve_b:
3159 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
3160 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
3161 // These intrinsics take an unsigned 3 bit immediate.
3162 case Mips::BI__builtin_msa_copy_s_h:
3163 case Mips::BI__builtin_msa_copy_u_h:
3164 case Mips::BI__builtin_msa_insve_h:
3165 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
3166 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
3167 // These intrinsics take an unsigned 2 bit immediate.
3168 case Mips::BI__builtin_msa_copy_s_w:
3169 case Mips::BI__builtin_msa_copy_u_w:
3170 case Mips::BI__builtin_msa_insve_w:
3171 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
3172 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
3173 // These intrinsics take an unsigned 1 bit immediate.
3174 case Mips::BI__builtin_msa_copy_s_d:
3175 case Mips::BI__builtin_msa_copy_u_d:
3176 case Mips::BI__builtin_msa_insve_d:
3177 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
3178 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
3179 // Memory offsets and immediate loads.
3180 // These intrinsics take a signed 10 bit immediate.
3181 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
3182 case Mips::BI__builtin_msa_ldi_h:
3183 case Mips::BI__builtin_msa_ldi_w:
3184 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
3185 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
3186 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
3187 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
3188 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
3189 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
3190 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
3191 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
3192 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
3193 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
3194 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
3195 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
3196 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
3197 }
3198
3199 if (!m)
3200 return SemaBuiltinConstantArgRange(TheCall, i, l, u);
3201
3202 return SemaBuiltinConstantArgRange(TheCall, i, l, u) ||
3203 SemaBuiltinConstantArgMultiple(TheCall, i, m);
3204}
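For the memory-offset cases at the end of the switch, the immediate has to satisfy both the range check and the multiple-of-m check. A hedged sketch for __builtin_msa_ld_w (operand 1, range -2048..2044, multiple of 4); the vector typedef is the usual MSA convention and is an assumption here:

  typedef int v4i32 __attribute__((vector_size(16)));

  v4i32 load_word(void *ptr) {
    return __builtin_msa_ld_w(ptr, 8); // OK: within -2048..2044 and a multiple of 4
    // __builtin_msa_ld_w(ptr, 10);    // rejected by SemaBuiltinConstantArgMultiple
    // __builtin_msa_ld_w(ptr, 4096);  // rejected by SemaBuiltinConstantArgRange
  }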
3205
3206/// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
3207/// advancing the pointer over the consumed characters. The decoded type is
3208/// returned. If the decoded type represents a constant integer with a
3209/// constraint on its value then Mask is set to that value. The type descriptors
3210/// used in Str are specific to PPC MMA builtins and are documented in the file
3211/// defining the PPC builtins.
3212static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
3213 unsigned &Mask) {
3214 bool RequireICE = false;
3215 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
3216 switch (*Str++) {
3217 case 'V':
3218 return Context.getVectorType(Context.UnsignedCharTy, 16,
3219 VectorType::VectorKind::AltiVecVector);
3220 case 'i': {
3221 char *End;
3222 unsigned size = strtoul(Str, &End, 10);
3223    assert(End != Str && "Missing constant parameter constraint");
3224 Str = End;
3225 Mask = size;
3226 return Context.IntTy;
3227 }
3228 case 'W': {
3229 char *End;
3230 unsigned size = strtoul(Str, &End, 10);
3231    assert(End != Str && "Missing PowerPC MMA type size");
3232 Str = End;
3233 QualType Type;
3234 switch (size) {
3235 #define PPC_VECTOR_TYPE(typeName, Id, size) \
3236 case size: Type = Context.Id##Ty; break;
3237 #include "clang/Basic/PPCTypes.def"
3238    default: llvm_unreachable("Invalid PowerPC MMA vector type");
3239 }
3240 bool CheckVectorArgs = false;
3241 while (!CheckVectorArgs) {
3242 switch (*Str++) {
3243 case '*':
3244 Type = Context.getPointerType(Type);
3245 break;
3246 case 'C':
3247 Type = Type.withConst();
3248 break;
3249 default:
3250 CheckVectorArgs = true;
3251 --Str;
3252 break;
3253 }
3254 }
3255 return Type;
3256 }
3257 default:
3258 return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true);
3259 }
3260}
3261
3262static bool isPPC_64Builtin(unsigned BuiltinID) {
3263 // These builtins only work on PPC 64bit targets.
3264 switch (BuiltinID) {
3265 case PPC::BI__builtin_divde:
3266 case PPC::BI__builtin_divdeu:
3267 case PPC::BI__builtin_bpermd:
3268 case PPC::BI__builtin_ppc_ldarx:
3269 case PPC::BI__builtin_ppc_stdcx:
3270 case PPC::BI__builtin_ppc_tdw:
3271 case PPC::BI__builtin_ppc_trapd:
3272 case PPC::BI__builtin_ppc_cmpeqb:
3273 case PPC::BI__builtin_ppc_setb:
3274 case PPC::BI__builtin_ppc_mulhd:
3275 case PPC::BI__builtin_ppc_mulhdu:
3276 case PPC::BI__builtin_ppc_maddhd:
3277 case PPC::BI__builtin_ppc_maddhdu:
3278 case PPC::BI__builtin_ppc_maddld:
3279 case PPC::BI__builtin_ppc_load8r:
3280 case PPC::BI__builtin_ppc_store8r:
3281 case PPC::BI__builtin_ppc_insert_exp:
3282 case PPC::BI__builtin_ppc_extract_sig:
3283 return true;
3284 }
3285 return false;
3286}
3287
3288static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall,
3289 StringRef FeatureToCheck, unsigned DiagID,
3290 StringRef DiagArg = "") {
3291 if (S.Context.getTargetInfo().hasFeature(FeatureToCheck))
3292 return false;
3293
3294 if (DiagArg.empty())
3295 S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange();
3296 else
3297 S.Diag(TheCall->getBeginLoc(), DiagID)
3298 << DiagArg << TheCall->getSourceRange();
3299
3300 return true;
3301}
3302
3303/// Returns true if the argument consists of one contiguous run of 1s with any
3304/// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so
3305/// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not,
3306/// since all 1s are not contiguous.
3307bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
3308 llvm::APSInt Result;
3309 // We can't check the value of a dependent argument.
3310 Expr *Arg = TheCall->getArg(ArgNum);
3311 if (Arg->isTypeDependent() || Arg->isValueDependent())
3312 return false;
3313
3314 // Check constant-ness first.
3315 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
3316 return true;
3317
3318  // Check for a contiguous run of 1s; 0xFF0000FF (a wrapped run) also counts.
3319 if (Result.isShiftedMask() || (~Result).isShiftedMask())
3320 return false;
3321
3322 return Diag(TheCall->getBeginLoc(),
3323 diag::err_argument_not_contiguous_bit_field)
3324 << ArgNum << Arg->getSourceRange();
3325}
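The accept test above is two isShiftedMask queries: one on the value for an ordinary contiguous run, one on its complement for a run that wraps from LSB to MSB. A standalone sketch of the same predicate (assuming llvm/ADT/APInt.h; the helper name is made up):

  #include "llvm/ADT/APInt.h"
  #include <cstdint>

  static bool isRunOfOnes32(uint32_t V) {
    llvm::APInt X(32, V);
    return X.isShiftedMask() || (~X).isShiftedMask();
  }
  // isRunOfOnes32(0x0000FFF0) == true   (one contiguous run)
  // isRunOfOnes32(0xFF0000FF) == true   (run wrapping around the word)
  // isRunOfOnes32(0x0F0F0000) == false  (two separate runs)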
3326
3327bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
3328 CallExpr *TheCall) {
3329 unsigned i = 0, l = 0, u = 0;
3330 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
3331 llvm::APSInt Result;
3332
3333 if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
3334 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
3335 << TheCall->getSourceRange();
3336
3337 switch (BuiltinID) {
3338 default: return false;
3339 case PPC::BI__builtin_altivec_crypto_vshasigmaw:
3340 case PPC::BI__builtin_altivec_crypto_vshasigmad:
3341 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
3342 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
3343 case PPC::BI__builtin_altivec_dss:
3344 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
3345 case PPC::BI__builtin_tbegin:
3346 case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break;
3347 case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break;
3348 case PPC::BI__builtin_tabortwc:
3349 case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break;
3350 case PPC::BI__builtin_tabortwci:
3351 case PPC::BI__builtin_tabortdci:
3352 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
3353 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
3354 case PPC::BI__builtin_altivec_dst:
3355 case PPC::BI__builtin_altivec_dstt:
3356 case PPC::BI__builtin_altivec_dstst:
3357 case PPC::BI__builtin_altivec_dststt:
3358 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
3359 case PPC::BI__builtin_vsx_xxpermdi:
3360 case PPC::BI__builtin_vsx_xxsldwi:
3361 return SemaBuiltinVSX(TheCall);
3362 case PPC::BI__builtin_divwe:
3363 case PPC::BI__builtin_divweu:
3364 case PPC::BI__builtin_divde:
3365 case PPC::BI__builtin_divdeu:
3366 return SemaFeatureCheck(*this, TheCall, "extdiv",
3367 diag::err_ppc_builtin_only_on_arch, "7");
3368 case PPC::BI__builtin_bpermd:
3369 return SemaFeatureCheck(*this, TheCall, "bpermd",
3370 diag::err_ppc_builtin_only_on_arch, "7");
3371 case PPC::BI__builtin_unpack_vector_int128:
3372 return SemaFeatureCheck(*this, TheCall, "vsx",
3373 diag::err_ppc_builtin_only_on_arch, "7") ||
3374 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
3375 case PPC::BI__builtin_pack_vector_int128:
3376 return SemaFeatureCheck(*this, TheCall, "vsx",
3377 diag::err_ppc_builtin_only_on_arch, "7");
3378 case PPC::BI__builtin_altivec_vgnb:
3379 return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
3380 case PPC::BI__builtin_altivec_vec_replace_elt:
3381 case PPC::BI__builtin_altivec_vec_replace_unaligned: {
3382 QualType VecTy = TheCall->getArg(0)->getType();
3383 QualType EltTy = TheCall->getArg(1)->getType();
3384 unsigned Width = Context.getIntWidth(EltTy);
3385 return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 12 : 8) ||
3386 !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy);
3387 }
3388 case PPC::BI__builtin_vsx_xxeval:
3389 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
3390 case PPC::BI__builtin_altivec_vsldbi:
3391 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
3392 case PPC::BI__builtin_altivec_vsrdbi:
3393 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
3394 case PPC::BI__builtin_vsx_xxpermx:
3395 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
3396 case PPC::BI__builtin_ppc_tw:
3397 case PPC::BI__builtin_ppc_tdw:
3398 return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31);
3399 case PPC::BI__builtin_ppc_cmpeqb:
3400 case PPC::BI__builtin_ppc_setb:
3401 case PPC::BI__builtin_ppc_maddhd:
3402 case PPC::BI__builtin_ppc_maddhdu:
3403 case PPC::BI__builtin_ppc_maddld:
3404 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
3405 diag::err_ppc_builtin_only_on_arch, "9");
3406 case PPC::BI__builtin_ppc_cmprb:
3407 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
3408 diag::err_ppc_builtin_only_on_arch, "9") ||
3409 SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
3410 // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
3411 // be a constant that represents a contiguous bit field.
3412 case PPC::BI__builtin_ppc_rlwnm:
3413 return SemaBuiltinConstantArg(TheCall, 1, Result) ||
3414 SemaValueIsRunOfOnes(TheCall, 2);
3415 case PPC::BI__builtin_ppc_rlwimi:
3416 case PPC::BI__builtin_ppc_rldimi:
3417 return SemaBuiltinConstantArg(TheCall, 2, Result) ||
3418 SemaValueIsRunOfOnes(TheCall, 3);
3419 case PPC::BI__builtin_ppc_extract_exp:
3420 case PPC::BI__builtin_ppc_extract_sig:
3421 case PPC::BI__builtin_ppc_insert_exp:
3422 return SemaFeatureCheck(*this, TheCall, "power9-vector",
3423 diag::err_ppc_builtin_only_on_arch, "9");
3424 case PPC::BI__builtin_ppc_mtfsb0:
3425 case PPC::BI__builtin_ppc_mtfsb1:
3426 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
3427 case PPC::BI__builtin_ppc_mtfsf:
3428 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255);
3429 case PPC::BI__builtin_ppc_mtfsfi:
3430 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
3431 SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
3432 case PPC::BI__builtin_ppc_alignx:
3433 return SemaBuiltinConstantArgPower2(TheCall, 0);
3434 case PPC::BI__builtin_ppc_rdlam:
3435 return SemaValueIsRunOfOnes(TheCall, 2);
3436 case PPC::BI__builtin_ppc_icbt:
3437 case PPC::BI__builtin_ppc_sthcx:
3438 case PPC::BI__builtin_ppc_stbcx:
3439 case PPC::BI__builtin_ppc_lharx:
3440 case PPC::BI__builtin_ppc_lbarx:
3441 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
3442 diag::err_ppc_builtin_only_on_arch, "8");
3443 case PPC::BI__builtin_vsx_ldrmb:
3444 case PPC::BI__builtin_vsx_strmb:
3445 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
3446 diag::err_ppc_builtin_only_on_arch, "8") ||
3447 SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
3448#define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
3449 case PPC::BI__builtin_##Name: \
3450 return SemaBuiltinPPCMMACall(TheCall, Types);
3451#include "clang/Basic/BuiltinsPPC.def"
3452 }
3453 return SemaBuiltinConstantArgRange(TheCall, i, l, u);
3454}
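To make the rotate-and-mask constraint concrete: for __builtin_ppc_rlwnm the shift operand must be a constant and the mask operand a contiguous (possibly wrapped) run of ones. A hedged example; the prototype is an assumption:

  unsigned rotate_and_mask(unsigned rs) {
    return __builtin_ppc_rlwnm(rs, 8, 0x00FFFF00); // OK: constant shift, contiguous mask
    // __builtin_ppc_rlwnm(rs, 8, 0x0F0F0000);     // rejected: not a contiguous bit field
  }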
3455
3456// Check if the given type is a non-pointer PPC MMA type. This function is used
3457// in Sema to prevent invalid uses of restricted PPC MMA types.
3458bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
3459 if (Type->isPointerType() || Type->isArrayType())
3460 return false;
3461
3462 QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
3463#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
3464 if (false
3465#include "clang/Basic/PPCTypes.def"
3466 ) {
3467 Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
3468 return true;
3469 }
3470 return false;
3471}
3472
3473bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
3474 CallExpr *TheCall) {
3475  // Position of the memory order and scope arguments in the builtin.
3476 unsigned OrderIndex, ScopeIndex;
3477 switch (BuiltinID) {
3478 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
3479 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
3480 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
3481 case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
3482 OrderIndex = 2;
3483 ScopeIndex = 3;
3484 break;
3485 case AMDGPU::BI__builtin_amdgcn_fence:
3486 OrderIndex = 0;
3487 ScopeIndex = 1;
3488 break;
3489 default:
3490 return false;
3491 }
3492
3493 ExprResult Arg = TheCall->getArg(OrderIndex);
3494 auto ArgExpr = Arg.get();
3495 Expr::EvalResult ArgResult;
3496
3497 if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
3498 return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
3499 << ArgExpr->getType();
3500 auto Ord = ArgResult.Val.getInt().getZExtValue();
3501
3502  // Check validity of the memory ordering as per the C11 / C++11 memory model.
3503  // Only fence needs the check. Atomic dec/inc allow all memory orders.
3504 if (!llvm::isValidAtomicOrderingCABI(Ord))
3505 return Diag(ArgExpr->getBeginLoc(),
3506 diag::warn_atomic_op_has_invalid_memory_order)
3507 << ArgExpr->getSourceRange();
3508 switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
3509 case llvm::AtomicOrderingCABI::relaxed:
3510 case llvm::AtomicOrderingCABI::consume:
3511 if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
3512 return Diag(ArgExpr->getBeginLoc(),
3513 diag::warn_atomic_op_has_invalid_memory_order)
3514 << ArgExpr->getSourceRange();
3515 break;
3516 case llvm::AtomicOrderingCABI::acquire:
3517 case llvm::AtomicOrderingCABI::release:
3518 case llvm::AtomicOrderingCABI::acq_rel:
3519 case llvm::AtomicOrderingCABI::seq_cst:
3520 break;
3521 }
3522
3523 Arg = TheCall->getArg(ScopeIndex);
3524 ArgExpr = Arg.get();
3525 Expr::EvalResult ArgResult1;
3526 // Check that sync scope is a constant literal
3527 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context))
3528 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
3529 << ArgExpr->getType();
3530
3531 return false;
3532}
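A hedged sketch of what the fence check accepts and rejects; __ATOMIC_* are the usual compiler-provided memory-order macros, and "workgroup" is one of the documented AMDGPU sync scopes:

  void fences() {
    __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");    // OK: valid order, constant scope
    // __builtin_amdgcn_fence(__ATOMIC_RELAXED, "workgroup"); // warns: relaxed/consume rejected for fence
  }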
3533
3534bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
3535 llvm::APSInt Result;
3536
3537 // We can't check the value of a dependent argument.
3538 Expr *Arg = TheCall->getArg(ArgNum);
3539 if (Arg->isTypeDependent() || Arg->isValueDependent())
3540 return false;
3541
3542 // Check constant-ness first.
3543 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
3544 return true;
3545
3546 int64_t Val = Result.getSExtValue();
3547 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
3548 return false;
3549
3550 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
3551 << Arg->getSourceRange();
3552}
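The valid LMUL encodings form two ranges with a hole at 4 and nothing above 7; restated as a small predicate with a hypothetical name:

  #include <cstdint>

  // Encodings 0..3 and 5..7 are accepted; 4 is reserved and values >= 8 are out of range.
  static bool isValidLMULEncoding(int64_t Val) {
    return (Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7);
  }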
3553
3554bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
3555 unsigned BuiltinID,
3556 CallExpr *TheCall) {
3557 // CodeGenFunction can also detect this, but this gives a better error
3558 // message.
3559 bool FeatureMissing = false;
3560 SmallVector<StringRef> ReqFeatures;
3561 StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID);
3562 Features.split(ReqFeatures, ',');
3563
3564 // Check if each required feature is included
3565 for (StringRef F : ReqFeatures) {
3566 if (TI.hasFeature(F))
3567 continue;
3568
3569 // If the feature is 64bit, alter the string so it will print better in
3570 // the diagnostic.
3571 if (F == "64bit")
3572 F = "RV64";
3573
3574 // Convert features like "zbr" and "experimental-zbr" to "Zbr".
3575 F.consume_front("experimental-");
3576 std::string FeatureStr = F.str();
3577 FeatureStr[0] = std::toupper(FeatureStr[0]);
3578
3579 // Error message
3580 FeatureMissing = true;
3581 Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension)
3582 << TheCall->getSourceRange() << StringRef(FeatureStr);
3583 }
3584
3585 if (FeatureMissing)
3586 return true;
3587
3588 switch (BuiltinID) {
3589 case RISCV::BI__builtin_rvv_vsetvli:
3590 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) ||
3591 CheckRISCVLMUL(TheCall, 2);
3592 case RISCV::BI__builtin_rvv_vsetvlimax:
3593 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
3594 CheckRISCVLMUL(TheCall, 1);
3595 case RISCV::BI__builtin_rvv_vget_v_i8m2_i8m1:
3596 case RISCV::BI__builtin_rvv_vget_v_i16m2_i16m1:
3597 case RISCV::BI__builtin_rvv_vget_v_i32m2_i32m1:
3598 case RISCV::BI__builtin_rvv_vget_v_i64m2_i64m1:
3599 case RISCV::BI__builtin_rvv_vget_v_f32m2_f32m1:
3600 case RISCV::BI__builtin_rvv_vget_v_f64m2_f64m1:
3601 case RISCV::BI__builtin_rvv_vget_v_u8m2_u8m1:
3602 case RISCV::BI__builtin_rvv_vget_v_u16m2_u16m1:
3603 case RISCV::BI__builtin_rvv_vget_v_u32m2_u32m1:
3604 case RISCV::BI__builtin_rvv_vget_v_u64m2_u64m1:
3605 case RISCV::BI__builtin_rvv_vget_v_i8m4_i8m2:
3606 case RISCV::BI__builtin_rvv_vget_v_i16m4_i16m2:
3607 case RISCV::BI__builtin_rvv_vget_v_i32m4_i32m2:
3608 case RISCV::BI__builtin_rvv_vget_v_i64m4_i64m2:
3609 case RISCV::BI__builtin_rvv_vget_v_f32m4_f32m2:
3610 case RISCV::BI__builtin_rvv_vget_v_f64m4_f64m2:
3611 case RISCV::BI__builtin_rvv_vget_v_u8m4_u8m2:
3612 case RISCV::BI__builtin_rvv_vget_v_u16m4_u16m2:
3613 case RISCV::BI__builtin_rvv_vget_v_u32m4_u32m2:
3614 case RISCV::BI__builtin_rvv_vget_v_u64m4_u64m2:
3615 case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m4:
3616 case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m4:
3617 case RISCV::BI__builtin_rvv_vget_v_i32m8_i32m4:
3618 case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m4:
3619 case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m4:
3620 case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m4:
3621 case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m4:
3622 case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m4:
3623 case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m4:
3624 case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m4:
3625 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
3626 case RISCV::BI__builtin_rvv_vget_v_i8m4_i8m1:
3627 case RISCV::BI__builtin_rvv_vget_v_i16m4_i16m1:
3628 case RISCV::BI__builtin_rvv_vget_v_i32m4_i32m1:
3629 case RISCV::BI__builtin_rvv_vget_v_i64m4_i64m1:
3630 case RISCV::BI__builtin_rvv_vget_v_f32m4_f32m1:
3631 case RISCV::BI__builtin_rvv_vget_v_f64m4_f64m1:
3632 case RISCV::BI__builtin_rvv_vget_v_u8m4_u8m1:
3633 case RISCV::BI__builtin_rvv_vget_v_u16m4_u16m1:
3634 case RISCV::BI__builtin_rvv_vget_v_u32m4_u32m1:
3635 case RISCV::BI__builtin_rvv_vget_v_u64m4_u64m1:
3636 case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m2:
3637 case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m2:
3638 case RISCV::BI__builtin_rvv_vget_v_i32m8_i32m2:
3639 case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m2:
3640 case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m2:
3641 case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m2:
3642 case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m2:
3643 case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m2:
3644 case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m2:
3645 case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m2:
3646 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
3647 case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m1:
3648 case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m1:
3649 case RISCV::BI__builtin_rvv_vget_v_i32m8_i32m1:
3650 case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m1:
3651 case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m1:
3652 case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m1:
3653 case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m1:
3654 case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m1:
3655 case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m1:
3656 case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m1:
3657 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
3658 case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m2:
3659 case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m2:
3660 case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m2:
3661 case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m2:
3662 case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m2:
3663 case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m2:
3664 case RISCV::BI__builtin_rvv_vset_v_u8m1_u8m2:
3665 case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m2:
3666 case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m2:
3667 case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m2:
3668 case RISCV::BI__builtin_rvv_vset_v_i8m2_i8m4:
3669 case RISCV::BI__builtin_rvv_vset_v_i16m2_i16m4:
3670 case RISCV::BI__builtin_rvv_vset_v_i32m2_i32m4:
3671 case RISCV::BI__builtin_rvv_vset_v_i64m2_i64m4:
3672 case RISCV::BI__builtin_rvv_vset_v_f32m2_f32m4:
3673 case RISCV::BI__builtin_rvv_vset_v_f64m2_f64m4:
3674 case RISCV::BI__builtin_rvv_vset_v_u8m2_u8m4:
3675 case RISCV::BI__builtin_rvv_vset_v_u16m2_u16m4:
3676 case RISCV::BI__builtin_rvv_vset_v_u32m2_u32m4:
3677 case RISCV::BI__builtin_rvv_vset_v_u64m2_u64m4:
3678 case RISCV::BI__builtin_rvv_vset_v_i8m4_i8m8:
3679 case RISCV::BI__builtin_rvv_vset_v_i16m4_i16m8:
3680 case RISCV::BI__builtin_rvv_vset_v_i32m4_i32m8:
3681 case RISCV::BI__builtin_rvv_vset_v_i64m4_i64m8:
3682 case RISCV::BI__builtin_rvv_vset_v_f32m4_f32m8:
3683 case RISCV::BI__builtin_rvv_vset_v_f64m4_f64m8:
3684 case RISCV::BI__builtin_rvv_vset_v_u8m4_u8m8:
3685 case RISCV::BI__builtin_rvv_vset_v_u16m4_u16m8:
3686 case RISCV::BI__builtin_rvv_vset_v_u32m4_u32m8:
3687 case RISCV::BI__builtin_rvv_vset_v_u64m4_u64m8:
3688 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
3689 case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m4:
3690 case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m4:
3691 case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m4:
3692 case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m4:
3693 case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m4:
3694 case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m4:
3695 case RISCV::BI__builtin_rvv_vset_v_u8m1_u8m4:
3696 case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m4:
3697 case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m4:
3698 case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m4:
3699 case RISCV::BI__builtin_rvv_vset_v_i8m2_i8m8:
3700 case RISCV::BI__builtin_rvv_vset_v_i16m2_i16m8:
3701 case RISCV::BI__builtin_rvv_vset_v_i32m2_i32m8:
3702 case RISCV::BI__builtin_rvv_vset_v_i64m2_i64m8:
3703 case RISCV::BI__builtin_rvv_vset_v_f32m2_f32m8:
3704 case RISCV::BI__builtin_rvv_vset_v_f64m2_f64m8:
3705 case RISCV::BI__builtin_rvv_vset_v_u8m2_u8m8:
3706 case RISCV::BI__builtin_rvv_vset_v_u16m2_u16m8:
3707 case RISCV::BI__builtin_rvv_vset_v_u32m2_u32m8:
3708 case RISCV::BI__builtin_rvv_vset_v_u64m2_u64m8:
3709 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
3710 case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m8:
3711 case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m8:
3712 case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m8:
3713 case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m8:
3714 case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m8:
3715 case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m8:
3716 case RISCV::BI__builtin_rvv_vset_v_u8m1_u8m8:
3717 case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m8:
3718 case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m8:
3719 case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m8:
3720 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
3721 }
3722
3723 return false;
3724}
3725
3726bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
3727 CallExpr *TheCall) {
3728 if (BuiltinID == SystemZ::BI__builtin_tabort) {
3729 Expr *Arg = TheCall->getArg(0);
3730 if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context))
3731 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
3732 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
3733 << Arg->getSourceRange();
3734 }
3735
3736 // For intrinsics which take an immediate value as part of the instruction,
3737 // range check them here.
3738 unsigned i = 0, l = 0, u = 0;
3739 switch (BuiltinID) {
3740 default: return false;
3741 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
3742 case SystemZ::BI__builtin_s390_verimb:
3743 case SystemZ::BI__builtin_s390_verimh:
3744 case SystemZ::BI__builtin_s390_verimf:
3745 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
3746 case SystemZ::BI__builtin_s390_vfaeb:
3747 case SystemZ::BI__builtin_s390_vfaeh:
3748 case SystemZ::BI__builtin_s390_vfaef:
3749 case SystemZ::BI__builtin_s390_vfaebs:
3750 case SystemZ::BI__builtin_s390_vfaehs:
3751 case SystemZ::BI__builtin_s390_vfaefs:
3752 case SystemZ::BI__builtin_s390_vfaezb:
3753 case SystemZ::BI__builtin_s390_vfaezh:
3754 case SystemZ::BI__builtin_s390_vfaezf:
3755 case SystemZ::BI__builtin_s390_vfaezbs:
3756 case SystemZ::BI__builtin_s390_vfaezhs:
3757 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
3758 case SystemZ::BI__builtin_s390_vfisb:
3759 case SystemZ::BI__builtin_s390_vfidb:
3760 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) ||
3761 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
3762 case SystemZ::BI__builtin_s390_vftcisb:
3763 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
3764 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
3765 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
3766 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
3767 case SystemZ::BI__builtin_s390_vstrcb:
3768 case SystemZ::BI__builtin_s390_vstrch:
3769 case SystemZ::BI__builtin_s390_vstrcf:
3770 case SystemZ::BI__builtin_s390_vstrczb:
3771 case SystemZ::BI__builtin_s390_vstrczh:
3772 case SystemZ::BI__builtin_s390_vstrczf:
3773 case SystemZ::BI__builtin_s390_vstrcbs:
3774 case SystemZ::BI__builtin_s390_vstrchs:
3775 case SystemZ::BI__builtin_s390_vstrcfs:
3776 case SystemZ::BI__builtin_s390_vstrczbs:
3777 case SystemZ::BI__builtin_s390_vstrczhs:
3778 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
3779 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
3780 case SystemZ::BI__builtin_s390_vfminsb:
3781 case SystemZ::BI__builtin_s390_vfmaxsb:
3782 case SystemZ::BI__builtin_s390_vfmindb:
3783 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
3784 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
3785 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
3786 case SystemZ::BI__builtin_s390_vclfnhs:
3787 case SystemZ::BI__builtin_s390_vclfnls:
3788 case SystemZ::BI__builtin_s390_vcfn:
3789 case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break;
3790 case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break;
3791 }
3792 return SemaBuiltinConstantArgRange(TheCall, i, l, u);
3793}
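For __builtin_tabort the check above flags constant abort codes below 256, which are reserved; a hedged example:

  void abort_transaction() {
    __builtin_tabort(256);  // OK: not in the reserved 0..255 range
    // __builtin_tabort(42); // rejected: err_systemz_invalid_tabort_code
  }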
3794
3795/// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
3796/// This checks that the target supports __builtin_cpu_supports and
3797/// that the string argument is constant and valid.
3798static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI,
3799 CallExpr *TheCall) {
3800 Expr *Arg = TheCall->getArg(0);
3801
3802 // Check if the argument is a string literal.
3803 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
3804 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
3805 << Arg->getSourceRange();
3806
3807 // Check the contents of the string.
3808 StringRef Feature =
3809 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
3810 if (!TI.validateCpuSupports(Feature))
3811 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
3812 << Arg->getSourceRange();
3813 return false;
3814}
3815
3816/// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *).
3817/// This checks that the target supports __builtin_cpu_is and
3818/// that the string argument is constant and valid.
3819static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) {
3820 Expr *Arg = TheCall->getArg(0);
3821
3822 // Check if the argument is a string literal.
3823 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
3824 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
3825 << Arg->getSourceRange();
3826
3827 // Check the contents of the string.
3828 StringRef Feature =
3829 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
3830 if (!TI.validateCpuIs(Feature))
3831 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
3832 << Arg->getSourceRange();
3833 return false;
3834}
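Both helpers insist on a plain string literal that the target recognizes; a hedged x86 example ("avx2" is a known __builtin_cpu_supports string and "intel" a known __builtin_cpu_is string):

  int detect(const char *name) {
    int ok = __builtin_cpu_supports("avx2") && __builtin_cpu_is("intel"); // OK
    // __builtin_cpu_supports(name);            // rejected: err_expr_not_string_literal
    // __builtin_cpu_supports("no-such-thing"); // rejected: err_invalid_cpu_supports
    return ok;
  }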
3835
3836// Check if the rounding mode is legal.
3837bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
3838 // Indicates if this instruction has rounding control or just SAE.
3839 bool HasRC = false;
3840
3841 unsigned ArgNum = 0;
3842 switch (BuiltinID) {
3843 default:
3844 return false;
3845 case X86::BI__builtin_ia32_vcvttsd2si32:
3846 case X86::BI__builtin_ia32_vcvttsd2si64:
3847 case X86::BI__builtin_ia32_vcvttsd2usi32:
3848 case X86::BI__builtin_ia32_vcvttsd2usi64:
3849 case X86::BI__builtin_ia32_vcvttss2si32:
3850 case X86::BI__builtin_ia32_vcvttss2si64:
3851 case X86::BI__builtin_ia32_vcvttss2usi32:
3852 case X86::BI__builtin_ia32_vcvttss2usi64:
3853 ArgNum = 1;
3854 break;
3855 case X86::BI__builtin_ia32_maxpd512:
3856 case X86::BI__builtin_ia32_maxps512:
3857 case X86::BI__builtin_ia32_minpd512:
3858 case X86::BI__builtin_ia32_minps512:
3859 ArgNum = 2;
3860 break;
3861 case X86::BI__builtin_ia32_cvtps2pd512_mask:
3862 case X86::BI__builtin_ia32_cvttpd2dq512_mask:
3863 case X86::BI__builtin_ia32_cvttpd2qq512_mask:
3864 case X86::BI__builtin_ia32_cvttpd2udq512_mask:
3865 case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
3866 case X86::BI__builtin_ia32_cvttps2dq512_mask:
3867 case X86::BI__builtin_ia32_cvttps2qq512_mask:
3868 case X86::BI__builtin_ia32_cvttps2udq512_mask:
3869 case X86::BI__builtin_ia32_cvttps2uqq512_mask:
3870 case X86::BI__builtin_ia32_exp2pd_mask:
3871 case X86::BI__builtin_ia32_exp2ps_mask:
3872 case X86::BI__builtin_ia32_getexppd512_mask:
3873 case X86::BI__builtin_ia32_getexpps512_mask:
3874 case X86::BI__builtin_ia32_rcp28pd_mask:
3875 case X86::BI__builtin_ia32_rcp28ps_mask:
3876 case X86::BI__builtin_ia32_rsqrt28pd_mask:
3877 case X86::BI__builtin_ia32_rsqrt28ps_mask:
3878 case X86::BI__builtin_ia32_vcomisd:
3879 case X86::BI__builtin_ia32_vcomiss:
3880 case X86::BI__builtin_ia32_vcvtph2ps512_mask:
3881 ArgNum = 3;
3882 break;
3883 case X86::BI__builtin_ia32_cmppd512_mask:
3884 case X86::BI__builtin_ia32_cmpps512_mask:
3885 case X86::BI__builtin_ia32_cmpsd_mask:
3886 case X86::BI__builtin_ia32_cmpss_mask:
3887 case X86::BI__builtin_ia32_cvtss2sd_round_mask:
3888 case X86::BI__builtin_ia32_getexpsd128_round_mask:
3889 case X86::BI__builtin_ia32_getexpss128_round_mask:
3890 case X86::BI__builtin_ia32_getmantpd512_mask:
3891 case X86::BI__builtin_ia32_getmantps512_mask:
3892 case X86::BI__builtin_ia32_maxsd_round_mask:
3893 case X86::BI__builtin_ia32_maxss_round_mask:
3894 case X86::BI__builtin_ia32_minsd_round_mask:
3895 case X86::BI__builtin_ia32_minss_round_mask:
3896 case X86::BI__builtin_ia32_rcp28sd_round_mask:
3897 case X86::BI__builtin_ia32_rcp28ss_round_mask:
3898 case X86::BI__builtin_ia32_reducepd512_mask:
3899 case X86::BI__builtin_ia32_reduceps512_mask:
3900 case X86::BI__builtin_ia32_rndscalepd_mask:
3901 case X86::BI__builtin_ia32_rndscaleps_mask:
3902 case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
3903 case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
3904 ArgNum = 4;
3905 break;
3906 case X86::BI__builtin_ia32_fixupimmpd512_mask:
3907 case X86::BI__builtin_ia32_fixupimmpd512_maskz:
3908 case X86::BI__builtin_ia32_fixupimmps512_mask:
3909 case X86::BI__builtin_ia32_fixupimmps512_maskz:
3910 case X86::BI__builtin_ia32_fixupimmsd_mask:
3911 case X86::BI__builtin_ia32_fixupimmsd_maskz:
3912 case X86::BI__builtin_ia32_fixupimmss_mask:
3913 case X86::BI__builtin_ia32_fixupimmss_maskz:
3914 case X86::BI__builtin_ia32_getmantsd_round_mask:
3915 case X86::BI__builtin_ia32_getmantss_round_mask:
3916 case X86::BI__builtin_ia32_rangepd512_mask:
3917 case X86::BI__builtin_ia32_rangeps512_mask:
3918 case X86::BI__builtin_ia32_rangesd128_round_mask:
3919 case X86::BI__builtin_ia32_rangess128_round_mask:
3920 case X86::BI__builtin_ia32_reducesd_mask:
3921 case X86::BI__builtin_ia32_reducess_mask:
3922 case X86::BI__builtin_ia32_rndscalesd_round_mask:
3923 case X86::BI__builtin_ia32_rndscaless_round_mask:
3924 ArgNum = 5;
3925 break;
3926 case X86::BI__builtin_ia32_vcvtsd2si64:
3927 case X86::BI__builtin_ia32_vcvtsd2si32:
3928 case X86::BI__builtin_ia32_vcvtsd2usi32:
3929 case X86::BI__builtin_ia32_vcvtsd2usi64:
3930 case X86::BI__builtin_ia32_vcvtss2si32:
3931 case X86::BI__builtin_ia32_vcvtss2si64:
3932 case X86::BI__builtin_ia32_vcvtss2usi32:
3933 case X86::BI__builtin_ia32_vcvtss2usi64:
3934 case X86::BI__builtin_ia32_sqrtpd512:
3935 case X86::BI__builtin_ia32_sqrtps512:
3936 ArgNum = 1;
3937 HasRC = true;
3938 break;
3939 case X86::BI__builtin_ia32_addpd512:
3940 case X86::BI__builtin_ia32_addps512:
3941 case X86::BI__builtin_ia32_divpd512:
3942 case X86::BI__builtin_ia32_divps512:
3943 case X86::BI__builtin_ia32_mulpd512:
3944 case X86::BI__builtin_ia32_mulps512:
3945 case X86::BI__builtin_ia32_subpd512:
3946 case X86::BI__builtin_ia32_subps512:
3947 case X86::BI__builtin_ia32_cvtsi2sd64:
3948 case X86::BI__builtin_ia32_cvtsi2ss32:
3949 case X86::BI__builtin_ia32_cvtsi2ss64:
3950 case X86::BI__builtin_ia32_cvtusi2sd64:
3951 case X86::BI__builtin_ia32_cvtusi2ss32:
3952 case X86::BI__builtin_ia32_cvtusi2ss64:
3953 ArgNum = 2;
3954 HasRC = true;
3955 break;
3956 case X86::BI__builtin_ia32_cvtdq2ps512_mask:
3957 case X86::BI__builtin_ia32_cvtudq2ps512_mask:
3958 case X86::BI__builtin_ia32_cvtpd2ps512_mask:
3959 case X86::BI__builtin_ia32_cvtpd2dq512_mask:
3960 case X86::BI__builtin_ia32_cvtpd2qq512_mask:
3961 case X86::BI__builtin_ia32_cvtpd2udq512_mask:
3962 case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
3963 case X86::BI__builtin_ia32_cvtps2dq512_mask:
3964 case X86::BI__builtin_ia32_cvtps2qq512_mask:
3965 case X86::BI__builtin_ia32_cvtps2udq512_mask:
3966 case X86::BI__builtin_ia32_cvtps2uqq512_mask:
3967 case X86::BI__builtin_ia32_cvtqq2pd512_mask:
3968 case X86::BI__builtin_ia32_cvtqq2ps512_mask:
3969 case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
3970 case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
3971 ArgNum = 3;
3972 HasRC = true;
3973 break;
3974 case X86::BI__builtin_ia32_addss_round_mask:
3975 case X86::BI__builtin_ia32_addsd_round_mask:
3976 case X86::BI__builtin_ia32_divss_round_mask:
3977 case X86::BI__builtin_ia32_divsd_round_mask:
3978 case X86::BI__builtin_ia32_mulss_round_mask:
3979 case X86::BI__builtin_ia32_mulsd_round_mask:
3980 case X86::BI__builtin_ia32_subss_round_mask:
3981 case X86::BI__builtin_ia32_subsd_round_mask:
3982 case X86::BI__builtin_ia32_scalefpd512_mask:
3983 case X86::BI__builtin_ia32_scalefps512_mask:
3984 case X86::BI__builtin_ia32_scalefsd_round_mask:
3985 case X86::BI__builtin_ia32_scalefss_round_mask:
3986 case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
3987 case X86::BI__builtin_ia32_sqrtsd_round_mask:
3988 case X86::BI__builtin_ia32_sqrtss_round_mask:
3989 case X86::BI__builtin_ia32_vfmaddsd3_mask:
3990 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
3991 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
3992 case X86::BI__builtin_ia32_vfmaddss3_mask:
3993 case X86::BI__builtin_ia32_vfmaddss3_maskz:
3994 case X86::BI__builtin_ia32_vfmaddss3_mask3:
3995 case X86::BI__builtin_ia32_vfmaddpd512_mask:
3996 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
3997 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
3998 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
3999 case X86::BI__builtin_ia32_vfmaddps512_mask:
4000 case X86::BI__builtin_ia32_vfmaddps512_maskz:
4001 case X86::BI__builtin_ia32_vfmaddps512_mask3:
4002 case X86::BI__builtin_ia32_vfmsubps512_mask3:
4003 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
4004 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
4005 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
4006 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
4007 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
4008 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
4009 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
4010 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
4011 ArgNum = 4;
4012 HasRC = true;
4013 break;
4014 }
4015
4016 llvm::APSInt Result;
4017
4018 // We can't check the value of a dependent argument.
4019 Expr *Arg = TheCall->getArg(ArgNum);
4020 if (Arg->isTypeDependent() || Arg->isValueDependent())
4021 return false;
4022
4023 // Check constant-ness first.
4024 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4025 return true;
4026
4027  // Make sure the rounding mode is ROUND_CUR_DIRECTION or that the ROUND_NO_EXC
4028  // bit is set. If the intrinsic has rounding control (bits 1:0), make sure it is
4029  // only combined with ROUND_NO_EXC. If the intrinsic does not have rounding
4030  // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
4031 if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
4032 Result == 8/*ROUND_NO_EXC*/ ||
4033 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
4034 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
4035 return false;
4036
4037 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
4038 << Arg->getSourceRange();
4039}
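With the usual _MM_FROUND_* values (CUR_DIRECTION = 4, NO_EXC = 8, rounding modes 0..3 in bits 1:0), the acceptance condition above can be restated as a small predicate; the helper name is made up:

  static bool isAcceptedRoundingImmediate(unsigned Imm, bool HasRC) {
    if (Imm == 4 /*CUR_DIRECTION*/ || Imm == 8 /*NO_EXC*/)
      return true;
    if (!HasRC)
      return Imm == 12;           // CUR_DIRECTION | NO_EXC, SAE-only intrinsics
    return Imm >= 8 && Imm <= 11; // NO_EXC | one of the four rounding modes
  }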
4040
4041// Check if the gather/scatter scale is legal.
4042bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
4043 CallExpr *TheCall) {
4044 unsigned ArgNum = 0;
4045 switch (BuiltinID) {
4046 default:
4047 return false;
4048 case X86::BI__builtin_ia32_gatherpfdpd:
4049 case X86::BI__builtin_ia32_gatherpfdps:
4050 case X86::BI__builtin_ia32_gatherpfqpd:
4051 case X86::BI__builtin_ia32_gatherpfqps:
4052 case X86::BI__builtin_ia32_scatterpfdpd:
4053 case X86::BI__builtin_ia32_scatterpfdps:
4054 case X86::BI__builtin_ia32_scatterpfqpd:
4055 case X86::BI__builtin_ia32_scatterpfqps:
4056 ArgNum = 3;
4057 break;
4058 case X86::BI__builtin_ia32_gatherd_pd:
4059 case X86::BI__builtin_ia32_gatherd_pd256:
4060 case X86::BI__builtin_ia32_gatherq_pd:
4061 case X86::BI__builtin_ia32_gatherq_pd256:
4062 case X86::BI__builtin_ia32_gatherd_ps:
4063 case X86::BI__builtin_ia32_gatherd_ps256:
4064 case X86::BI__builtin_ia32_gatherq_ps:
4065 case X86::BI__builtin_ia32_gatherq_ps256:
4066 case X86::BI__builtin_ia32_gatherd_q:
4067 case X86::BI__builtin_ia32_gatherd_q256:
4068 case X86::BI__builtin_ia32_gatherq_q:
4069 case X86::BI__builtin_ia32_gatherq_q256:
4070 case X86::BI__builtin_ia32_gatherd_d:
4071 case X86::BI__builtin_ia32_gatherd_d256:
4072 case X86::BI__builtin_ia32_gatherq_d:
4073 case X86::BI__builtin_ia32_gatherq_d256:
4074 case X86::BI__builtin_ia32_gather3div2df:
4075 case X86::BI__builtin_ia32_gather3div2di:
4076 case X86::BI__builtin_ia32_gather3div4df:
4077 case X86::BI__builtin_ia32_gather3div4di:
4078 case X86::BI__builtin_ia32_gather3div4sf:
4079 case X86::BI__builtin_ia32_gather3div4si:
4080 case X86::BI__builtin_ia32_gather3div8sf:
4081 case X86::BI__builtin_ia32_gather3div8si:
4082 case X86::BI__builtin_ia32_gather3siv2df:
4083 case X86::BI__builtin_ia32_gather3siv2di:
4084 case X86::BI__builtin_ia32_gather3siv4df:
4085 case X86::BI__builtin_ia32_gather3siv4di:
4086 case X86::BI__builtin_ia32_gather3siv4sf:
4087 case X86::BI__builtin_ia32_gather3siv4si:
4088 case X86::BI__builtin_ia32_gather3siv8sf:
4089 case X86::BI__builtin_ia32_gather3siv8si:
4090 case X86::BI__builtin_ia32_gathersiv8df:
4091 case X86::BI__builtin_ia32_gathersiv16sf:
4092 case X86::BI__builtin_ia32_gatherdiv8df:
4093 case X86::BI__builtin_ia32_gatherdiv16sf:
4094 case X86::BI__builtin_ia32_gathersiv8di:
4095 case X86::BI__builtin_ia32_gathersiv16si:
4096 case X86::BI__builtin_ia32_gatherdiv8di:
4097 case X86::BI__builtin_ia32_gatherdiv16si:
4098 case X86::BI__builtin_ia32_scatterdiv2df:
4099 case X86::BI__builtin_ia32_scatterdiv2di:
4100 case X86::BI__builtin_ia32_scatterdiv4df:
4101 case X86::BI__builtin_ia32_scatterdiv4di:
4102 case X86::BI__builtin_ia32_scatterdiv4sf:
4103 case X86::BI__builtin_ia32_scatterdiv4si:
4104 case X86::BI__builtin_ia32_scatterdiv8sf:
4105 case X86::BI__builtin_ia32_scatterdiv8si:
4106 case X86::BI__builtin_ia32_scattersiv2df:
4107 case X86::BI__builtin_ia32_scattersiv2di:
4108 case X86::BI__builtin_ia32_scattersiv4df:
4109 case X86::BI__builtin_ia32_scattersiv4di:
4110 case X86::BI__builtin_ia32_scattersiv4sf:
4111 case X86::BI__builtin_ia32_scattersiv4si:
4112 case X86::BI__builtin_ia32_scattersiv8sf:
4113 case X86::BI__builtin_ia32_scattersiv8si:
4114 case X86::BI__builtin_ia32_scattersiv8df:
4115 case X86::BI__builtin_ia32_scattersiv16sf:
4116 case X86::BI__builtin_ia32_scatterdiv8df:
4117 case X86::BI__builtin_ia32_scatterdiv16sf:
4118 case X86::BI__builtin_ia32_scattersiv8di:
4119 case X86::BI__builtin_ia32_scattersiv16si:
4120 case X86::BI__builtin_ia32_scatterdiv8di:
4121 case X86::BI__builtin_ia32_scatterdiv16si:
4122 ArgNum = 4;
4123 break;
4124 }
4125
4126 llvm::APSInt Result;
4127
4128 // We can't check the value of a dependent argument.
4129 Expr *Arg = TheCall->getArg(ArgNum);
4130 if (Arg->isTypeDependent() || Arg->isValueDependent())
4131 return false;
4132
4133 // Check constant-ness first.
4134 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4135 return true;
4136
4137 if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
4138 return false;
4139
4140 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
4141 << Arg->getSourceRange();
4142}
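The scale immediate must be one of the element sizes 1, 2, 4, or 8. A hedged example using the AVX-512 gather wrapper from immintrin.h (the wrapper name and argument order are assumptions):

  #include <immintrin.h>

  __attribute__((target("avx512f")))
  __m512 gather(const float *base, __m512i idx) {
    return _mm512_i32gather_ps(idx, base, 4); // OK: scale 4 == sizeof(float)
    // _mm512_i32gather_ps(idx, base, 3);     // rejected: err_x86_builtin_invalid_scale
  }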
4143
4144enum { TileRegLow = 0, TileRegHigh = 7 };
4145
4146bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
4147 ArrayRef<int> ArgNums) {
4148 for (int ArgNum : ArgNums) {
4149 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
4150 return true;
4151 }
4152 return false;
4153}
4154
4155bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
4156 ArrayRef<int> ArgNums) {
4157  // Because the maximum number of tile registers is TileRegHigh + 1, use one bit
4158  // per register in the bitset to track which tile registers have been used.
4159 std::bitset<TileRegHigh + 1> ArgValues;
4160 for (int ArgNum : ArgNums) {
4161 Expr *Arg = TheCall->getArg(ArgNum);
4162 if (Arg->isTypeDependent() || Arg->isValueDependent())
4163 continue;
4164
4165 llvm::APSInt Result;
4166 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4167 return true;
4168 int ArgExtValue = Result.getExtValue();
4169    assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
4170           "Incorrect tile register num.");
4171 if (ArgValues.test(ArgExtValue))
4172 return Diag(TheCall->getBeginLoc(),
4173 diag::err_x86_builtin_tile_arg_duplicate)
4174 << TheCall->getArg(ArgNum)->getSourceRange();
4175 ArgValues.set(ArgExtValue);
4176 }
4177 return false;
4178}
4179
4180bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
4181 ArrayRef<int> ArgNums) {
4182 return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
4183 CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
4184}
4185
4186bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
4187 switch (BuiltinID) {
4188 default:
4189 return false;
4190 case X86::BI__builtin_ia32_tileloadd64:
4191 case X86::BI__builtin_ia32_tileloaddt164:
4192 case X86::BI__builtin_ia32_tilestored64:
4193 case X86::BI__builtin_ia32_tilezero:
4194 return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
4195 case X86::BI__builtin_ia32_tdpbssd:
4196 case X86::BI__builtin_ia32_tdpbsud:
4197 case X86::BI__builtin_ia32_tdpbusd:
4198 case X86::BI__builtin_ia32_tdpbuud:
4199 case X86::BI__builtin_ia32_tdpbf16ps:
4200 return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
4201 }
4202}
4203static bool isX86_32Builtin(unsigned BuiltinID) {
4204 // These builtins only work on x86-32 targets.
4205 switch (BuiltinID) {
4206 case X86::BI__builtin_ia32_readeflags_u32:
4207 case X86::BI__builtin_ia32_writeeflags_u32:
4208 return true;
4209 }
4210
4211 return false;
4212}
4213
4214bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
4215 CallExpr *TheCall) {
4216 if (BuiltinID == X86::BI__builtin_cpu_supports)
4217 return SemaBuiltinCpuSupports(*this, TI, TheCall);
4218
4219 if (BuiltinID == X86::BI__builtin_cpu_is)
4220 return SemaBuiltinCpuIs(*this, TI, TheCall);
4221
4222 // Check for 32-bit only builtins on a 64-bit target.
4223 const llvm::Triple &TT = TI.getTriple();
4224 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
4225 return Diag(TheCall->getCallee()->getBeginLoc(),
4226 diag::err_32_bit_builtin_64_bit_tgt);
4227
4228  // If the intrinsic has rounding or SAE, make sure it's valid.
4229 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
4230 return true;
4231
4232  // If the intrinsic has a gather/scatter scale immediate, make sure it's valid.
4233 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
4234 return true;
4235
4236  // If the intrinsic has tile arguments, make sure they are valid.
4237 if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
4238 return true;
4239
4240 // For intrinsics which take an immediate value as part of the instruction,
4241 // range check them here.
4242 int i = 0, l = 0, u = 0;
4243 switch (BuiltinID) {
4244 default:
4245 return false;
4246 case X86::BI__builtin_ia32_vec_ext_v2si:
4247 case X86::BI__builtin_ia32_vec_ext_v2di:
4248 case X86::BI__builtin_ia32_vextractf128_pd256:
4249 case X86::BI__builtin_ia32_vextractf128_ps256:
4250 case X86::BI__builtin_ia32_vextractf128_si256:
4251 case X86::BI__builtin_ia32_extract128i256:
4252 case X86::BI__builtin_ia32_extractf64x4_mask:
4253 case X86::BI__builtin_ia32_extracti64x4_mask:
4254 case X86::BI__builtin_ia32_extractf32x8_mask:
4255 case X86::BI__builtin_ia32_extracti32x8_mask:
4256 case X86::BI__builtin_ia32_extractf64x2_256_mask:
4257 case X86::BI__builtin_ia32_extracti64x2_256_mask:
4258 case X86::BI__builtin_ia32_extractf32x4_256_mask:
4259 case X86::BI__builtin_ia32_extracti32x4_256_mask:
4260 i = 1; l = 0; u = 1;
4261 break;
4262 case X86::BI__builtin_ia32_vec_set_v2di:
4263 case X86::BI__builtin_ia32_vinsertf128_pd256:
4264 case X86::BI__builtin_ia32_vinsertf128_ps256:
4265 case X86::BI__builtin_ia32_vinsertf128_si256:
4266 case X86::BI__builtin_ia32_insert128i256:
4267 case X86::BI__builtin_ia32_insertf32x8:
4268 case X86::BI__builtin_ia32_inserti32x8:
4269 case X86::BI__builtin_ia32_insertf64x4:
4270 case X86::BI__builtin_ia32_inserti64x4:
4271 case X86::BI__builtin_ia32_insertf64x2_256:
4272 case X86::BI__builtin_ia32_inserti64x2_256:
4273 case X86::BI__builtin_ia32_insertf32x4_256:
4274 case X86::BI__builtin_ia32_inserti32x4_256:
4275 i = 2; l = 0; u = 1;
4276 break;
4277 case X86::BI__builtin_ia32_vpermilpd:
4278 case X86::BI__builtin_ia32_vec_ext_v4hi:
4279 case X86::BI__builtin_ia32_vec_ext_v4si:
4280 case X86::BI__builtin_ia32_vec_ext_v4sf:
4281 case X86::BI__builtin_ia32_vec_ext_v4di:
4282 case X86::BI__builtin_ia32_extractf32x4_mask:
4283 case X86::BI__builtin_ia32_extracti32x4_mask:
4284 case X86::BI__builtin_ia32_extractf64x2_512_mask:
4285 case X86::BI__builtin_ia32_extracti64x2_512_mask:
4286 i = 1; l = 0; u = 3;
4287 break;
4288 case X86::BI_mm_prefetch:
4289 case X86::BI__builtin_ia32_vec_ext_v8hi:
4290 case X86::BI__builtin_ia32_vec_ext_v8si:
4291 i = 1; l = 0; u = 7;
4292 break;
4293 case X86::BI__builtin_ia32_sha1rnds4:
4294 case X86::BI__builtin_ia32_blendpd:
4295 case X86::BI__builtin_ia32_shufpd:
4296 case X86::BI__builtin_ia32_vec_set_v4hi:
4297 case X86::BI__builtin_ia32_vec_set_v4si:
4298 case X86::BI__builtin_ia32_vec_set_v4di:
4299 case X86::BI__builtin_ia32_shuf_f32x4_256:
4300 case X86::BI__builtin_ia32_shuf_f64x2_256:
4301 case X86::BI__builtin_ia32_shuf_i32x4_256:
4302 case X86::BI__builtin_ia32_shuf_i64x2_256:
4303 case X86::BI__builtin_ia32_insertf64x2_512:
4304 case X86::BI__builtin_ia32_inserti64x2_512:
4305 case X86::BI__builtin_ia32_insertf32x4:
4306 case X86::BI__builtin_ia32_inserti32x4:
4307 i = 2; l = 0; u = 3;
4308 break;
4309 case X86::BI__builtin_ia32_vpermil2pd:
4310 case X86::BI__builtin_ia32_vpermil2pd256:
4311 case X86::BI__builtin_ia32_vpermil2ps:
4312 case X86::BI__builtin_ia32_vpermil2ps256:
4313 i = 3; l = 0; u = 3;
4314 break;
4315 case X86::BI__builtin_ia32_cmpb128_mask:
4316 case X86::BI__builtin_ia32_cmpw128_mask:
4317 case X86::BI__builtin_ia32_cmpd128_mask:
4318 case X86::BI__builtin_ia32_cmpq128_mask:
4319 case X86::BI__builtin_ia32_cmpb256_mask:
4320 case X86::BI__builtin_ia32_cmpw256_mask:
4321 case X86::BI__builtin_ia32_cmpd256_mask:
4322 case X86::BI__builtin_ia32_cmpq256_mask:
4323 case X86::BI__builtin_ia32_cmpb512_mask:
4324 case X86::BI__builtin_ia32_cmpw512_mask:
4325 case X86::BI__builtin_ia32_cmpd512_mask:
4326 case X86::BI__builtin_ia32_cmpq512_mask:
4327 case X86::BI__builtin_ia32_ucmpb128_mask:
4328 case X86::BI__builtin_ia32_ucmpw128_mask:
4329 case X86::BI__builtin_ia32_ucmpd128_mask:
4330 case X86::BI__builtin_ia32_ucmpq128_mask:
4331 case X86::BI__builtin_ia32_ucmpb256_mask:
4332 case X86::BI__builtin_ia32_ucmpw256_mask:
4333 case X86::BI__builtin_ia32_ucmpd256_mask:
4334 case X86::BI__builtin_ia32_ucmpq256_mask:
4335 case X86::BI__builtin_ia32_ucmpb512_mask:
4336 case X86::BI__builtin_ia32_ucmpw512_mask:
4337 case X86::BI__builtin_ia32_ucmpd512_mask:
4338 case X86::BI__builtin_ia32_ucmpq512_mask:
4339 case X86::BI__builtin_ia32_vpcomub:
4340 case X86::BI__builtin_ia32_vpcomuw:
4341 case X86::BI__builtin_ia32_vpcomud:
4342 case X86::BI__builtin_ia32_vpcomuq:
4343 case X86::BI__builtin_ia32_vpcomb:
4344 case X86::BI__builtin_ia32_vpcomw:
4345 case X86::BI__builtin_ia32_vpcomd:
4346 case X86::BI__builtin_ia32_vpcomq:
4347 case X86::BI__builtin_ia32_vec_set_v8hi:
4348 case X86::BI__builtin_ia32_vec_set_v8si:
4349 i = 2; l = 0; u = 7;
4350 break;
4351 case X86::BI__builtin_ia32_vpermilpd256:
4352 case X86::BI__builtin_ia32_roundps:
4353 case X86::BI__builtin_ia32_roundpd:
4354 case X86::BI__builtin_ia32_roundps256:
4355 case X86::BI__builtin_ia32_roundpd256:
4356 case X86::BI__builtin_ia32_getmantpd128_mask:
4357 case X86::BI__builtin_ia32_getmantpd256_mask:
4358 case X86::BI__builtin_ia32_getmantps128_mask:
4359 case X86::BI__builtin_ia32_getmantps256_mask:
4360 case X86::BI__builtin_ia32_getmantpd512_mask:
4361 case X86::BI__builtin_ia32_getmantps512_mask:
4362 case X86::BI__builtin_ia32_vec_ext_v16qi:
4363 case X86::BI__builtin_ia32_vec_ext_v16hi:
4364 i = 1; l = 0; u = 15;
4365 break;
4366 case X86::BI__builtin_ia32_pblendd128:
4367 case X86::BI__builtin_ia32_blendps:
4368 case X86::BI__builtin_ia32_blendpd256:
4369 case X86::BI__builtin_ia32_shufpd256:
4370 case X86::BI__builtin_ia32_roundss:
4371 case X86::BI__builtin_ia32_roundsd:
4372 case X86::BI__builtin_ia32_rangepd128_mask:
4373 case X86::BI__builtin_ia32_rangepd256_mask:
4374 case X86::BI__builtin_ia32_rangepd512_mask:
4375 case X86::BI__builtin_ia32_rangeps128_mask:
4376 case X86::BI__builtin_ia32_rangeps256_mask:
4377 case X86::BI__builtin_ia32_rangeps512_mask:
4378 case X86::BI__builtin_ia32_getmantsd_round_mask:
4379 case X86::BI__builtin_ia32_getmantss_round_mask:
4380 case X86::BI__builtin_ia32_vec_set_v16qi:
4381 case X86::BI__builtin_ia32_vec_set_v16hi:
4382 i = 2; l = 0; u = 15;
4383 break;
4384 case X86::BI__builtin_ia32_vec_ext_v32qi:
4385 i = 1; l = 0; u = 31;
4386 break;
4387 case X86::BI__builtin_ia32_cmpps:
4388 case X86::BI__builtin_ia32_cmpss:
4389 case X86::BI__builtin_ia32_cmppd:
4390 case X86::BI__builtin_ia32_cmpsd:
4391 case X86::BI__builtin_ia32_cmpps256:
4392 case X86::BI__builtin_ia32_cmppd256:
4393 case X86::BI__builtin_ia32_cmpps128_mask:
4394 case X86::BI__builtin_ia32_cmppd128_mask:
4395 case X86::BI__builtin_ia32_cmpps256_mask:
4396 case X86::BI__builtin_ia32_cmppd256_mask:
4397 case X86::BI__builtin_ia32_cmpps512_mask:
4398 case X86::BI__builtin_ia32_cmppd512_mask:
4399 case X86::BI__builtin_ia32_cmpsd_mask:
4400 case X86::BI__builtin_ia32_cmpss_mask:
4401 case X86::BI__builtin_ia32_vec_set_v32qi:
4402 i = 2; l = 0; u = 31;
4403 break;
4404 case X86::BI__builtin_ia32_permdf256:
4405 case X86::BI__builtin_ia32_permdi256:
4406 case X86::BI__builtin_ia32_permdf512:
4407 case X86::BI__builtin_ia32_permdi512:
4408 case X86::BI__builtin_ia32_vpermilps:
4409 case X86::BI__builtin_ia32_vpermilps256:
4410 case X86::BI__builtin_ia32_vpermilpd512:
4411 case X86::BI__builtin_ia32_vpermilps512:
4412 case X86::BI__builtin_ia32_pshufd:
4413 case X86::BI__builtin_ia32_pshufd256:
4414 case X86::BI__builtin_ia32_pshufd512:
4415 case X86::BI__builtin_ia32_pshufhw:
4416 case X86::BI__builtin_ia32_pshufhw256:
4417 case X86::BI__builtin_ia32_pshufhw512:
4418 case X86::BI__builtin_ia32_pshuflw:
4419 case X86::BI__builtin_ia32_pshuflw256:
4420 case X86::BI__builtin_ia32_pshuflw512:
4421 case X86::BI__builtin_ia32_vcvtps2ph:
4422 case X86::BI__builtin_ia32_vcvtps2ph_mask:
4423 case X86::BI__builtin_ia32_vcvtps2ph256:
4424 case X86::BI__builtin_ia32_vcvtps2ph256_mask:
4425 case X86::BI__builtin_ia32_vcvtps2ph512_mask:
4426 case X86::BI__builtin_ia32_rndscaleps_128_mask:
4427 case X86::BI__builtin_ia32_rndscalepd_128_mask:
4428 case X86::BI__builtin_ia32_rndscaleps_256_mask:
4429 case X86::BI__builtin_ia32_rndscalepd_256_mask:
4430 case X86::BI__builtin_ia32_rndscaleps_mask:
4431 case X86::BI__builtin_ia32_rndscalepd_mask:
4432 case X86::BI__builtin_ia32_reducepd128_mask:
4433 case X86::BI__builtin_ia32_reducepd256_mask:
4434 case X86::BI__builtin_ia32_reducepd512_mask:
4435 case X86::BI__builtin_ia32_reduceps128_mask:
4436 case X86::BI__builtin_ia32_reduceps256_mask:
4437 case X86::BI__builtin_ia32_reduceps512_mask:
4438 case X86::BI__builtin_ia32_prold512:
4439 case X86::BI__builtin_ia32_prolq512:
4440 case X86::BI__builtin_ia32_prold128:
4441 case X86::BI__builtin_ia32_prold256:
4442 case X86::BI__builtin_ia32_prolq128:
4443 case X86::BI__builtin_ia32_prolq256:
4444 case X86::BI__builtin_ia32_prord512:
4445 case X86::BI__builtin_ia32_prorq512:
4446 case X86::BI__builtin_ia32_prord128:
4447 case X86::BI__builtin_ia32_prord256:
4448 case X86::BI__builtin_ia32_prorq128:
4449 case X86::BI__builtin_ia32_prorq256:
4450 case X86::BI__builtin_ia32_fpclasspd128_mask:
4451 case X86::BI__builtin_ia32_fpclasspd256_mask:
4452 case X86::BI__builtin_ia32_fpclassps128_mask:
4453 case X86::BI__builtin_ia32_fpclassps256_mask:
4454 case X86::BI__builtin_ia32_fpclassps512_mask:
4455 case X86::BI__builtin_ia32_fpclasspd512_mask:
4456 case X86::BI__builtin_ia32_fpclasssd_mask:
4457 case X86::BI__builtin_ia32_fpclassss_mask:
4458 case X86::BI__builtin_ia32_pslldqi128_byteshift:
4459 case X86::BI__builtin_ia32_pslldqi256_byteshift:
4460 case X86::BI__builtin_ia32_pslldqi512_byteshift:
4461 case X86::BI__builtin_ia32_psrldqi128_byteshift:
4462 case X86::BI__builtin_ia32_psrldqi256_byteshift:
4463 case X86::BI__builtin_ia32_psrldqi512_byteshift:
4464 case X86::BI__builtin_ia32_kshiftliqi:
4465 case X86::BI__builtin_ia32_kshiftlihi:
4466 case X86::BI__builtin_ia32_kshiftlisi:
4467 case X86::BI__builtin_ia32_kshiftlidi:
4468 case X86::BI__builtin_ia32_kshiftriqi:
4469 case X86::BI__builtin_ia32_kshiftrihi:
4470 case X86::BI__builtin_ia32_kshiftrisi:
4471 case X86::BI__builtin_ia32_kshiftridi:
4472 i = 1; l = 0; u = 255;
4473 break;
4474 case X86::BI__builtin_ia32_vperm2f128_pd256:
4475 case X86::BI__builtin_ia32_vperm2f128_ps256:
4476 case X86::BI__builtin_ia32_vperm2f128_si256:
4477 case X86::BI__builtin_ia32_permti256:
4478 case X86::BI__builtin_ia32_pblendw128:
4479 case X86::BI__builtin_ia32_pblendw256:
4480 case X86::BI__builtin_ia32_blendps256:
4481 case X86::BI__builtin_ia32_pblendd256:
4482 case X86::BI__builtin_ia32_palignr128:
4483 case X86::BI__builtin_ia32_palignr256:
4484 case X86::BI__builtin_ia32_palignr512:
4485 case X86::BI__builtin_ia32_alignq512:
4486 case X86::BI__builtin_ia32_alignd512:
4487 case X86::BI__builtin_ia32_alignd128:
4488 case X86::BI__builtin_ia32_alignd256:
4489 case X86::BI__builtin_ia32_alignq128:
4490 case X86::BI__builtin_ia32_alignq256:
4491 case X86::BI__builtin_ia32_vcomisd:
4492 case X86::BI__builtin_ia32_vcomiss:
4493 case X86::BI__builtin_ia32_shuf_f32x4:
4494 case X86::BI__builtin_ia32_shuf_f64x2:
4495 case X86::BI__builtin_ia32_shuf_i32x4:
4496 case X86::BI__builtin_ia32_shuf_i64x2:
4497 case X86::BI__builtin_ia32_shufpd512:
4498 case X86::BI__builtin_ia32_shufps:
4499 case X86::BI__builtin_ia32_shufps256:
4500 case X86::BI__builtin_ia32_shufps512:
4501 case X86::BI__builtin_ia32_dbpsadbw128:
4502 case X86::BI__builtin_ia32_dbpsadbw256:
4503 case X86::BI__builtin_ia32_dbpsadbw512:
4504 case X86::BI__builtin_ia32_vpshldd128:
4505 case X86::BI__builtin_ia32_vpshldd256:
4506 case X86::BI__builtin_ia32_vpshldd512:
4507 case X86::BI__builtin_ia32_vpshldq128:
4508 case X86::BI__builtin_ia32_vpshldq256:
4509 case X86::BI__builtin_ia32_vpshldq512:
4510 case X86::BI__builtin_ia32_vpshldw128:
4511 case X86::BI__builtin_ia32_vpshldw256:
4512 case X86::BI__builtin_ia32_vpshldw512:
4513 case X86::BI__builtin_ia32_vpshrdd128:
4514 case X86::BI__builtin_ia32_vpshrdd256:
4515 case X86::BI__builtin_ia32_vpshrdd512:
4516 case X86::BI__builtin_ia32_vpshrdq128:
4517 case X86::BI__builtin_ia32_vpshrdq256:
4518 case X86::BI__builtin_ia32_vpshrdq512:
4519 case X86::BI__builtin_ia32_vpshrdw128:
4520 case X86::BI__builtin_ia32_vpshrdw256:
4521 case X86::BI__builtin_ia32_vpshrdw512:
4522 i = 2; l = 0; u = 255;
4523 break;
4524 case X86::BI__builtin_ia32_fixupimmpd512_mask:
4525 case X86::BI__builtin_ia32_fixupimmpd512_maskz:
4526 case X86::BI__builtin_ia32_fixupimmps512_mask:
4527 case X86::BI__builtin_ia32_fixupimmps512_maskz:
4528 case X86::BI__builtin_ia32_fixupimmsd_mask:
4529 case X86::BI__builtin_ia32_fixupimmsd_maskz:
4530 case X86::BI__builtin_ia32_fixupimmss_mask:
4531 case X86::BI__builtin_ia32_fixupimmss_maskz:
4532 case X86::BI__builtin_ia32_fixupimmpd128_mask:
4533 case X86::BI__builtin_ia32_fixupimmpd128_maskz:
4534 case X86::BI__builtin_ia32_fixupimmpd256_mask:
4535 case X86::BI__builtin_ia32_fixupimmpd256_maskz:
4536 case X86::BI__builtin_ia32_fixupimmps128_mask:
4537 case X86::BI__builtin_ia32_fixupimmps128_maskz:
4538 case X86::BI__builtin_ia32_fixupimmps256_mask:
4539 case X86::BI__builtin_ia32_fixupimmps256_maskz:
4540 case X86::BI__builtin_ia32_pternlogd512_mask:
4541 case X86::BI__builtin_ia32_pternlogd512_maskz:
4542 case X86::BI__builtin_ia32_pternlogq512_mask:
4543 case X86::BI__builtin_ia32_pternlogq512_maskz:
4544 case X86::BI__builtin_ia32_pternlogd128_mask:
4545 case X86::BI__builtin_ia32_pternlogd128_maskz:
4546 case X86::BI__builtin_ia32_pternlogd256_mask:
4547 case X86::BI__builtin_ia32_pternlogd256_maskz:
4548 case X86::BI__builtin_ia32_pternlogq128_mask:
4549 case X86::BI__builtin_ia32_pternlogq128_maskz:
4550 case X86::BI__builtin_ia32_pternlogq256_mask:
4551 case X86::BI__builtin_ia32_pternlogq256_maskz:
4552 i = 3; l = 0; u = 255;
4553 break;
4554 case X86::BI__builtin_ia32_gatherpfdpd:
4555 case X86::BI__builtin_ia32_gatherpfdps:
4556 case X86::BI__builtin_ia32_gatherpfqpd:
4557 case X86::BI__builtin_ia32_gatherpfqps:
4558 case X86::BI__builtin_ia32_scatterpfdpd:
4559 case X86::BI__builtin_ia32_scatterpfdps:
4560 case X86::BI__builtin_ia32_scatterpfqpd:
4561 case X86::BI__builtin_ia32_scatterpfqps:
4562 i = 4; l = 2; u = 3;
4563 break;
4564 case X86::BI__builtin_ia32_reducesd_mask:
4565 case X86::BI__builtin_ia32_reducess_mask:
4566 case X86::BI__builtin_ia32_rndscalesd_round_mask:
4567 case X86::BI__builtin_ia32_rndscaless_round_mask:
4568 i = 4; l = 0; u = 255;
4569 break;
4570 }
4571
4572  // Note that we don't force a hard error on the range check here, allowing
4573  // template-generated or macro-generated dead code to potentially have out-of-
4574  // range values. Such code still needs to compile, but it doesn't necessarily
4575  // need to make sense. We use a warning that defaults to an error.
4576 return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
4577}
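Editor's illustration (not part of SemaChecking.cpp): a minimal sketch of the immediate range check, using __builtin_ia32_vec_ext_v8hi, whose element index (argument 1) must lie in [0, 7] per the table above (assumes an SSE2-capable x86 target):

  typedef short v8hi __attribute__((__vector_size__(16)));

  int extract_demo(v8hi v) {
    int ok = __builtin_ia32_vec_ext_v8hi(v, 3);   // accepted: index in range
    int bad = __builtin_ia32_vec_ext_v8hi(v, 9);  // diagnosed: out of [0, 7]; the
                                                  // warning defaults to an error
    return ok + bad;
  }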
4578
4579/// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
4580/// parameter with the FormatAttr's correct format_idx and firstDataArg.
4581/// Returns true when the format fits the function and the FormatStringInfo has
4582/// been populated.
4583bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
4584 FormatStringInfo *FSI) {
4585 FSI->HasVAListArg = Format->getFirstArg() == 0;
4586 FSI->FormatIdx = Format->getFormatIdx() - 1;
4587 FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;
4588
4589 // The way the format attribute works in GCC, the implicit this argument
4590 // of member functions is counted. However, it doesn't appear in our own
4591 // lists, so decrement format_idx in that case.
4592 if (IsCXXMember) {
4593 if(FSI->FormatIdx == 0)
4594 return false;
4595 --FSI->FormatIdx;
4596 if (FSI->FirstDataArg != 0)
4597 --FSI->FirstDataArg;
4598 }
4599 return true;
4600}
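Editor's illustration (not part of SemaChecking.cpp): for a non-static member function, GCC-style format indices count the implicit 'this' as argument 1, which is why getFormatStringInfo subtracts one extra position. A minimal sketch (hypothetical Logger type):

  struct Logger {
    // 'this' is argument 1, so the format string is index 2 and data starts at 3.
    void log(const char *fmt, ...) __attribute__((format(printf, 2, 3)));
  };

For this declaration the function above yields FormatIdx == 0 and FirstDataArg == 1.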
4601
4602/// Checks if the given expression evaluates to null.
4603///
4604/// Returns true if the value evaluates to null.
4605static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
4606 // If the expression has non-null type, it doesn't evaluate to null.
4607 if (auto nullability
4608 = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
4609 if (*nullability == NullabilityKind::NonNull)
4610 return false;
4611 }
4612
4613 // As a special case, transparent unions initialized with zero are
4614 // considered null for the purposes of the nonnull attribute.
4615 if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
4616 if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
4617 if (const CompoundLiteralExpr *CLE =
4618 dyn_cast<CompoundLiteralExpr>(Expr))
4619 if (const InitListExpr *ILE =
4620 dyn_cast<InitListExpr>(CLE->getInitializer()))
4621 Expr = ILE->getInit(0);
4622 }
4623
4624 bool Result;
4625 return (!Expr->isValueDependent() &&
4626 Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
4627 !Result);
4628}
4629
4630static void CheckNonNullArgument(Sema &S,
4631 const Expr *ArgExpr,
4632 SourceLocation CallSiteLoc) {
4633 if (CheckNonNullExpr(S, ArgExpr))
4634 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
4635 S.PDiag(diag::warn_null_arg)
4636 << ArgExpr->getSourceRange());
4637}
4638
4639bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
4640 FormatStringInfo FSI;
4641 if ((GetFormatStringType(Format) == FST_NSString) &&
4642 getFormatStringInfo(Format, false, &FSI)) {
4643 Idx = FSI.FormatIdx;
4644 return true;
4645 }
4646 return false;
4647}
4648
4649/// Diagnose use of %s directive in an NSString which is being passed
4650/// as formatting string to formatting method.
4651static void
4652DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
4653 const NamedDecl *FDecl,
4654 Expr **Args,
4655 unsigned NumArgs) {
4656 unsigned Idx = 0;
4657 bool Format = false;
4658 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
4659 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
4660 Idx = 2;
4661 Format = true;
4662 }
4663 else
4664 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
4665 if (S.GetFormatNSStringIdx(I, Idx)) {
4666 Format = true;
4667 break;
4668 }
4669 }
4670 if (!Format || NumArgs <= Idx)
4671 return;
4672 const Expr *FormatExpr = Args[Idx];
4673 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
4674 FormatExpr = CSCE->getSubExpr();
4675 const StringLiteral *FormatString;
4676 if (const ObjCStringLiteral *OSL =
4677 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
4678 FormatString = OSL->getString();
4679 else
4680 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
4681 if (!FormatString)
4682 return;
4683 if (S.FormatStringHasSArg(FormatString)) {
4684 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
4685 << "%s" << 1 << 1;
4686 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
4687 << FDecl->getDeclName();
4688 }
4689}
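Editor's illustration (not part of SemaChecking.cpp): a minimal sketch of the diagnostic above, assuming the file is compiled as Objective-C and the CoreFoundation declarations are available:

  #include <CoreFoundation/CoreFoundation.h>

  CFStringRef demo(const char *name) {
    // %s in a CFString-family format is flagged with
    // warn_objc_cdirective_format_string.
    return CFStringCreateWithFormat(NULL, NULL, CFSTR("name: %s"), name);
  }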
4690
4691/// Determine whether the given type has a non-null nullability annotation.
4692static bool isNonNullType(ASTContext &ctx, QualType type) {
4693 if (auto nullability = type->getNullability(ctx))
4694 return *nullability == NullabilityKind::NonNull;
4695
4696 return false;
4697}
4698
4699static void CheckNonNullArguments(Sema &S,
4700 const NamedDecl *FDecl,
4701 const FunctionProtoType *Proto,
4702 ArrayRef<const Expr *> Args,
4703 SourceLocation CallSiteLoc) {
4704 assert((FDecl || Proto) && "Need a function declaration or prototype")((void)0);
4705
4706  // Already checked by the constant evaluator.
4707 if (S.isConstantEvaluated())
4708 return;
4709 // Check the attributes attached to the method/function itself.
4710 llvm::SmallBitVector NonNullArgs;
4711 if (FDecl) {
4712 // Handle the nonnull attribute on the function/method declaration itself.
4713 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
4714 if (!NonNull->args_size()) {
4715 // Easy case: all pointer arguments are nonnull.
4716 for (const auto *Arg : Args)
4717 if (S.isValidPointerAttrType(Arg->getType()))
4718 CheckNonNullArgument(S, Arg, CallSiteLoc);
4719 return;
4720 }
4721
4722 for (const ParamIdx &Idx : NonNull->args()) {
4723 unsigned IdxAST = Idx.getASTIndex();
4724 if (IdxAST >= Args.size())
4725 continue;
4726 if (NonNullArgs.empty())
4727 NonNullArgs.resize(Args.size());
4728 NonNullArgs.set(IdxAST);
4729 }
4730 }
4731 }
4732
4733 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
4734 // Handle the nonnull attribute on the parameters of the
4735 // function/method.
4736 ArrayRef<ParmVarDecl*> parms;
4737 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
4738 parms = FD->parameters();
4739 else
4740 parms = cast<ObjCMethodDecl>(FDecl)->parameters();
4741
4742 unsigned ParamIndex = 0;
4743 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
4744 I != E; ++I, ++ParamIndex) {
4745 const ParmVarDecl *PVD = *I;
4746 if (PVD->hasAttr<NonNullAttr>() ||
4747 isNonNullType(S.Context, PVD->getType())) {
4748 if (NonNullArgs.empty())
4749 NonNullArgs.resize(Args.size());
4750
4751 NonNullArgs.set(ParamIndex);
4752 }
4753 }
4754 } else {
4755 // If we have a non-function, non-method declaration but no
4756 // function prototype, try to dig out the function prototype.
4757 if (!Proto) {
4758 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
4759 QualType type = VD->getType().getNonReferenceType();
4760 if (auto pointerType = type->getAs<PointerType>())
4761 type = pointerType->getPointeeType();
4762 else if (auto blockType = type->getAs<BlockPointerType>())
4763 type = blockType->getPointeeType();
4764 // FIXME: data member pointers?
4765
4766 // Dig out the function prototype, if there is one.
4767 Proto = type->getAs<FunctionProtoType>();
4768 }
4769 }
4770
4771 // Fill in non-null argument information from the nullability
4772 // information on the parameter types (if we have them).
4773 if (Proto) {
4774 unsigned Index = 0;
4775 for (auto paramType : Proto->getParamTypes()) {
4776 if (isNonNullType(S.Context, paramType)) {
4777 if (NonNullArgs.empty())
4778 NonNullArgs.resize(Args.size());
4779
4780 NonNullArgs.set(Index);
4781 }
4782
4783 ++Index;
4784 }
4785 }
4786 }
4787
4788 // Check for non-null arguments.
4789 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
4790 ArgIndex != ArgIndexEnd; ++ArgIndex) {
4791 if (NonNullArgs[ArgIndex])
4792 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc);
4793 }
4794}
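Editor's illustration (not part of SemaChecking.cpp): a minimal sketch of the null-argument diagnostic driven by the bit vector built above (hypothetical copy_name function):

  void copy_name(char *dst, const char *src) __attribute__((nonnull(1, 2)));

  void nonnull_demo(char *buf) {
    copy_name(buf, 0);  // diagnosed with warn_null_arg: argument 2 is null
  }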
4795
4796/// Warn if a pointer or reference argument passed to a function points to an
4797/// object that is less aligned than the parameter. This can happen when
4798/// creating a typedef with a lower alignment than the original type and then
4799/// calling functions defined in terms of the original type.
4800void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
4801 StringRef ParamName, QualType ArgTy,
4802 QualType ParamTy) {
4803
4804 // If a function accepts a pointer or reference type
4805 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType())
4806 return;
4807
4808 // If the parameter is a pointer type, get the pointee type for the
4809 // argument too. If the parameter is a reference type, don't try to get
4810 // the pointee type for the argument.
4811 if (ParamTy->isPointerType())
4812 ArgTy = ArgTy->getPointeeType();
4813
4814 // Remove reference or pointer
4815 ParamTy = ParamTy->getPointeeType();
4816
4817 // Find expected alignment, and the actual alignment of the passed object.
4818 // getTypeAlignInChars requires complete types
4819 if (ArgTy.isNull() || ParamTy->isIncompleteType() ||
4820 ArgTy->isIncompleteType() || ParamTy->isUndeducedType() ||
4821 ArgTy->isUndeducedType())
4822 return;
4823
4824 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy);
4825 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy);
4826
4827 // If the argument is less aligned than the parameter, there is a
4828 // potential alignment issue.
4829 if (ArgAlign < ParamAlign)
4830 Diag(Loc, diag::warn_param_mismatched_alignment)
4831 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity()
4832 << ParamName << FDecl;
4833}
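Editor's illustration (not part of SemaChecking.cpp): a minimal sketch of the scenario described in the comment above, where a typedef lowers the pointee's alignment (hypothetical names; the relevant diagnostic is warn_param_mismatched_alignment):

  typedef int __attribute__((aligned(1))) unaligned_int;

  void takes_int(int *p);

  void align_demo(unaligned_int *q) {
    takes_int(q);  // may be diagnosed: a 1-byte-aligned object is passed where
                   // 4-byte alignment is expected
  }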
4834
4835/// Handles the checks for format strings, non-POD arguments to vararg
4836/// functions, NULL arguments passed to non-NULL parameters, and diagnose_if
4837/// attributes.
4838void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
4839 const Expr *ThisArg, ArrayRef<const Expr *> Args,
4840 bool IsMemberFunction, SourceLocation Loc,
4841 SourceRange Range, VariadicCallType CallType) {
4842 // FIXME: We should check as much as we can in the template definition.
4843 if (CurContext->isDependentContext())
4844 return;
4845
4846 // Printf and scanf checking.
4847 llvm::SmallBitVector CheckedVarArgs;
4848 if (FDecl) {
4849 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
4850 // Only create vector if there are format attributes.
4851 CheckedVarArgs.resize(Args.size());
4852
4853 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
4854 CheckedVarArgs);
4855 }
4856 }
4857
4858 // Refuse POD arguments that weren't caught by the format string
4859 // checks above.
4860 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
4861 if (CallType != VariadicDoesNotApply &&
4862 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
4863 unsigned NumParams = Proto ? Proto->getNumParams()
4864 : FDecl && isa<FunctionDecl>(FDecl)
4865 ? cast<FunctionDecl>(FDecl)->getNumParams()
4866 : FDecl && isa<ObjCMethodDecl>(FDecl)
4867 ? cast<ObjCMethodDecl>(FDecl)->param_size()
4868 : 0;
4869
4870 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
4871 // Args[ArgIdx] can be null in malformed code.
4872 if (const Expr *Arg = Args[ArgIdx]) {
4873 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
4874 checkVariadicArgument(Arg, CallType);
4875 }
4876 }
4877 }
4878
4879 if (FDecl || Proto) {
4880 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);
4881
4882 // Type safety checking.
4883 if (FDecl) {
4884 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
4885 CheckArgumentWithTypeTag(I, Args, Loc);
4886 }
4887 }
4888
4889 // Check that passed arguments match the alignment of original arguments.
4890 // Try to get the missing prototype from the declaration.
4891 if (!Proto && FDecl) {
4892 const auto *FT = FDecl->getFunctionType();
4893 if (isa_and_nonnull<FunctionProtoType>(FT))
4894 Proto = cast<FunctionProtoType>(FDecl->getFunctionType());
4895 }
4896 if (Proto) {
4897 // For variadic functions, we may have more args than parameters.
4898  // For some K&R functions, we may have fewer args than parameters.
4899 const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size());
4900 for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) {
4901 // Args[ArgIdx] can be null in malformed code.
4902 if (const Expr *Arg = Args[ArgIdx]) {
4903 if (Arg->containsErrors())
4904 continue;
4905
4906 QualType ParamTy = Proto->getParamType(ArgIdx);
4907 QualType ArgTy = Arg->getType();
4908 CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1),
4909 ArgTy, ParamTy);
4910 }
4911 }
4912 }
4913
4914 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
4915 auto *AA = FDecl->getAttr<AllocAlignAttr>();
4916 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()];
4917 if (!Arg->isValueDependent()) {
4918 Expr::EvalResult Align;
4919 if (Arg->EvaluateAsInt(Align, Context)) {
4920 const llvm::APSInt &I = Align.Val.getInt();
4921 if (!I.isPowerOf2())
4922 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two)
4923 << Arg->getSourceRange();
4924
4925 if (I > Sema::MaximumAlignment)
4926 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great)
4927 << Arg->getSourceRange() << Sema::MaximumAlignment;
4928 }
4929 }
4930 }
4931
4932 if (FD)
4933 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
4934}
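Editor's illustration (not part of SemaChecking.cpp): a minimal sketch of the alloc_align checks performed near the end of checkCall (hypothetical my_alloc function):

  void *my_alloc(unsigned size, unsigned align) __attribute__((alloc_align(2)));

  void alloc_demo(void) {
    my_alloc(64, 16);  // accepted: 16 is a power of two
    my_alloc(64, 24);  // diagnosed with warn_alignment_not_power_of_two
  }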
4935
4936/// CheckConstructorCall - Check a constructor call for correctness and safety
4937/// properties not enforced by the C type system.
4938void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
4939 ArrayRef<const Expr *> Args,
4940 const FunctionProtoType *Proto,
4941 SourceLocation Loc) {
4942 VariadicCallType CallType =
4943 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;
4944
4945 auto *Ctor = cast<CXXConstructorDecl>(FDecl);
4946 CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType),
4947 Context.getPointerType(Ctor->getThisObjectType()));
4948
4949 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
4950 Loc, SourceRange(), CallType);
4951}
4952
4953/// CheckFunctionCall - Check a direct function call for various correctness
4954/// and safety properties not strictly enforced by the C type system.
4955bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
4956 const FunctionProtoType *Proto) {
4957 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
1. Assuming 'TheCall' is not a 'CXXOperatorCallExpr'
4958 isa<CXXMethodDecl>(FDecl);
4959 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
2. Assuming 'TheCall' is not a 'CXXMemberCallExpr'
4960 IsMemberOperatorCall;
4961 VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
4962 TheCall->getCallee());
4963 Expr** Args = TheCall->getArgs();
4964 unsigned NumArgs = TheCall->getNumArgs();
4965
4966 Expr *ImplicitThis = nullptr;
4967 if (IsMemberOperatorCall
2.1. 'IsMemberOperatorCall' is false
) {
3. Taking false branch
4968 // If this is a call to a member operator, hide the first argument
4969 // from checkCall.
4970 // FIXME: Our choice of AST representation here is less than ideal.
4971 ImplicitThis = Args[0];
4972 ++Args;
4973 --NumArgs;
4974 } else if (IsMemberFunction
3.1. 'IsMemberFunction' is false
)
4. Taking false branch
4975 ImplicitThis =
4976 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();
4977
4978 if (ImplicitThis
4.1. 'ImplicitThis' is null
) {
5. Taking false branch
4979 // ImplicitThis may or may not be a pointer, depending on whether . or -> is
4980 // used.
4981 QualType ThisType = ImplicitThis->getType();
4982 if (!ThisType->isPointerType()) {
4983 assert(!ThisType->isReferenceType())((void)0);
4984 ThisType = Context.getPointerType(ThisType);
4985 }
4986
4987 QualType ThisTypeFromDecl =
4988 Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType());
4989
4990 CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType,
4991 ThisTypeFromDecl);
4992 }
4993
4994 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs),
4995 IsMemberFunction, TheCall->getRParenLoc(),
4996 TheCall->getCallee()->getSourceRange(), CallType);
4997
4998 IdentifierInfo *FnInfo = FDecl->getIdentifier();
4999 // None of the checks below are needed for functions that don't have
5000 // simple names (e.g., C++ conversion functions).
5001 if (!FnInfo)
6. Assuming 'FnInfo' is non-null
7. Taking false branch
5002 return false;
5003
5004 CheckTCBEnforcement(TheCall, FDecl);
5005
5006 CheckAbsoluteValueFunction(TheCall, FDecl);
5007 CheckMaxUnsignedZero(TheCall, FDecl);
5008
5009 if (getLangOpts().ObjC)
8. Assuming field 'ObjC' is 0
9. Taking false branch
5010 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);
5011
5012 unsigned CMId = FDecl->getMemoryFunctionKind();
5013
5014 // Handle memory setting and copying functions.
5015 switch (CMId) {
10. Control jumps to 'case BIfree:' at line 5025
5016 case 0:
5017 return false;
5018 case Builtin::BIstrlcpy: // fallthrough
5019 case Builtin::BIstrlcat:
5020 CheckStrlcpycatArguments(TheCall, FnInfo);
5021 break;
5022 case Builtin::BIstrncat:
5023 CheckStrncatArguments(TheCall, FnInfo);
5024 break;
5025 case Builtin::BIfree:
5026 CheckFreeArguments(TheCall);
11. Calling 'Sema::CheckFreeArguments'
5027 break;
5028 default:
5029 CheckMemaccessArguments(TheCall, CMId, FnInfo);
5030 }
5031
5032 return false;
5033}
5034
5035bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
5036 ArrayRef<const Expr *> Args) {
5037 VariadicCallType CallType =
5038 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;
5039
5040 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args,
5041 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
5042 CallType);
5043
5044 return false;
5045}
5046
5047bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
5048 const FunctionProtoType *Proto) {
5049 QualType Ty;
5050 if (const auto *V = dyn_cast<VarDecl>(NDecl))
5051 Ty = V->getType().getNonReferenceType();
5052 else if (const auto *F = dyn_cast<FieldDecl>(NDecl))
5053 Ty = F->getType().getNonReferenceType();
5054 else
5055 return false;
5056
5057 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
5058 !Ty->isFunctionProtoType())
5059 return false;
5060
5061 VariadicCallType CallType;
5062 if (!Proto || !Proto->isVariadic()) {
5063 CallType = VariadicDoesNotApply;
5064 } else if (Ty->isBlockPointerType()) {
5065 CallType = VariadicBlock;
5066 } else { // Ty->isFunctionPointerType()
5067 CallType = VariadicFunction;
5068 }
5069
5070 checkCall(NDecl, Proto, /*ThisArg=*/nullptr,
5071 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
5072 /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
5073 TheCall->getCallee()->getSourceRange(), CallType);
5074
5075 return false;
5076}
5077
5078/// Checks function calls when a FunctionDecl or a NamedDecl is not available,
5079/// such as function pointers returned from functions.
5080bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
5081 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
5082 TheCall->getCallee());
5083 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
5084 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
5085 /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
5086 TheCall->getCallee()->getSourceRange(), CallType);
5087
5088 return false;
5089}
5090
5091static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
5092 if (!llvm::isValidAtomicOrderingCABI(Ordering))
5093 return false;
5094
5095 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
5096 switch (Op) {
5097 case AtomicExpr::AO__c11_atomic_init:
5098 case AtomicExpr::AO__opencl_atomic_init:
5099 llvm_unreachable("There is no ordering argument for an init")__builtin_unreachable();
5100
5101 case AtomicExpr::AO__c11_atomic_load:
5102 case AtomicExpr::AO__opencl_atomic_load:
5103 case AtomicExpr::AO__atomic_load_n:
5104 case AtomicExpr::AO__atomic_load:
5105 return OrderingCABI != llvm::AtomicOrderingCABI::release &&
5106 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
5107
5108 case AtomicExpr::AO__c11_atomic_store:
5109 case AtomicExpr::AO__opencl_atomic_store:
5110 case AtomicExpr::AO__atomic_store:
5111 case AtomicExpr::AO__atomic_store_n:
5112 return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
5113 OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
5114 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
5115
5116 default:
5117 return true;
5118 }
5119}
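Editor's illustration (not part of SemaChecking.cpp): a minimal sketch of an ordering rejected by isValidOrderingForOp; a load may not use a release ordering:

  int load_demo(_Atomic int *p) {
    // diagnosed with warn_atomic_op_has_invalid_memory_order
    return __c11_atomic_load(p, __ATOMIC_RELEASE);
  }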
5120
5121ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
5122 AtomicExpr::AtomicOp Op) {
5123 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
5124 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
5125 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()};
5126 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()},
5127 DRE->getSourceRange(), TheCall->getRParenLoc(), Args,
5128 Op);
5129}
5130
5131ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
5132 SourceLocation RParenLoc, MultiExprArg Args,
5133 AtomicExpr::AtomicOp Op,
5134 AtomicArgumentOrder ArgOrder) {
5135 // All the non-OpenCL operations take one of the following forms.
5136 // The OpenCL operations take the __c11 forms with one extra argument for
5137 // synchronization scope.
5138 enum {
5139 // C __c11_atomic_init(A *, C)
5140 Init,
5141
5142 // C __c11_atomic_load(A *, int)
5143 Load,
5144
5145 // void __atomic_load(A *, CP, int)
5146 LoadCopy,
5147
5148 // void __atomic_store(A *, CP, int)
5149 Copy,
5150
5151 // C __c11_atomic_add(A *, M, int)
5152 Arithmetic,
5153
5154 // C __atomic_exchange_n(A *, CP, int)
5155 Xchg,
5156
5157 // void __atomic_exchange(A *, C *, CP, int)
5158 GNUXchg,
5159
5160 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
5161 C11CmpXchg,
5162
5163 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
5164 GNUCmpXchg
5165 } Form = Init;
5166
5167 const unsigned NumForm = GNUCmpXchg + 1;
5168 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
5169 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
5170 // where:
5171 // C is an appropriate type,
5172 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
5173 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
5174 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and
5175 // the int parameters are for orderings.
5176
5177 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm
5178 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
5179 "need to update code for modified forms");
5180 static_assert(AtomicExpr::AO__c11_atomic_init == 0 &&
5181 AtomicExpr::AO__c11_atomic_fetch_min + 1 ==
5182 AtomicExpr::AO__atomic_load,
5183 "need to update code for modified C11 atomics");
5184 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init &&
5185 Op <= AtomicExpr::AO__opencl_atomic_fetch_max;
5186 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init &&
5187 Op <= AtomicExpr::AO__c11_atomic_fetch_min) ||
5188 IsOpenCL;
5189 bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
5190 Op == AtomicExpr::AO__atomic_store_n ||
5191 Op == AtomicExpr::AO__atomic_exchange_n ||
5192 Op == AtomicExpr::AO__atomic_compare_exchange_n;
5193 bool IsAddSub = false;
5194
5195 switch (Op) {
5196 case AtomicExpr::AO__c11_atomic_init:
5197 case AtomicExpr::AO__opencl_atomic_init:
5198 Form = Init;
5199 break;
5200
5201 case AtomicExpr::AO__c11_atomic_load:
5202 case AtomicExpr::AO__opencl_atomic_load:
5203 case AtomicExpr::AO__atomic_load_n:
5204 Form = Load;
5205 break;
5206
5207 case AtomicExpr::AO__atomic_load:
5208 Form = LoadCopy;
5209 break;
5210
5211 case AtomicExpr::AO__c11_atomic_store:
5212 case AtomicExpr::AO__opencl_atomic_store:
5213 case AtomicExpr::AO__atomic_store:
5214 case AtomicExpr::AO__atomic_store_n:
5215 Form = Copy;
5216 break;
5217
5218 case AtomicExpr::AO__c11_atomic_fetch_add:
5219 case AtomicExpr::AO__c11_atomic_fetch_sub:
5220 case AtomicExpr::AO__opencl_atomic_fetch_add:
5221 case AtomicExpr::AO__opencl_atomic_fetch_sub:
5222 case AtomicExpr::AO__atomic_fetch_add:
5223 case AtomicExpr::AO__atomic_fetch_sub:
5224 case AtomicExpr::AO__atomic_add_fetch:
5225 case AtomicExpr::AO__atomic_sub_fetch:
5226 IsAddSub = true;
5227 Form = Arithmetic;
5228 break;
5229 case AtomicExpr::AO__c11_atomic_fetch_and:
5230 case AtomicExpr::AO__c11_atomic_fetch_or:
5231 case AtomicExpr::AO__c11_atomic_fetch_xor:
5232 case AtomicExpr::AO__opencl_atomic_fetch_and:
5233 case AtomicExpr::AO__opencl_atomic_fetch_or:
5234 case AtomicExpr::AO__opencl_atomic_fetch_xor:
5235 case AtomicExpr::AO__atomic_fetch_and:
5236 case AtomicExpr::AO__atomic_fetch_or:
5237 case AtomicExpr::AO__atomic_fetch_xor:
5238 case AtomicExpr::AO__atomic_fetch_nand:
5239 case AtomicExpr::AO__atomic_and_fetch:
5240 case AtomicExpr::AO__atomic_or_fetch:
5241 case AtomicExpr::AO__atomic_xor_fetch:
5242 case AtomicExpr::AO__atomic_nand_fetch:
5243 Form = Arithmetic;
5244 break;
5245 case AtomicExpr::AO__c11_atomic_fetch_min:
5246 case AtomicExpr::AO__c11_atomic_fetch_max:
5247 case AtomicExpr::AO__opencl_atomic_fetch_min:
5248 case AtomicExpr::AO__opencl_atomic_fetch_max:
5249 case AtomicExpr::AO__atomic_min_fetch:
5250 case AtomicExpr::AO__atomic_max_fetch:
5251 case AtomicExpr::AO__atomic_fetch_min:
5252 case AtomicExpr::AO__atomic_fetch_max:
5253 Form = Arithmetic;
5254 break;
5255
5256 case AtomicExpr::AO__c11_atomic_exchange:
5257 case AtomicExpr::AO__opencl_atomic_exchange:
5258 case AtomicExpr::AO__atomic_exchange_n:
5259 Form = Xchg;
5260 break;
5261
5262 case AtomicExpr::AO__atomic_exchange:
5263 Form = GNUXchg;
5264 break;
5265
5266 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
5267 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
5268 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
5269 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
5270 Form = C11CmpXchg;
5271 break;
5272
5273 case AtomicExpr::AO__atomic_compare_exchange:
5274 case AtomicExpr::AO__atomic_compare_exchange_n:
5275 Form = GNUCmpXchg;
5276 break;
5277 }
5278
5279 unsigned AdjustedNumArgs = NumArgs[Form];
5280 if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init)
5281 ++AdjustedNumArgs;
5282 // Check we have the right number of arguments.
5283 if (Args.size() < AdjustedNumArgs) {
5284 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args)
5285 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
5286 << ExprRange;
5287 return ExprError();
5288 } else if (Args.size() > AdjustedNumArgs) {
5289 Diag(Args[AdjustedNumArgs]->getBeginLoc(),
5290 diag::err_typecheck_call_too_many_args)
5291 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
5292 << ExprRange;
5293 return ExprError();
5294 }
5295
5296 // Inspect the first argument of the atomic operation.
5297 Expr *Ptr = Args[0];
5298 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr);
5299 if (ConvertedPtr.isInvalid())
5300 return ExprError();
5301
5302 Ptr = ConvertedPtr.get();
5303 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
5304 if (!pointerType) {
5305 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
5306 << Ptr->getType() << Ptr->getSourceRange();
5307 return ExprError();
5308 }
5309
5310 // For a __c11 builtin, this should be a pointer to an _Atomic type.
5311 QualType AtomTy = pointerType->getPointeeType(); // 'A'
5312 QualType ValType = AtomTy; // 'C'
5313 if (IsC11) {
5314 if (!AtomTy->isAtomicType()) {
5315 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic)
5316 << Ptr->getType() << Ptr->getSourceRange();
5317 return ExprError();
5318 }
5319 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) ||
5320 AtomTy.getAddressSpace() == LangAS::opencl_constant) {
5321 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic)
5322 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType()
5323 << Ptr->getSourceRange();
5324 return ExprError();
5325 }
5326 ValType = AtomTy->castAs<AtomicType>()->getValueType();
5327 } else if (Form != Load && Form != LoadCopy) {
5328 if (ValType.isConstQualified()) {
5329 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer)
5330 << Ptr->getType() << Ptr->getSourceRange();
5331 return ExprError();
5332 }
5333 }
5334
5335 // For an arithmetic operation, the implied arithmetic must be well-formed.
5336 if (Form == Arithmetic) {
5337 // gcc does not enforce these rules for GNU atomics, but we do so for
5338 // sanity.
5339 auto IsAllowedValueType = [&](QualType ValType) {
5340 if (ValType->isIntegerType())
5341 return true;
5342 if (ValType->isPointerType())
5343 return true;
5344 if (!ValType->isFloatingType())
5345 return false;
5346 // LLVM Parser does not allow atomicrmw with x86_fp80 type.
5347 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) &&
5348 &Context.getTargetInfo().getLongDoubleFormat() ==
5349 &llvm::APFloat::x87DoubleExtended())
5350 return false;
5351 return true;
5352 };
5353 if (IsAddSub && !IsAllowedValueType(ValType)) {
5354 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp)
5355 << IsC11 << Ptr->getType() << Ptr->getSourceRange();
5356 return ExprError();
5357 }
5358 if (!IsAddSub && !ValType->isIntegerType()) {
5359 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int)
5360 << IsC11 << Ptr->getType() << Ptr->getSourceRange();
5361 return ExprError();
5362 }
5363 if (IsC11 && ValType->isPointerType() &&
5364 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(),
5365 diag::err_incomplete_type)) {
5366 return ExprError();
5367 }
5368 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
5369 // For __atomic_*_n operations, the value type must be a scalar integral or
5370 // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
5371 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr)
5372 << IsC11 << Ptr->getType() << Ptr->getSourceRange();
5373 return ExprError();
5374 }
5375
5376 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) &&
5377 !AtomTy->isScalarType()) {
5378 // For GNU atomics, require a trivially-copyable type. This is not part of
5379 // the GNU atomics specification, but we enforce it for sanity.
5380 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy)
5381 << Ptr->getType() << Ptr->getSourceRange();
5382 return ExprError();
5383 }
5384
5385 switch (ValType.getObjCLifetime()) {
5386 case Qualifiers::OCL_None:
5387 case Qualifiers::OCL_ExplicitNone:
5388 // okay
5389 break;
5390
5391 case Qualifiers::OCL_Weak:
5392 case Qualifiers::OCL_Strong:
5393 case Qualifiers::OCL_Autoreleasing:
5394 // FIXME: Can this happen? By this point, ValType should be known
5395 // to be trivially copyable.
5396 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership)
5397 << ValType << Ptr->getSourceRange();
5398 return ExprError();
5399 }
5400
5401 // All atomic operations have an overload which takes a pointer to a volatile
5402 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself
5403 // into the result or the other operands. Similarly atomic_load takes a
5404 // pointer to a const 'A'.
5405 ValType.removeLocalVolatile();
5406 ValType.removeLocalConst();
5407 QualType ResultType = ValType;
5408 if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
5409 Form == Init)
5410 ResultType = Context.VoidTy;
5411 else if (Form == C11CmpXchg || Form == GNUCmpXchg)
5412 ResultType = Context.BoolTy;
5413
5414 // The type of a parameter passed 'by value'. In the GNU atomics, such
5415 // arguments are actually passed as pointers.
5416 QualType ByValType = ValType; // 'CP'
5417 bool IsPassedByAddress = false;
5418 if (!IsC11 && !IsN) {
5419 ByValType = Ptr->getType();
5420 IsPassedByAddress = true;
5421 }
5422
5423 SmallVector<Expr *, 5> APIOrderedArgs;
5424 if (ArgOrder == Sema::AtomicArgumentOrder::AST) {
5425 APIOrderedArgs.push_back(Args[0]);
5426 switch (Form) {
5427 case Init:
5428 case Load:
5429 APIOrderedArgs.push_back(Args[1]); // Val1/Order
5430 break;
5431 case LoadCopy:
5432 case Copy:
5433 case Arithmetic:
5434 case Xchg:
5435 APIOrderedArgs.push_back(Args[2]); // Val1
5436 APIOrderedArgs.push_back(Args[1]); // Order
5437 break;
5438 case GNUXchg:
5439 APIOrderedArgs.push_back(Args[2]); // Val1
5440 APIOrderedArgs.push_back(Args[3]); // Val2
5441 APIOrderedArgs.push_back(Args[1]); // Order
5442 break;
5443 case C11CmpXchg:
5444 APIOrderedArgs.push_back(Args[2]); // Val1
5445 APIOrderedArgs.push_back(Args[4]); // Val2
5446 APIOrderedArgs.push_back(Args[1]); // Order
5447 APIOrderedArgs.push_back(Args[3]); // OrderFail
5448 break;
5449 case GNUCmpXchg:
5450 APIOrderedArgs.push_back(Args[2]); // Val1
5451 APIOrderedArgs.push_back(Args[4]); // Val2
5452 APIOrderedArgs.push_back(Args[5]); // Weak
5453 APIOrderedArgs.push_back(Args[1]); // Order
5454 APIOrderedArgs.push_back(Args[3]); // OrderFail
5455 break;
5456 }
5457 } else
5458 APIOrderedArgs.append(Args.begin(), Args.end());
5459
5460 // The first argument's non-CV pointer type is used to deduce the type of
5461 // subsequent arguments, except for:
5462 // - weak flag (always converted to bool)
5463 // - memory order (always converted to int)
5464 // - scope (always converted to int)
5465 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) {
5466 QualType Ty;
5467 if (i < NumVals[Form] + 1) {
5468 switch (i) {
5469 case 0:
5470 // The first argument is always a pointer. It has a fixed type.
5471 // It is always dereferenced, a nullptr is undefined.
5472 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
5473 // Nothing else to do: we already know all we want about this pointer.
5474 continue;
5475 case 1:
5476 // The second argument is the non-atomic operand. For arithmetic, this
5477 // is always passed by value, and for a compare_exchange it is always
5478 // passed by address. For the rest, GNU uses by-address and C11 uses
5479 // by-value.
5480 assert(Form != Load)((void)0);
5481 if (Form == Arithmetic && ValType->isPointerType())
5482 Ty = Context.getPointerDiffType();
5483 else if (Form == Init || Form == Arithmetic)
5484 Ty = ValType;
5485 else if (Form == Copy || Form == Xchg) {
5486 if (IsPassedByAddress) {
5487 // The value pointer is always dereferenced, a nullptr is undefined.
5488 CheckNonNullArgument(*this, APIOrderedArgs[i],
5489 ExprRange.getBegin());
5490 }
5491 Ty = ByValType;
5492 } else {
5493 Expr *ValArg = APIOrderedArgs[i];
5494 // The value pointer is always dereferenced, a nullptr is undefined.
5495 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin());
5496 LangAS AS = LangAS::Default;
5497 // Keep address space of non-atomic pointer type.
5498 if (const PointerType *PtrTy =
5499 ValArg->getType()->getAs<PointerType>()) {
5500 AS = PtrTy->getPointeeType().getAddressSpace();
5501 }
5502 Ty = Context.getPointerType(
5503 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS));
5504 }
5505 break;
5506 case 2:
5507 // The third argument to compare_exchange / GNU exchange is the desired
5508 // value, either by-value (for the C11 and *_n variant) or as a pointer.
5509 if (IsPassedByAddress)
5510 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
5511 Ty = ByValType;
5512 break;
5513 case 3:
5514 // The fourth argument to GNU compare_exchange is a 'weak' flag.
5515 Ty = Context.BoolTy;
5516 break;
5517 }
5518 } else {
5519 // The order(s) and scope are always converted to int.
5520 Ty = Context.IntTy;
5521 }
5522
5523 InitializedEntity Entity =
5524 InitializedEntity::InitializeParameter(Context, Ty, false);
5525 ExprResult Arg = APIOrderedArgs[i];
5526 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
5527 if (Arg.isInvalid())
5528 return true;
5529 APIOrderedArgs[i] = Arg.get();
5530 }
5531
5532 // Permute the arguments into a 'consistent' order.
5533 SmallVector<Expr*, 5> SubExprs;
5534 SubExprs.push_back(Ptr);
5535 switch (Form) {
5536 case Init:
5537 // Note, AtomicExpr::getVal1() has a special case for this atomic.
5538 SubExprs.push_back(APIOrderedArgs[1]); // Val1
5539 break;
5540 case Load:
5541 SubExprs.push_back(APIOrderedArgs[1]); // Order
5542 break;
5543 case LoadCopy:
5544 case Copy:
5545 case Arithmetic:
5546 case Xchg:
5547 SubExprs.push_back(APIOrderedArgs[2]); // Order
5548 SubExprs.push_back(APIOrderedArgs[1]); // Val1
5549 break;
5550 case GNUXchg:
5551 // Note, AtomicExpr::getVal2() has a special case for this atomic.
5552 SubExprs.push_back(APIOrderedArgs[3]); // Order
5553 SubExprs.push_back(APIOrderedArgs[1]); // Val1
5554 SubExprs.push_back(APIOrderedArgs[2]); // Val2
5555 break;
5556 case C11CmpXchg:
5557 SubExprs.push_back(APIOrderedArgs[3]); // Order
5558 SubExprs.push_back(APIOrderedArgs[1]); // Val1
5559 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail
5560 SubExprs.push_back(APIOrderedArgs[2]); // Val2
5561 break;
5562 case GNUCmpXchg:
5563 SubExprs.push_back(APIOrderedArgs[4]); // Order
5564 SubExprs.push_back(APIOrderedArgs[1]); // Val1
5565 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail
5566 SubExprs.push_back(APIOrderedArgs[2]); // Val2
5567 SubExprs.push_back(APIOrderedArgs[3]); // Weak
5568 break;
5569 }
5570
5571 if (SubExprs.size() >= 2 && Form != Init) {
5572 if (Optional<llvm::APSInt> Result =
5573 SubExprs[1]->getIntegerConstantExpr(Context))
5574 if (!isValidOrderingForOp(Result->getSExtValue(), Op))
5575 Diag(SubExprs[1]->getBeginLoc(),
5576 diag::warn_atomic_op_has_invalid_memory_order)
5577 << SubExprs[1]->getSourceRange();
5578 }
5579
5580 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) {
5581 auto *Scope = Args[Args.size() - 1];
5582 if (Optional<llvm::APSInt> Result =
5583 Scope->getIntegerConstantExpr(Context)) {
5584 if (!ScopeModel->isValid(Result->getZExtValue()))
5585 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope)
5586 << Scope->getSourceRange();
5587 }
5588 SubExprs.push_back(Scope);
5589 }
5590
5591 AtomicExpr *AE = new (Context)
5592 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc);
5593
5594 if ((Op == AtomicExpr::AO__c11_atomic_load ||
5595 Op == AtomicExpr::AO__c11_atomic_store ||
5596 Op == AtomicExpr::AO__opencl_atomic_load ||
5597 Op == AtomicExpr::AO__opencl_atomic_store ) &&
5598 Context.AtomicUsesUnsupportedLibcall(AE))
5599 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib)
5600 << ((Op == AtomicExpr::AO__c11_atomic_load ||
5601 Op == AtomicExpr::AO__opencl_atomic_load)
5602 ? 0
5603 : 1);
5604
5605 if (ValType->isExtIntType()) {
5606 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_ext_int_prohibit);
5607 return ExprError();
5608 }
5609
5610 return AE;
5611}
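Editor's illustration (not part of SemaChecking.cpp): a minimal sketch of the GNUCmpXchg form handled above; the six user-visible arguments of __atomic_compare_exchange are permuted by BuildAtomicExpr into the AtomicExpr's internal sub-expression order:

  _Bool cmpxchg_demo(int *obj, int *expected, int *desired) {
    return __atomic_compare_exchange(obj, expected, desired,
                                     /*weak=*/0,
                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  }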
5612
5613/// checkBuiltinArgument - Given a call to a builtin function, perform
5614/// normal type-checking on the given argument, updating the call in
5615/// place. This is useful when a builtin function requires custom
5616/// type-checking for some of its arguments but not necessarily all of
5617/// them.
5618///
5619/// Returns true on error.
5620static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
5621 FunctionDecl *Fn = E->getDirectCallee();
5622 assert(Fn && "builtin call without direct callee!")((void)0);
5623
5624 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex);
5625 InitializedEntity Entity =
5626 InitializedEntity::InitializeParameter(S.Context, Param);
5627
5628 ExprResult Arg = E->getArg(0);
5629 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
5630 if (Arg.isInvalid())
5631 return true;
5632
5633 E->setArg(ArgIndex, Arg.get());
5634 return false;
5635}
5636
5637/// We have a call to a function like __sync_fetch_and_add, which is an
5638/// overloaded function based on the pointer type of its first argument.
5639/// The main BuildCallExpr routines have already promoted the types of
5640/// arguments because all of these calls are prototyped as void(...).
5641///
5642/// This function goes through and does final semantic checking for these
5643/// builtins, as well as generating any warnings.
5644ExprResult
5645Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
5646 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get());
5647 Expr *Callee = TheCall->getCallee();
5648 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts());
5649 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
5650
5651 // Ensure that we have at least one argument to do type inference from.
5652 if (TheCall->getNumArgs() < 1) {
5653 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
5654 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange();
5655 return ExprError();
5656 }
5657
5658 // Inspect the first argument of the atomic builtin. This should always be
5659 // a pointer type, whose element is an integral scalar or pointer type.
5660 // Because it is a pointer type, we don't have to worry about any implicit
5661 // casts here.
5662 // FIXME: We don't allow floating point scalars as input.
5663 Expr *FirstArg = TheCall->getArg(0);
5664 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
5665 if (FirstArgResult.isInvalid())
5666 return ExprError();
5667 FirstArg = FirstArgResult.get();
5668 TheCall->setArg(0, FirstArg);
5669
5670 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
5671 if (!pointerType) {
5672 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
5673 << FirstArg->getType() << FirstArg->getSourceRange();
5674 return ExprError();
5675 }
5676
5677 QualType ValType = pointerType->getPointeeType();
5678 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
5679 !ValType->isBlockPointerType()) {
5680 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
5681 << FirstArg->getType() << FirstArg->getSourceRange();
5682 return ExprError();
5683 }
5684
5685 if (ValType.isConstQualified()) {
5686 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const)
5687 << FirstArg->getType() << FirstArg->getSourceRange();
5688 return ExprError();
5689 }
5690
5691 switch (ValType.getObjCLifetime()) {
5692 case Qualifiers::OCL_None:
5693 case Qualifiers::OCL_ExplicitNone:
5694 // okay
5695 break;
5696
5697 case Qualifiers::OCL_Weak:
5698 case Qualifiers::OCL_Strong:
5699 case Qualifiers::OCL_Autoreleasing:
5700 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
5701 << ValType << FirstArg->getSourceRange();
5702 return ExprError();
5703 }
5704
5705 // Strip any qualifiers off ValType.
5706 ValType = ValType.getUnqualifiedType();
5707
5708 // The majority of builtins return a value, but a few have special return
5709 // types, so allow them to override appropriately below.
5710 QualType ResultType = ValType;
5711
5712 // We need to figure out which concrete builtin this maps onto. For example,
5713 // __sync_fetch_and_add with a 2 byte object turns into
5714 // __sync_fetch_and_add_2.
5715#define BUILTIN_ROW(x) \
5716 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
5717 Builtin::BI##x##_8, Builtin::BI##x##_16 }
5718
5719 static const unsigned BuiltinIndices[][5] = {
5720 BUILTIN_ROW(__sync_fetch_and_add),
5721 BUILTIN_ROW(__sync_fetch_and_sub),
5722 BUILTIN_ROW(__sync_fetch_and_or),
5723 BUILTIN_ROW(__sync_fetch_and_and),
5724 BUILTIN_ROW(__sync_fetch_and_xor),
5725 BUILTIN_ROW(__sync_fetch_and_nand),
5726
5727 BUILTIN_ROW(__sync_add_and_fetch),
5728 BUILTIN_ROW(__sync_sub_and_fetch),
5729 BUILTIN_ROW(__sync_and_and_fetch),
5730 BUILTIN_ROW(__sync_or_and_fetch),
5731 BUILTIN_ROW(__sync_xor_and_fetch),
5732 BUILTIN_ROW(__sync_nand_and_fetch),
5733
5734 BUILTIN_ROW(__sync_val_compare_and_swap),
5735 BUILTIN_ROW(__sync_bool_compare_and_swap),
5736 BUILTIN_ROW(__sync_lock_test_and_set),
5737 BUILTIN_ROW(__sync_lock_release),
5738 BUILTIN_ROW(__sync_swap)
5739 };
5740#undef BUILTIN_ROW
5741
5742 // Determine the index of the size.
5743 unsigned SizeIndex;
5744 switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
5745 case 1: SizeIndex = 0; break;
5746 case 2: SizeIndex = 1; break;
5747 case 4: SizeIndex = 2; break;
5748 case 8: SizeIndex = 3; break;
5749 case 16: SizeIndex = 4; break;
5750 default:
5751 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
5752 << FirstArg->getType() << FirstArg->getSourceRange();
5753 return ExprError();
5754 }
5755
5756 // Each of these builtins has one pointer argument, followed by some number of
5757 // values (0, 1 or 2) followed by a potentially empty varargs list of stuff
5758 // that we ignore. Find out which row of BuiltinIndices to read from as well
5759 // as the number of fixed args.
5760 unsigned BuiltinID = FDecl->getBuiltinID();
5761 unsigned BuiltinIndex, NumFixed = 1;
5762 bool WarnAboutSemanticsChange = false;
5763 switch (BuiltinID) {
5764 default: llvm_unreachable("Unknown overloaded atomic builtin!")__builtin_unreachable();
5765 case Builtin::BI__sync_fetch_and_add:
5766 case Builtin::BI__sync_fetch_and_add_1:
5767 case Builtin::BI__sync_fetch_and_add_2:
5768 case Builtin::BI__sync_fetch_and_add_4:
5769 case Builtin::BI__sync_fetch_and_add_8:
5770 case Builtin::BI__sync_fetch_and_add_16:
5771 BuiltinIndex = 0;
5772 break;
5773
5774 case Builtin::BI__sync_fetch_and_sub:
5775 case Builtin::BI__sync_fetch_and_sub_1:
5776 case Builtin::BI__sync_fetch_and_sub_2:
5777 case Builtin::BI__sync_fetch_and_sub_4:
5778 case Builtin::BI__sync_fetch_and_sub_8:
5779 case Builtin::BI__sync_fetch_and_sub_16:
5780 BuiltinIndex = 1;
5781 break;
5782
5783 case Builtin::BI__sync_fetch_and_or:
5784 case Builtin::BI__sync_fetch_and_or_1:
5785 case Builtin::BI__sync_fetch_and_or_2:
5786 case Builtin::BI__sync_fetch_and_or_4:
5787 case Builtin::BI__sync_fetch_and_or_8:
5788 case Builtin::BI__sync_fetch_and_or_16:
5789 BuiltinIndex = 2;
5790 break;
5791
5792 case Builtin::BI__sync_fetch_and_and:
5793 case Builtin::BI__sync_fetch_and_and_1:
5794 case Builtin::BI__sync_fetch_and_and_2:
5795 case Builtin::BI__sync_fetch_and_and_4:
5796 case Builtin::BI__sync_fetch_and_and_8:
5797 case Builtin::BI__sync_fetch_and_and_16:
5798 BuiltinIndex = 3;
5799 break;
5800
5801 case Builtin::BI__sync_fetch_and_xor:
5802 case Builtin::BI__sync_fetch_and_xor_1:
5803 case Builtin::BI__sync_fetch_and_xor_2:
5804 case Builtin::BI__sync_fetch_and_xor_4:
5805 case Builtin::BI__sync_fetch_and_xor_8:
5806 case Builtin::BI__sync_fetch_and_xor_16:
5807 BuiltinIndex = 4;
5808 break;
5809
5810 case Builtin::BI__sync_fetch_and_nand:
5811 case Builtin::BI__sync_fetch_and_nand_1:
5812 case Builtin::BI__sync_fetch_and_nand_2:
5813 case Builtin::BI__sync_fetch_and_nand_4:
5814 case Builtin::BI__sync_fetch_and_nand_8:
5815 case Builtin::BI__sync_fetch_and_nand_16:
5816 BuiltinIndex = 5;
5817 WarnAboutSemanticsChange = true;
5818 break;
5819
5820 case Builtin::BI__sync_add_and_fetch:
5821 case Builtin::BI__sync_add_and_fetch_1:
5822 case Builtin::BI__sync_add_and_fetch_2:
5823 case Builtin::BI__sync_add_and_fetch_4:
5824 case Builtin::BI__sync_add_and_fetch_8:
5825 case Builtin::BI__sync_add_and_fetch_16:
5826 BuiltinIndex = 6;
5827 break;
5828
5829 case Builtin::BI__sync_sub_and_fetch:
5830 case Builtin::BI__sync_sub_and_fetch_1:
5831 case Builtin::BI__sync_sub_and_fetch_2:
5832 case Builtin::BI__sync_sub_and_fetch_4:
5833 case Builtin::BI__sync_sub_and_fetch_8:
5834 case Builtin::BI__sync_sub_and_fetch_16:
5835 BuiltinIndex = 7;
5836 break;
5837
5838 case Builtin::BI__sync_and_and_fetch:
5839 case Builtin::BI__sync_and_and_fetch_1:
5840 case Builtin::BI__sync_and_and_fetch_2:
5841 case Builtin::BI__sync_and_and_fetch_4:
5842 case Builtin::BI__sync_and_and_fetch_8:
5843 case Builtin::BI__sync_and_and_fetch_16:
5844 BuiltinIndex = 8;
5845 break;
5846
5847 case Builtin::BI__sync_or_and_fetch:
5848 case Builtin::BI__sync_or_and_fetch_1:
5849 case Builtin::BI__sync_or_and_fetch_2:
5850 case Builtin::BI__sync_or_and_fetch_4:
5851 case Builtin::BI__sync_or_and_fetch_8:
5852 case Builtin::BI__sync_or_and_fetch_16:
5853 BuiltinIndex = 9;
5854 break;
5855
5856 case Builtin::BI__sync_xor_and_fetch:
5857 case Builtin::BI__sync_xor_and_fetch_1:
5858 case Builtin::BI__sync_xor_and_fetch_2:
5859 case Builtin::BI__sync_xor_and_fetch_4:
5860 case Builtin::BI__sync_xor_and_fetch_8:
5861 case Builtin::BI__sync_xor_and_fetch_16:
5862 BuiltinIndex = 10;
5863 break;
5864
5865 case Builtin::BI__sync_nand_and_fetch:
5866 case Builtin::BI__sync_nand_and_fetch_1:
5867 case Builtin::BI__sync_nand_and_fetch_2:
5868 case Builtin::BI__sync_nand_and_fetch_4:
5869 case Builtin::BI__sync_nand_and_fetch_8:
5870 case Builtin::BI__sync_nand_and_fetch_16:
5871 BuiltinIndex = 11;
5872 WarnAboutSemanticsChange = true;
5873 break;
5874
5875 case Builtin::BI__sync_val_compare_and_swap:
5876 case Builtin::BI__sync_val_compare_and_swap_1:
5877 case Builtin::BI__sync_val_compare_and_swap_2:
5878 case Builtin::BI__sync_val_compare_and_swap_4:
5879 case Builtin::BI__sync_val_compare_and_swap_8:
5880 case Builtin::BI__sync_val_compare_and_swap_16:
5881 BuiltinIndex = 12;
5882 NumFixed = 2;
5883 break;
5884
5885 case Builtin::BI__sync_bool_compare_and_swap:
5886 case Builtin::BI__sync_bool_compare_and_swap_1:
5887 case Builtin::BI__sync_bool_compare_and_swap_2:
5888 case Builtin::BI__sync_bool_compare_and_swap_4:
5889 case Builtin::BI__sync_bool_compare_and_swap_8:
5890 case Builtin::BI__sync_bool_compare_and_swap_16:
5891 BuiltinIndex = 13;
5892 NumFixed = 2;
5893 ResultType = Context.BoolTy;
5894 break;
5895
5896 case Builtin::BI__sync_lock_test_and_set:
5897 case Builtin::BI__sync_lock_test_and_set_1:
5898 case Builtin::BI__sync_lock_test_and_set_2:
5899 case Builtin::BI__sync_lock_test_and_set_4:
5900 case Builtin::BI__sync_lock_test_and_set_8:
5901 case Builtin::BI__sync_lock_test_and_set_16:
5902 BuiltinIndex = 14;
5903 break;
5904
5905 case Builtin::BI__sync_lock_release:
5906 case Builtin::BI__sync_lock_release_1:
5907 case Builtin::BI__sync_lock_release_2:
5908 case Builtin::BI__sync_lock_release_4:
5909 case Builtin::BI__sync_lock_release_8:
5910 case Builtin::BI__sync_lock_release_16:
5911 BuiltinIndex = 15;
5912 NumFixed = 0;
5913 ResultType = Context.VoidTy;
5914 break;
5915
5916 case Builtin::BI__sync_swap:
5917 case Builtin::BI__sync_swap_1:
5918 case Builtin::BI__sync_swap_2:
5919 case Builtin::BI__sync_swap_4:
5920 case Builtin::BI__sync_swap_8:
5921 case Builtin::BI__sync_swap_16:
5922 BuiltinIndex = 16;
5923 break;
5924 }
5925
5926 // Now that we know how many fixed arguments we expect, first check that we
5927 // have at least that many.
5928 if (TheCall->getNumArgs() < 1+NumFixed) {
5929 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
5930 << 0 << 1 + NumFixed << TheCall->getNumArgs()
5931 << Callee->getSourceRange();
5932 return ExprError();
5933 }
5934
5935 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst)
5936 << Callee->getSourceRange();
5937
5938 if (WarnAboutSemanticsChange) {
5939 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change)
5940 << Callee->getSourceRange();
5941 }
5942
5943 // Get the decl for the concrete builtin from this, we can tell what the
5944 // concrete integer type we should convert to is.
5945 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
5946 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID);
5947 FunctionDecl *NewBuiltinDecl;
5948 if (NewBuiltinID == BuiltinID)
5949 NewBuiltinDecl = FDecl;
5950 else {
5951 // Perform builtin lookup to avoid redeclaring it.
5952 DeclarationName DN(&Context.Idents.get(NewBuiltinName));
5953 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
5954 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
5955 assert(Res.getFoundDecl())((void)0);
5956 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
5957 if (!NewBuiltinDecl)
5958 return ExprError();
5959 }
5960
5961 // The first argument --- the pointer --- has a fixed type; we
5962 // deduce the types of the rest of the arguments accordingly. Walk
5963 // the remaining arguments, converting them to the deduced value type.
5964 for (unsigned i = 0; i != NumFixed; ++i) {
5965 ExprResult Arg = TheCall->getArg(i+1);
5966
5967 // GCC does an implicit conversion to the pointer or integer ValType. This
5968 // can fail in some cases (1 -> int**), check for this error case now.
5969 // Initialize the argument.
5970 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
5971 ValType, /*consume*/ false);
5972 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
5973 if (Arg.isInvalid())
5974 return ExprError();
5975
5976 // Okay, we have something that *can* be converted to the right type. Check
5977 // to see if there is a potentially weird extension going on here. This can
5978 // happen when you do an atomic operation on something like a char* and
5979 // pass in 42. The 42 gets converted to char. This is even more strange
5980 // for things like 45.123 -> char, etc.
5981 // FIXME: Do this check.
5982 TheCall->setArg(i+1, Arg.get());
5983 }
5984
5985 // Create a new DeclRefExpr to refer to the new decl.
5986 DeclRefExpr *NewDRE = DeclRefExpr::Create(
5987 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
5988 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
5989 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());
5990
5991 // Set the callee in the CallExpr.
5992 // FIXME: This loses syntactic information.
5993 QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
5994 ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
5995 CK_BuiltinFnToFnPtr);
5996 TheCall->setCallee(PromotedCall.get());
5997
5998 // Change the result type of the call to match the original value type. This
5999 // is arbitrary, but the codegen for these builtins is designed to handle it
6000 // gracefully.
6001 TheCall->setType(ResultType);
6002
6003 // Prohibit use of _ExtInt with atomic builtins.
6004 // The arguments would have already been converted to the first argument's
6005 // type, so only need to check the first argument.
6006 const auto *ExtIntValType = ValType->getAs<ExtIntType>();
6007 if (ExtIntValType && !llvm::isPowerOf2_64(ExtIntValType->getNumBits())) {
6008 Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
6009 return ExprError();
6010 }
6011
6012 return TheCallResult;
6013}
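// [Editorial example -- not part of the analyzed source] Sketch of how the
// size-based resolution above plays out for a __sync_* call:
//
//   short counter;
//   __sync_fetch_and_add(&counter, 1);
//   // 2-byte pointee: the call is rewritten to __sync_fetch_and_add_2 and
//   // the value operand is copy-initialized to short.
//
//   const int locked = 0;
//   __sync_fetch_and_add(&locked, 1);
//   // error: the pointee of the first argument must not be const-qualified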
6014
6015/// SemaBuiltinNontemporalOverloaded - We have a call to
6016/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
6017/// overloaded function based on the pointer type of its last argument.
6018///
6019/// This function goes through and does final semantic checking for these
6020/// builtins.
6021ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
6022 CallExpr *TheCall = (CallExpr *)TheCallResult.get();
6023 DeclRefExpr *DRE =
6024 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
6025 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
6026 unsigned BuiltinID = FDecl->getBuiltinID();
6027 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||((void)0)
6028 BuiltinID == Builtin::BI__builtin_nontemporal_load) &&((void)0)
6029 "Unexpected nontemporal load/store builtin!")((void)0);
6030 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
6031 unsigned numArgs = isStore ? 2 : 1;
6032
6033 // Ensure that we have the proper number of arguments.
6034 if (checkArgCount(*this, TheCall, numArgs))
6035 return ExprError();
6036
6037 // Inspect the last argument of the nontemporal builtin. This should always
6038 // be a pointer type, from which we imply the type of the memory access.
6039 // Because it is a pointer type, we don't have to worry about any implicit
6040 // casts here.
6041 Expr *PointerArg = TheCall->getArg(numArgs - 1);
6042 ExprResult PointerArgResult =
6043 DefaultFunctionArrayLvalueConversion(PointerArg);
6044
6045 if (PointerArgResult.isInvalid())
6046 return ExprError();
6047 PointerArg = PointerArgResult.get();
6048 TheCall->setArg(numArgs - 1, PointerArg);
6049
6050 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
6051 if (!pointerType) {
6052 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
6053 << PointerArg->getType() << PointerArg->getSourceRange();
6054 return ExprError();
6055 }
6056
6057 QualType ValType = pointerType->getPointeeType();
6058
6059 // Strip any qualifiers off ValType.
6060 ValType = ValType.getUnqualifiedType();
6061 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
6062 !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
6063 !ValType->isVectorType()) {
6064 Diag(DRE->getBeginLoc(),
6065 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
6066 << PointerArg->getType() << PointerArg->getSourceRange();
6067 return ExprError();
6068 }
6069
6070 if (!isStore) {
6071 TheCall->setType(ValType);
6072 return TheCallResult;
6073 }
6074
6075 ExprResult ValArg = TheCall->getArg(0);
6076 InitializedEntity Entity = InitializedEntity::InitializeParameter(
6077 Context, ValType, /*consume*/ false);
6078 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
6079 if (ValArg.isInvalid())
6080 return ExprError();
6081
6082 TheCall->setArg(0, ValArg.get());
6083 TheCall->setType(Context.VoidTy);
6084 return TheCallResult;
6085}
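// [Editorial example -- not part of the analyzed source] The store form takes
// (value, pointer) and returns void; the load form takes (pointer) and
// returns the pointee type, as enforced above:
//
//   int *p; int v;
//   __builtin_nontemporal_store(v, p);      // result type: void
//   int r = __builtin_nontemporal_load(p);  // result type: int
//
//   struct S { int a, b; } s;
//   __builtin_nontemporal_store(s, &s);
//   // error: pointee must be an integer, pointer, floating, or vector type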
6086
6087/// CheckObjCString - Checks that the argument to the builtin
6088/// CFString constructor is correct
6089/// Note: It might also make sense to do the UTF-16 conversion here (would
6090/// simplify the backend).
6091bool Sema::CheckObjCString(Expr *Arg) {
6092 Arg = Arg->IgnoreParenCasts();
6093 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
6094
6095 if (!Literal || !Literal->isAscii()) {
6096 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
6097 << Arg->getSourceRange();
6098 return true;
6099 }
6100
6101 if (Literal->containsNonAsciiOrNull()) {
6102 StringRef String = Literal->getString();
6103 unsigned NumBytes = String.size();
6104 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
6105 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
6106 llvm::UTF16 *ToPtr = &ToBuf[0];
6107
6108 llvm::ConversionResult Result =
6109 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
6110 ToPtr + NumBytes, llvm::strictConversion);
6111 // Check for conversion failure.
6112 if (Result != llvm::conversionOK)
6113 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated)
6114 << Arg->getSourceRange();
6115 }
6116 return false;
6117}
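// [Editorial example -- not part of the analyzed source] A sketch of the
// constructor calls this routine accepts and rejects (the builtin named below
// is what CFSTR() typically expands to with constant CFStrings enabled):
//
//   __builtin___CFStringMakeConstantString("plain ASCII");   // OK
//   const char *dynamic = argv[1];   // any non-literal expression
//   __builtin___CFStringMakeConstantString(dynamic);
//   // error: CFString literal is not a string constant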
6118
6119/// CheckOSLogFormatStringArg - Checks that the format string argument to the os_log()
6120/// and os_trace() functions is correct, and converts it to const char *.
6121ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
6122 Arg = Arg->IgnoreParenCasts();
6123 auto *Literal = dyn_cast<StringLiteral>(Arg);
6124 if (!Literal) {
6125 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
6126 Literal = ObjcLiteral->getString();
6127 }
6128 }
6129
6130 if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) {
6131 return ExprError(
6132 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
6133 << Arg->getSourceRange());
6134 }
6135
6136 ExprResult Result(Literal);
6137 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
6138 InitializedEntity Entity =
6139 InitializedEntity::InitializeParameter(Context, ResultTy, false);
6140 Result = PerformCopyInitialization(Entity, SourceLocation(), Result);
6141 return Result;
6142}
6143
6144/// Check that the user is calling the appropriate va_start builtin for the
6145/// target and calling convention.
6146static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
6147 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
6148 bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
6149 bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 ||
6150 TT.getArch() == llvm::Triple::aarch64_32);
6151 bool IsWindows = TT.isOSWindows();
6152 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
6153 if (IsX64 || IsAArch64) {
6154 CallingConv CC = CC_C;
6155 if (const FunctionDecl *FD = S.getCurFunctionDecl())
6156 CC = FD->getType()->castAs<FunctionType>()->getCallConv();
6157 if (IsMSVAStart) {
6158 // Don't allow this in System V ABI functions.
6159 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
6160 return S.Diag(Fn->getBeginLoc(),
6161 diag::err_ms_va_start_used_in_sysv_function);
6162 } else {
6163 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
6164 // On x64 Windows, don't allow this in System V ABI functions.
6165 // (Yes, that means there's no corresponding way to support variadic
6166 // System V ABI functions on Windows.)
6167 if ((IsWindows && CC == CC_X86_64SysV) ||
6168 (!IsWindows && CC == CC_Win64))
6169 return S.Diag(Fn->getBeginLoc(),
6170 diag::err_va_start_used_in_wrong_abi_function)
6171 << !IsWindows;
6172 }
6173 return false;
6174 }
6175
6176 if (IsMSVAStart)
6177 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only);
6178 return false;
6179}
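// [Editorial example -- not part of the analyzed source] On x86-64 targeting
// a System V platform, the ABI check above rejects the Microsoft variant
// unless the enclosing function uses the Win64 calling convention:
//
//   void f(int last, ...) {                      // default SysV convention
//     __builtin_ms_va_list ap;
//     __builtin_ms_va_start(ap, last);
//     // error: '__builtin_ms_va_start' used in a System V ABI function
//   }
//   void __attribute__((ms_abi)) g(int last, ...) {
//     __builtin_ms_va_list ap;
//     __builtin_ms_va_start(ap, last);           // OK
//   }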
6180
6181static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
6182 ParmVarDecl **LastParam = nullptr) {
6183 // Determine whether the current function, block, or obj-c method is variadic
6184 // and get its parameter list.
6185 bool IsVariadic = false;
6186 ArrayRef<ParmVarDecl *> Params;
6187 DeclContext *Caller = S.CurContext;
6188 if (auto *Block = dyn_cast<BlockDecl>(Caller)) {
6189 IsVariadic = Block->isVariadic();
6190 Params = Block->parameters();
6191 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) {
6192 IsVariadic = FD->isVariadic();
6193 Params = FD->parameters();
6194 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) {
6195 IsVariadic = MD->isVariadic();
6196 // FIXME: This isn't correct for methods (results in bogus warning).
6197 Params = MD->parameters();
6198 } else if (isa<CapturedDecl>(Caller)) {
6199 // We don't support va_start in a CapturedDecl.
6200 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt);
6201 return true;
6202 } else {
6203 // This must be some other declcontext that parses exprs.
6204 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function);
6205 return true;
6206 }
6207
6208 if (!IsVariadic) {
6209 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function);
6210 return true;
6211 }
6212
6213 if (LastParam)
6214 *LastParam = Params.empty() ? nullptr : Params.back();
6215
6216 return false;
6217}
6218
6219/// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
6220/// for validity. Emit an error and return true on failure; return false
6221/// on success.
6222bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
6223 Expr *Fn = TheCall->getCallee();
6224
6225 if (checkVAStartABI(*this, BuiltinID, Fn))
6226 return true;
6227
6228 if (checkArgCount(*this, TheCall, 2))
6229 return true;
6230
6231 // Type-check the first argument normally.
6232 if (checkBuiltinArgument(*this, TheCall, 0))
6233 return true;
6234
6235 // Check that the current function is variadic, and get its last parameter.
6236 ParmVarDecl *LastParam;
6237 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam))
6238 return true;
6239
6240 // Verify that the second argument to the builtin is the last argument of the
6241 // current function or method.
6242 bool SecondArgIsLastNamedArgument = false;
6243 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts();
6244
6245 // These are valid if SecondArgIsLastNamedArgument is false after the next
6246 // block.
6247 QualType Type;
6248 SourceLocation ParamLoc;
6249 bool IsCRegister = false;
6250
6251 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
6252 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
6253 SecondArgIsLastNamedArgument = PV == LastParam;
6254
6255 Type = PV->getType();
6256 ParamLoc = PV->getLocation();
6257 IsCRegister =
6258 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus;
6259 }
6260 }
6261
6262 if (!SecondArgIsLastNamedArgument)
6263 Diag(TheCall->getArg(1)->getBeginLoc(),
6264 diag::warn_second_arg_of_va_start_not_last_named_param);
6265 else if (IsCRegister || Type->isReferenceType() ||
6266 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] {
6267 // Promotable integers are UB, but enumerations need a bit of
6268 // extra checking to see what their promotable type actually is.
6269 if (!Type->isPromotableIntegerType())
6270 return false;
6271 if (!Type->isEnumeralType())
6272 return true;
6273 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl();
6274 return !(ED &&
6275 Context.typesAreCompatible(ED->getPromotionType(), Type));
6276 }()) {
6277 unsigned Reason = 0;
6278 if (Type->isReferenceType()) Reason = 1;
6279 else if (IsCRegister) Reason = 2;
6280 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason;
6281 Diag(ParamLoc, diag::note_parameter_type) << Type;
6282 }
6283
6284 TheCall->setType(Context.VoidTy);
6285 return false;
6286}
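// [Editorial example -- not part of the analyzed source] The second argument
// is expected to name the last declared parameter of the enclosing variadic
// function; anything else draws the warning emitted above:
//
//   void log_all(int level, const char *fmt, ...) {
//     va_list ap;
//     va_start(ap, fmt);     // OK: 'fmt' is the last named parameter
//     va_start(ap, level);   // warning: not the last named parameter
//   }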
6287
6288bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
6289 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size,
6290 // const char *named_addr);
6291
6292 Expr *Func = Call->getCallee();
6293
6294 if (Call->getNumArgs() < 3)
6295 return Diag(Call->getEndLoc(),
6296 diag::err_typecheck_call_too_few_args_at_least)
6297 << 0 /*function call*/ << 3 << Call->getNumArgs();
6298
6299 // Type-check the first argument normally.
6300 if (checkBuiltinArgument(*this, Call, 0))
6301 return true;
6302
6303 // Check that the current function is variadic.
6304 if (checkVAStartIsInVariadicFunction(*this, Func))
6305 return true;
6306
6307 // __va_start on Windows does not validate the parameter qualifiers
6308
6309 const Expr *Arg1 = Call->getArg(1)->IgnoreParens();
6310 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr();
6311
6312 const Expr *Arg2 = Call->getArg(2)->IgnoreParens();
6313 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr();
6314
6315 const QualType &ConstCharPtrTy =
6316 Context.getPointerType(Context.CharTy.withConst());
6317 if (!Arg1Ty->isPointerType() ||
6318 Arg1Ty->getPointeeType().withoutLocalFastQualifiers() != Context.CharTy)
6319 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible)
6320 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */
6321 << 0 /* qualifier difference */
6322 << 3 /* parameter mismatch */
6323 << 2 << Arg1->getType() << ConstCharPtrTy;
6324
6325 const QualType SizeTy = Context.getSizeType();
6326 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
6327 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible)
6328 << Arg2->getType() << SizeTy << 1 /* different class */
6329 << 0 /* qualifier difference */
6330 << 3 /* parameter mismatch */
6331 << 3 << Arg2->getType() << SizeTy;
6332
6333 return false;
6334}
6335
6336/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
6337/// friends. This is declared to take (...), so we have to check everything.
6338bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
6339 if (checkArgCount(*this, TheCall, 2))
6340 return true;
6341
6342 ExprResult OrigArg0 = TheCall->getArg(0);
6343 ExprResult OrigArg1 = TheCall->getArg(1);
6344
6345 // Do standard promotions between the two arguments, returning their common
6346 // type.
6347 QualType Res = UsualArithmeticConversions(
6348 OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison);
6349 if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
6350 return true;
6351
6352 // Make sure any conversions are pushed back into the call; this is
6353 // type safe since unordered compare builtins are declared as "_Bool
6354 // foo(...)".
6355 TheCall->setArg(0, OrigArg0.get());
6356 TheCall->setArg(1, OrigArg1.get());
6357
6358 if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
6359 return false;
6360
6361 // If the common type isn't a real floating type, then the arguments were
6362 // invalid for this operation.
6363 if (Res.isNull() || !Res->isRealFloatingType())
6364 return Diag(OrigArg0.get()->getBeginLoc(),
6365 diag::err_typecheck_call_invalid_ordered_compare)
6366 << OrigArg0.get()->getType() << OrigArg1.get()->getType()
6367 << SourceRange(OrigArg0.get()->getBeginLoc(),
6368 OrigArg1.get()->getEndLoc());
6369
6370 return false;
6371}
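// [Editorial example -- not part of the analyzed source] The unordered
// comparison builtins require a real floating common type after the usual
// arithmetic conversions:
//
//   double a; float b;
//   if (__builtin_isgreater(a, b)) { }   // OK: common type is double
//   int i, j;
//   __builtin_isgreater(i, j);
//   // error: common type is not a real floating type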
6372
6373/// SemaBuiltinFPClassification - Handle functions like
6374/// __builtin_isnan and friends. This is declared to take (...), so we have
6375/// to check everything. We expect the last argument to be a floating point
6376/// value.
6377bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
6378 if (checkArgCount(*this, TheCall, NumArgs))
6379 return true;
6380
6381 // __builtin_fpclassify is the only case where NumArgs != 1, so we can count
6382 // on all preceding parameters just being int. Try all of those.
6383 for (unsigned i = 0; i < NumArgs - 1; ++i) {
6384 Expr *Arg = TheCall->getArg(i);
6385
6386 if (Arg->isTypeDependent())
6387 return false;
6388
6389 ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing);
6390
6391 if (Res.isInvalid())
6392 return true;
6393 TheCall->setArg(i, Res.get());
6394 }
6395
6396 Expr *OrigArg = TheCall->getArg(NumArgs-1);
6397
6398 if (OrigArg->isTypeDependent())
6399 return false;
6400
6401 // Usual Unary Conversions will convert half to float, which we want for
6402// machines that use fp16 conversion intrinsics. Else, we want to leave the
6403 // type how it is, but do normal L->Rvalue conversions.
6404 if (Context.getTargetInfo().useFP16ConversionIntrinsics())
6405 OrigArg = UsualUnaryConversions(OrigArg).get();
6406 else
6407 OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get();
6408 TheCall->setArg(NumArgs - 1, OrigArg);
6409
6410 // This operation requires a non-_Complex floating-point number.
6411 if (!OrigArg->getType()->isRealFloatingType())
6412 return Diag(OrigArg->getBeginLoc(),
6413 diag::err_typecheck_call_invalid_unary_fp)
6414 << OrigArg->getType() << OrigArg->getSourceRange();
6415
6416 return false;
6417}
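// [Editorial example -- not part of the analyzed source] __builtin_fpclassify
// takes five int classification values followed by the floating-point value
// to classify (the FP_* macros come from <math.h>):
//
//   double d;
//   int kind = __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL,
//                                   FP_SUBNORMAL, FP_ZERO, d);
//   __builtin_isnan(7);   // error: last argument must be real floating point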
6418
6419/// Perform semantic analysis for a call to __builtin_complex.
6420bool Sema::SemaBuiltinComplex(CallExpr *TheCall) {
6421 if (checkArgCount(*this, TheCall, 2))
6422 return true;
6423
6424 bool Dependent = false;
6425 for (unsigned I = 0; I != 2; ++I) {
6426 Expr *Arg = TheCall->getArg(I);
6427 QualType T = Arg->getType();
6428 if (T->isDependentType()) {
6429 Dependent = true;
6430 continue;
6431 }
6432
6433 // Despite supporting _Complex int, GCC requires a real floating point type
6434 // for the operands of __builtin_complex.
6435 if (!T->isRealFloatingType()) {
6436 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp)
6437 << Arg->getType() << Arg->getSourceRange();
6438 }
6439
6440 ExprResult Converted = DefaultLvalueConversion(Arg);
6441 if (Converted.isInvalid())
6442 return true;
6443 TheCall->setArg(I, Converted.get());
6444 }
6445
6446 if (Dependent) {
6447 TheCall->setType(Context.DependentTy);
6448 return false;
6449 }
6450
6451 Expr *Real = TheCall->getArg(0);
6452 Expr *Imag = TheCall->getArg(1);
6453 if (!Context.hasSameType(Real->getType(), Imag->getType())) {
6454 return Diag(Real->getBeginLoc(),
6455 diag::err_typecheck_call_different_arg_types)
6456 << Real->getType() << Imag->getType()
6457 << Real->getSourceRange() << Imag->getSourceRange();
6458 }
6459
6460 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers;
6461 // don't allow this builtin to form those types either.
6462 // FIXME: Should we allow these types?
6463 if (Real->getType()->isFloat16Type())
6464 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
6465 << "_Float16";
6466 if (Real->getType()->isHalfType())
6467 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
6468 << "half";
6469
6470 TheCall->setType(Context.getComplexType(Real->getType()));
6471 return false;
6472}
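// [Editorial example -- not part of the analyzed source] Both operands must
// be real floating values of the same type, as checked above:
//
//   _Complex double z = __builtin_complex(3.0, 4.0);   // OK
//   __builtin_complex(3.0f, 4.0);   // error: operands have different types
//   __builtin_complex(3, 4);        // error: operands must be floating point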
6473
6474// Customized Sema Checking for VSX builtins that have the following signature:
6475// vector [...] builtinName(vector [...], vector [...], const int);
6476// Which takes the same type of vectors (any legal vector type) for the first
6477// two arguments and takes compile time constant for the third argument.
6478// Example builtins are :
6479// vector double vec_xxpermdi(vector double, vector double, int);
6480// vector short vec_xxsldwi(vector short, vector short, int);
6481bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
6482 unsigned ExpectedNumArgs = 3;
6483 if (checkArgCount(*this, TheCall, ExpectedNumArgs))
6484 return true;
6485
6486 // Check the third argument is a compile time constant
6487 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context))
6488 return Diag(TheCall->getBeginLoc(),
6489 diag::err_vsx_builtin_nonconstant_argument)
6490 << 3 /* argument index */ << TheCall->getDirectCallee()
6491 << SourceRange(TheCall->getArg(2)->getBeginLoc(),
6492 TheCall->getArg(2)->getEndLoc());
6493
6494 QualType Arg1Ty = TheCall->getArg(0)->getType();
6495 QualType Arg2Ty = TheCall->getArg(1)->getType();
6496
6497 // Check the type of argument 1 and argument 2 are vectors.
6498 SourceLocation BuiltinLoc = TheCall->getBeginLoc();
6499 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
6500 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
6501 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
6502 << TheCall->getDirectCallee()
6503 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
6504 TheCall->getArg(1)->getEndLoc());
6505 }
6506
6507 // Check the first two arguments are the same type.
6508 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) {
6509 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
6510 << TheCall->getDirectCallee()
6511 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
6512 TheCall->getArg(1)->getEndLoc());
6513 }
6514
6515 // When default clang type checking is turned off and the customized type
6516 // checking is used, the returning type of the function must be explicitly
6517 // set. Otherwise it is _Bool by default.
6518 TheCall->setType(Arg1Ty);
6519
6520 return false;
6521}
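// [Editorial example -- not part of the analyzed source] Sketch for the VSX
// permute-doubleword intrinsic named in the comment above (PowerPC with VSX
// enabled is assumed):
//
//   vector double a, b;
//   vector float c;
//   vector double r = vec_xxpermdi(a, b, 2);   // OK: constant third argument
//   int sel = 2;
//   vec_xxpermdi(a, b, sel);   // error: third argument must be a constant
//   vec_xxpermdi(a, c, 0);     // error: first two arguments must have the
//                              // same vector type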
6522
6523/// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
6524// This is declared to take (...), so we have to check everything.
6525ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
6526 if (TheCall->getNumArgs() < 2)
6527 return ExprError(Diag(TheCall->getEndLoc(),
6528 diag::err_typecheck_call_too_few_args_at_least)
6529 << 0 /*function call*/ << 2 << TheCall->getNumArgs()
6530 << TheCall->getSourceRange());
6531
6532 // Determine which of the following types of shufflevector we're checking:
6533 // 1) unary, vector mask: (lhs, mask)
6534 // 2) binary, scalar mask: (lhs, rhs, index, ..., index)
6535 QualType resType = TheCall->getArg(0)->getType();
6536 unsigned numElements = 0;
6537
6538 if (!TheCall->getArg(0)->isTypeDependent() &&
6539 !TheCall->getArg(1)->isTypeDependent()) {
6540 QualType LHSType = TheCall->getArg(0)->getType();
6541 QualType RHSType = TheCall->getArg(1)->getType();
6542
6543 if (!LHSType->isVectorType() || !RHSType->isVectorType())
6544 return ExprError(
6545 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector)
6546 << TheCall->getDirectCallee()
6547 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
6548 TheCall->getArg(1)->getEndLoc()));
6549
6550 numElements = LHSType->castAs<VectorType>()->getNumElements();
6551 unsigned numResElements = TheCall->getNumArgs() - 2;
6552
6553 // Check to see if we have a call with 2 vector arguments, the unary shuffle
6554 // with mask. If so, verify that RHS is an integer vector type with the
6555 // same number of elts as lhs.
6556 if (TheCall->getNumArgs() == 2) {
6557 if (!RHSType->hasIntegerRepresentation() ||
6558 RHSType->castAs<VectorType>()->getNumElements() != numElements)
6559 return ExprError(Diag(TheCall->getBeginLoc(),
6560 diag::err_vec_builtin_incompatible_vector)
6561 << TheCall->getDirectCallee()
6562 << SourceRange(TheCall->getArg(1)->getBeginLoc(),
6563 TheCall->getArg(1)->getEndLoc()));
6564 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
6565 return ExprError(Diag(TheCall->getBeginLoc(),
6566 diag::err_vec_builtin_incompatible_vector)
6567 << TheCall->getDirectCallee()
6568 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
6569 TheCall->getArg(1)->getEndLoc()));
6570 } else if (numElements != numResElements) {
6571 QualType eltType = LHSType->castAs<VectorType>()->getElementType();
6572 resType = Context.getVectorType(eltType, numResElements,
6573 VectorType::GenericVector);
6574 }
6575 }
6576
6577 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
6578 if (TheCall->getArg(i)->isTypeDependent() ||
6579 TheCall->getArg(i)->isValueDependent())
6580 continue;
6581
6582 Optional<llvm::APSInt> Result;
6583 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context)))
6584 return ExprError(Diag(TheCall->getBeginLoc(),
6585 diag::err_shufflevector_nonconstant_argument)
6586 << TheCall->getArg(i)->getSourceRange());
6587
6588 // Allow -1 which will be translated to undef in the IR.
6589 if (Result->isSigned() && Result->isAllOnesValue())
6590 continue;
6591
6592 if (Result->getActiveBits() > 64 ||
6593 Result->getZExtValue() >= numElements * 2)
6594 return ExprError(Diag(TheCall->getBeginLoc(),
6595 diag::err_shufflevector_argument_too_large)
6596 << TheCall->getArg(i)->getSourceRange());
6597 }
6598
6599 SmallVector<Expr*, 32> exprs;
6600
6601 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
6602 exprs.push_back(TheCall->getArg(i));
6603 TheCall->setArg(i, nullptr);
6604 }
6605
6606 return new (Context) ShuffleVectorExpr(Context, exprs, resType,
6607 TheCall->getCallee()->getBeginLoc(),
6608 TheCall->getRParenLoc());
6609}
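// [Editorial example -- not part of the analyzed source] Both forms described
// above, plus the index range check:
//
//   typedef int v4si __attribute__((vector_size(16)));
//   v4si a, b, mask;
//   v4si m = __builtin_shufflevector(a, mask);            // unary, vector mask
//   v4si x = __builtin_shufflevector(a, b, 0, 4, 1, 5);   // binary, scalar mask
//   v4si u = __builtin_shufflevector(a, b, 3, -1, 2, -1); // -1 becomes undef
//   __builtin_shufflevector(a, b, 0, 1, 2, 8);
//   // error: index 8 is out of range (valid mask values here are -1 and 0..7)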
6610
6611/// SemaConvertVectorExpr - Handle __builtin_convertvector
6612ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
6613 SourceLocation BuiltinLoc,
6614 SourceLocation RParenLoc) {
6615 ExprValueKind VK = VK_PRValue;
6616 ExprObjectKind OK = OK_Ordinary;
6617 QualType DstTy = TInfo->getType();
6618 QualType SrcTy = E->getType();
6619
6620 if (!SrcTy->isVectorType() && !SrcTy->isDependentType())
6621 return ExprError(Diag(BuiltinLoc,
6622 diag::err_convertvector_non_vector)
6623 << E->getSourceRange());
6624 if (!DstTy->isVectorType() && !DstTy->isDependentType())
6625 return ExprError(Diag(BuiltinLoc,
6626 diag::err_convertvector_non_vector_type));
6627
6628 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) {
6629 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements();
6630 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements();
6631 if (SrcElts != DstElts)
6632 return ExprError(Diag(BuiltinLoc,
6633 diag::err_convertvector_incompatible_vector)
6634 << E->getSourceRange());
6635 }
6636
6637 return new (Context)
6638 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc);
6639}
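// [Editorial example -- not part of the analyzed source] Source and
// destination must be vector types with the same number of elements:
//
//   typedef int   v4si __attribute__((vector_size(16)));
//   typedef float v4sf __attribute__((vector_size(16)));
//   typedef float v8sf __attribute__((vector_size(32)));
//   v4si iv;
//   v4sf fv = __builtin_convertvector(iv, v4sf);   // OK: 4 -> 4 elements
//   __builtin_convertvector(iv, v8sf);             // error: 4 vs 8 elements
//   __builtin_convertvector(iv, float);            // error: not a vector type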
6640
6641/// SemaBuiltinPrefetch - Handle __builtin_prefetch.
6642// This is declared to take (const void*, ...) and can take two
6643// optional constant int args.
6644bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
6645 unsigned NumArgs = TheCall->getNumArgs();
6646
6647 if (NumArgs > 3)
6648 return Diag(TheCall->getEndLoc(),
6649 diag::err_typecheck_call_too_many_args_at_most)
6650 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange();
6651
6652 // Argument 0 is checked for us and the remaining arguments must be
6653 // constant integers.
6654 for (unsigned i = 1; i != NumArgs; ++i)
6655 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3))
6656 return true;
6657
6658 return false;
6659}
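// [Editorial example -- not part of the analyzed source] The optional second
// and third arguments are constrained to [0,1] (read/write) and [0,3]
// (locality) by the range check above:
//
//   __builtin_prefetch(p);          // OK: defaults used
//   __builtin_prefetch(p, 1, 3);    // OK: prefetch for write, high locality
//   __builtin_prefetch(p, 2, 3);    // error: second argument outside [0,1]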
6660
6661/// SemaBuiltinArithmeticFence - Handle __arithmetic_fence.
6662bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) {
6663 if (!Context.getTargetInfo().checkArithmeticFenceSupported())
6664 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
6665 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
6666 if (checkArgCount(*this, TheCall, 1))
6667 return true;
6668 Expr *Arg = TheCall->getArg(0);
6669 if (Arg->isInstantiationDependent())
6670 return false;
6671
6672 QualType ArgTy = Arg->getType();
6673 if (!ArgTy->hasFloatingRepresentation())
6674 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector)
6675 << ArgTy;
6676 if (Arg->isLValue()) {
6677 ExprResult FirstArg = DefaultLvalueConversion(Arg);
6678 TheCall->setArg(0, FirstArg.get());
6679 }
6680 TheCall->setType(TheCall->getArg(0)->getType());
6681 return false;
6682}
6683
6684/// SemaBuiltinAssume - Handle __assume (MS Extension).
6685// __assume does not evaluate its arguments, and should warn if its argument
6686// has side effects.
6687bool Sema::SemaBuiltinAssume(CallExpr *TheCall) {
6688 Expr *Arg = TheCall->getArg(0);
6689 if (Arg->isInstantiationDependent()) return false;
6690
6691 if (Arg->HasSideEffects(Context))
6692 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects)
6693 << Arg->getSourceRange()
6694 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier();
6695
6696 return false;
6697}
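// [Editorial example -- not part of the analyzed source] __assume never
// evaluates its argument, so any side effects are silently dropped; the check
// above warns about exactly that:
//
//   __assume(idx < size);     // OK
//   __assume(idx++ < size);   // warning: side effects of the argument to
//                             // '__assume' are discarded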
6698
6699/// Handle __builtin_alloca_with_align. This is declared
6700/// as (size_t, size_t) where the second size_t must be a power of 2 greater
6701/// than 8.
6702bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) {
6703 // The alignment must be a constant integer.
6704 Expr *Arg = TheCall->getArg(1);
6705
6706 // We can't check the value of a dependent argument.
6707 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
6708 if (const auto *UE =
6709 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts()))
6710 if (UE->getKind() == UETT_AlignOf ||
6711 UE->getKind() == UETT_PreferredAlignOf)
6712 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof)
6713 << Arg->getSourceRange();
6714
6715 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context);
6716
6717 if (!Result.isPowerOf2())
6718 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
6719 << Arg->getSourceRange();
6720
6721 if (Result < Context.getCharWidth())
6722 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small)
6723 << (unsigned)Context.getCharWidth() << Arg->getSourceRange();
6724
6725 if (Result > std::numeric_limits<int32_t>::max())
6726 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big)
6727 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange();
6728 }
6729
6730 return false;
6731}
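// [Editorial example -- not part of the analyzed source] The second argument
// is an alignment expressed in bits, so it must be a power of two no smaller
// than CHAR_BIT:
//
//   void *p = __builtin_alloca_with_align(n, 64);   // OK: 64-bit alignment
//   __builtin_alloca_with_align(n, 63);   // error: not a power of two
//   __builtin_alloca_with_align(n, 4);    // error: smaller than CHAR_BIT bits
//   __builtin_alloca_with_align(n, __alignof__(double));
//   // warning: alignof yields bytes, but this builtin expects bits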
6732
6733/// Handle __builtin_assume_aligned. This is declared
6734/// as (const void*, size_t, ...) and can take one optional constant int arg.
6735bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
6736 unsigned NumArgs = TheCall->getNumArgs();
6737
6738 if (NumArgs > 3)
6739 return Diag(TheCall->getEndLoc(),
6740 diag::err_typecheck_call_too_many_args_at_most)
6741 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange();
6742
6743 // The alignment must be a constant integer.
6744 Expr *Arg = TheCall->getArg(1);
6745
6746 // We can't check the value of a dependent argument.
6747 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
6748 llvm::APSInt Result;
6749 if (SemaBuiltinConstantArg(TheCall, 1, Result))
6750 return true;
6751
6752 if (!Result.isPowerOf2())
6753 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
6754 << Arg->getSourceRange();
6755
6756 if (Result > Sema::MaximumAlignment)
6757 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great)
6758 << Arg->getSourceRange() << Sema::MaximumAlignment;
6759 }
6760
6761 if (NumArgs > 2) {
6762 ExprResult Arg(TheCall->getArg(2));
6763 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
6764 Context.getSizeType(), false);
6765 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
6766 if (Arg.isInvalid()) return true;
6767 TheCall->setArg(2, Arg.get());
6768 }
6769
6770 return false;
6771}
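// [Editorial example -- not part of the analyzed source] Unlike
// __builtin_alloca_with_align, the alignment here is in bytes; a third
// argument gives an optional byte offset:
//
//   void *q = __builtin_assume_aligned(p, 64);       // OK
//   void *r = __builtin_assume_aligned(p, 64, 8);    // OK: offset form
//   __builtin_assume_aligned(p, 48);   // error: not a power of two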
6772
6773bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
6774 unsigned BuiltinID =
6775 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID();
6776 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size;
6777
6778 unsigned NumArgs = TheCall->getNumArgs();
6779 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2;
6780 if (NumArgs < NumRequiredArgs) {
6781 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
6782 << 0 /* function call */ << NumRequiredArgs << NumArgs
6783 << TheCall->getSourceRange();
6784 }
6785 if (NumArgs >= NumRequiredArgs + 0x100) {
6786 return Diag(TheCall->getEndLoc(),
6787 diag::err_typecheck_call_too_many_args_at_most)
6788 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs
6789 << TheCall->getSourceRange();
6790 }
6791 unsigned i = 0;
6792
6793 // For formatting call, check buffer arg.
6794 if (!IsSizeCall) {
6795 ExprResult Arg(TheCall->getArg(i));
6796 InitializedEntity Entity = InitializedEntity::InitializeParameter(
6797 Context, Context.VoidPtrTy, false);
6798 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
6799 if (Arg.isInvalid())
6800 return true;
6801 TheCall->setArg(i, Arg.get());
6802 i++;
6803 }
6804
6805 // Check string literal arg.
6806 unsigned FormatIdx = i;
6807 {
6808 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i));
6809 if (Arg.isInvalid())
6810 return true;
6811 TheCall->setArg(i, Arg.get());
6812 i++;
6813 }
6814
6815 // Make sure variadic args are scalar.
6816 unsigned FirstDataArg = i;
6817 while (i < NumArgs) {
6818 ExprResult Arg = DefaultVariadicArgumentPromotion(
6819 TheCall->getArg(i), VariadicFunction, nullptr);
6820 if (Arg.isInvalid())
6821 return true;
6822 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType());
6823 if (ArgSize.getQuantity() >= 0x100) {
6824 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big)
6825 << i << (int)ArgSize.getQuantity() << 0xff
6826 << TheCall->getSourceRange();
6827 }
6828 TheCall->setArg(i, Arg.get());
6829 i++;
6830 }
6831
6832 // Check formatting specifiers. NOTE: We're only doing this for the non-size
6833 // call to avoid duplicate diagnostics.
6834 if (!IsSizeCall) {
6835 llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
6836 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
6837 bool Success = CheckFormatArguments(
6838 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog,
6839 VariadicFunction, TheCall->getBeginLoc(), SourceRange(),
6840 CheckedVarArgs);
6841 if (!Success)
6842 return true;
6843 }
6844
6845 if (IsSizeCall) {
6846 TheCall->setType(Context.getSizeType());
6847 } else {
6848 TheCall->setType(Context.VoidPtrTy);
6849 }
6850 return false;
6851}
6852
6853/// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
6854/// TheCall is a constant expression.
6855bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
6856 llvm::APSInt &Result) {
6857 Expr *Arg = TheCall->getArg(ArgNum);
6858 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
6859 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
6860
6861 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;
6862
6863 Optional<llvm::APSInt> R;
6864 if (!(R = Arg->getIntegerConstantExpr(Context)))
6865 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type)
6866 << FDecl->getDeclName() << Arg->getSourceRange();
6867 Result = *R;
6868 return false;
6869}
6870
6871/// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
6872/// TheCall is a constant expression in the range [Low, High].
6873bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
6874 int Low, int High, bool RangeIsError) {
6875 if (isConstantEvaluated())
6876 return false;
6877 llvm::APSInt Result;
6878
6879 // We can't check the value of a dependent argument.
6880 Expr *Arg = TheCall->getArg(ArgNum);
6881 if (Arg->isTypeDependent() || Arg->isValueDependent())
6882 return false;
6883
6884 // Check constant-ness first.
6885 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6886 return true;
6887
6888 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
6889 if (RangeIsError)
6890 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
6891 << toString(Result, 10) << Low << High << Arg->getSourceRange();
6892 else
6893 // Defer the warning until we know if the code will be emitted so that
6894 // dead code can ignore this.
6895 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
6896 PDiag(diag::warn_argument_invalid_range)
6897 << toString(Result, 10) << Low << High
6898 << Arg->getSourceRange());
6899 }
6900
6901 return false;
6902}
6903
6904/// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr
6905/// TheCall is a constant expression that is a multiple of Num.
6906bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
6907 unsigned Num) {
6908 llvm::APSInt Result;
6909
6910 // We can't check the value of a dependent argument.
6911 Expr *Arg = TheCall->getArg(ArgNum);
6912 if (Arg->isTypeDependent() || Arg->isValueDependent())
6913 return false;
6914
6915 // Check constant-ness first.
6916 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6917 return true;
6918
6919 if (Result.getSExtValue() % Num != 0)
6920 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
6921 << Num << Arg->getSourceRange();
6922
6923 return false;
6924}
6925
6926/// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
6927/// constant expression representing a power of 2.
6928bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) {
6929 llvm::APSInt Result;
6930
6931 // We can't check the value of a dependent argument.
6932 Expr *Arg = TheCall->getArg(ArgNum);
6933 if (Arg->isTypeDependent() || Arg->isValueDependent())
6934 return false;
6935
6936 // Check constant-ness first.
6937 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6938 return true;
6939
6940 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if
6941 // and only if x is a power of 2.
6942 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0)
6943 return false;
6944
6945 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2)
6946 << Arg->getSourceRange();
6947}
6948
6949static bool IsShiftedByte(llvm::APSInt Value) {
6950 if (Value.isNegative())
6951 return false;
6952
6953 // Check if it's a shifted byte, by shifting it down
6954 while (true) {
6955 // If the value fits in the bottom byte, the check passes.
6956 if (Value < 0x100)
6957 return true;
6958
6959 // Otherwise, if the value has _any_ bits in the bottom byte, the check
6960 // fails.
6961 if ((Value & 0xFF) != 0)
6962 return false;
6963
6964 // If the bottom 8 bits are all 0, but something above that is nonzero,
6965 // then shifting the value right by 8 bits won't affect whether it's a
6966 // shifted byte or not. So do that, and go round again.
6967 Value >>= 8;
6968 }
6969}
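// [Editorial worked values -- not part of the analyzed source] For the helper
// above:
//
//   0x00AB   -> true   (fits in the bottom byte)
//   0xAB00   -> true   (0xAB shifted left by 8 bits)
//   0xAB0000 -> true   (0xAB shifted left by 16 bits)
//   0xAB01   -> false  (bits set in the bottom byte and above it)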
6970
6971/// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is
6972/// a constant expression representing an arbitrary byte value shifted left by
6973/// a multiple of 8 bits.
6974bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
6975 unsigned ArgBits) {
6976 llvm::APSInt Result;
6977
6978 // We can't check the value of a dependent argument.
6979 Expr *Arg = TheCall->getArg(ArgNum);
6980 if (Arg->isTypeDependent() || Arg->isValueDependent())
6981 return false;
6982
6983 // Check constant-ness first.
6984 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6985 return true;
6986
6987 // Truncate to the given size.
6988 Result = Result.getLoBits(ArgBits);
6989 Result.setIsUnsigned(true);
6990
6991 if (IsShiftedByte(Result))
6992 return false;
6993
6994 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte)
6995 << Arg->getSourceRange();
6996}
6997
6998/// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of
6999/// TheCall is a constant expression representing either a shifted byte value,
7000/// or a value of the form 0x??FF (i.e. a member of the arithmetic progression
7001/// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some
7002/// Arm MVE intrinsics.
7003bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall,
7004 int ArgNum,
7005 unsigned ArgBits) {
7006 llvm::APSInt Result;
7007
7008 // We can't check the value of a dependent argument.
7009 Expr *Arg = TheCall->getArg(ArgNum);
7010 if (Arg->isTypeDependent() || Arg->isValueDependent())
7011 return false;
7012
7013 // Check constant-ness first.
7014 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
7015 return true;
7016
7017 // Truncate to the given size.
7018 Result = Result.getLoBits(ArgBits);
7019 Result.setIsUnsigned(true);
7020
7021 // Check to see if it's in either of the required forms.
7022 if (IsShiftedByte(Result) ||
7023 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF))
7024 return false;
7025
7026 return Diag(TheCall->getBeginLoc(),
7027 diag::err_argument_not_shifted_byte_or_xxff)
7028 << Arg->getSourceRange();
7029}
7030
7031/// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
7032bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
7033 if (BuiltinID == AArch64::BI__builtin_arm_irg) {
7034 if (checkArgCount(*this, TheCall, 2))
7035 return true;
7036 Expr *Arg0 = TheCall->getArg(0);
7037 Expr *Arg1 = TheCall->getArg(1);
7038
7039 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
7040 if (FirstArg.isInvalid())
7041 return true;
7042 QualType FirstArgType = FirstArg.get()->getType();
7043 if (!FirstArgType->isAnyPointerType())
7044 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
7045 << "first" << FirstArgType << Arg0->getSourceRange();
7046 TheCall->setArg(0, FirstArg.get());
7047
7048 ExprResult SecArg = DefaultLvalueConversion(Arg1);
7049 if (SecArg.isInvalid())
7050 return true;
7051 QualType SecArgType = SecArg.get()->getType();
7052 if (!SecArgType->isIntegerType())
7053 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
7054 << "second" << SecArgType << Arg1->getSourceRange();
7055
7056 // Derive the return type from the pointer argument.
7057 TheCall->setType(FirstArgType);
7058 return false;
7059 }
7060
7061 if (BuiltinID == AArch64::BI__builtin_arm_addg) {
7062 if (checkArgCount(*this, TheCall, 2))
7063 return true;
7064
7065 Expr *Arg0 = TheCall->getArg(0);
7066 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
7067 if (FirstArg.isInvalid())
7068 return true;
7069 QualType FirstArgType = FirstArg.get()->getType();
7070 if (!FirstArgType->isAnyPointerType())
7071 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
7072 << "first" << FirstArgType << Arg0->getSourceRange();
7073 TheCall->setArg(0, FirstArg.get());
7074
7075 // Derive the return type from the pointer argument.
7076 TheCall->setType(FirstArgType);
7077
7078 // Second arg must be a constant in range [0,15]
7079 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
7080 }
7081
7082 if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
7083 if (checkArgCount(*this, TheCall, 2))
7084 return true;
7085 Expr *Arg0 = TheCall->getArg(0);
7086 Expr *Arg1 = TheCall->getArg(1);
7087
7088 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
7089 if (FirstArg.isInvalid())
7090 return true;
7091 QualType FirstArgType = FirstArg.get()->getType();
7092 if (!FirstArgType->isAnyPointerType())
7093 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
7094 << "first" << FirstArgType << Arg0->getSourceRange();
7095
7096 QualType SecArgType = Arg1->getType();
7097 if (!SecArgType->isIntegerType())
7098 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
7099 << "second" << SecArgType << Arg1->getSourceRange();
7100 TheCall->setType(Context.IntTy);
7101 return false;
7102 }
7103
7104 if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
7105 BuiltinID == AArch64::BI__builtin_arm_stg) {
7106 if (checkArgCount(*this, TheCall, 1))
7107 return true;
7108 Expr *Arg0 = TheCall->getArg(0);
7109 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
7110 if (FirstArg.isInvalid())
7111 return true;
7112
7113 QualType FirstArgType = FirstArg.get()->getType();
7114 if (!FirstArgType->isAnyPointerType())
7115 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
7116 << "first" << FirstArgType << Arg0->getSourceRange();
7117 TheCall->setArg(0, FirstArg.get());
7118
7119 // Derive the return type from the pointer argument.
7120 if (BuiltinID == AArch64::BI__builtin_arm_ldg)
7121 TheCall->setType(FirstArgType);
7122 return false;
7123 }
7124
7125 if (BuiltinID == AArch64::BI__builtin_arm_subp) {
7126 Expr *ArgA = TheCall->getArg(0);
7127 Expr *ArgB = TheCall->getArg(1);
7128
7129 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA);
7130 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB);
7131
7132 if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
7133 return true;
7134
7135 QualType ArgTypeA = ArgExprA.get()->getType();
7136 QualType ArgTypeB = ArgExprB.get()->getType();
7137
7138 auto isNull = [&] (Expr *E) -> bool {
7139 return E->isNullPointerConstant(
7140 Context, Expr::NPC_ValueDependentIsNotNull); };
7141
7142 // argument should be either a pointer or null
7143 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
7144 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
7145 << "first" << ArgTypeA << ArgA->getSourceRange();
7146
7147 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
7148 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
7149 << "second" << ArgTypeB << ArgB->getSourceRange();
7150
7151 // Ensure Pointee types are compatible
7152 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
7153 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
7154 QualType pointeeA = ArgTypeA->getPointeeType();
7155 QualType pointeeB = ArgTypeB->getPointeeType();
7156 if (!Context.typesAreCompatible(
7157 Context.getCanonicalType(pointeeA).getUnqualifiedType(),
7158 Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
7159 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible)
7160 << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
7161 << ArgB->getSourceRange();
7162 }
7163 }
7164
7165 // at least one argument should be pointer type
7166 if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
7167 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
7168 << ArgTypeA << ArgTypeB << ArgA->getSourceRange();
7169
7170 if (isNull(ArgA)) // adopt type of the other pointer
7171 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer);
7172
7173 if (isNull(ArgB))
7174 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer);
7175
7176 TheCall->setArg(0, ArgExprA.get());
7177 TheCall->setArg(1, ArgExprB.get());
7178 TheCall->setType(Context.LongLongTy);
7179 return false;
7180 }
7181 assert(false && "Unhandled ARM MTE intrinsic");
7182 return true;
7183}
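The checks above can be exercised with ordinary calls to the MTE builtins. The following is an editor's illustrative sketch (not part of SemaChecking.cpp), assuming an AArch64 target with the memory tagging extension enabled; `p` and `excl` are hypothetical values:

  void *example_mte(void *p, unsigned long excl) {
    void *t = __builtin_arm_irg(p, excl);    // accepted: pointer + integer, result has p's type
    void *u = __builtin_arm_addg(t, 8);      // accepted: second argument is a constant in [0,15]
    long long d = __builtin_arm_subp(t, u);  // accepted: pointer operands, long long result
    // __builtin_arm_irg(1, excl);           // rejected: err_memtag_arg_must_be_pointer
    // __builtin_arm_addg(t, 16);            // rejected: constant out of range [0,15]
    return d < 0 ? t : u;
  }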
7184
7185/// SemaBuiltinARMSpecialReg - Check whether argument ArgNum of CallExpr
7186/// TheCall is an ARM/AArch64 special register string literal.
7187bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
7188 int ArgNum, unsigned ExpectedFieldNum,
7189 bool AllowName) {
7190 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
7191 BuiltinID == ARM::BI__builtin_arm_wsr64 ||
7192 BuiltinID == ARM::BI__builtin_arm_rsr ||
7193 BuiltinID == ARM::BI__builtin_arm_rsrp ||
7194 BuiltinID == ARM::BI__builtin_arm_wsr ||
7195 BuiltinID == ARM::BI__builtin_arm_wsrp;
7196 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
7197 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
7198 BuiltinID == AArch64::BI__builtin_arm_rsr ||
7199 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
7200 BuiltinID == AArch64::BI__builtin_arm_wsr ||
7201 BuiltinID == AArch64::BI__builtin_arm_wsrp;
7202 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");
7203
7204 // We can't check the value of a dependent argument.
7205 Expr *Arg = TheCall->getArg(ArgNum);
7206 if (Arg->isTypeDependent() || Arg->isValueDependent())
7207 return false;
7208
7209 // Check if the argument is a string literal.
7210 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
7211 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
7212 << Arg->getSourceRange();
7213
7214 // Check the type of special register given.
7215 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
7216 SmallVector<StringRef, 6> Fields;
7217 Reg.split(Fields, ":");
7218
7219 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
7220 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
7221 << Arg->getSourceRange();
7222
7223 // If the string is the name of a register then we cannot check that it is
7224 // valid here but if the string is of one of the forms described in ACLE then we
7225 // can check that the supplied fields are integers and within the valid
7226 // ranges.
7227 if (Fields.size() > 1) {
7228 bool FiveFields = Fields.size() == 5;
7229
7230 bool ValidString = true;
7231 if (IsARMBuiltin) {
7232 ValidString &= Fields[0].startswith_insensitive("cp") ||
7233 Fields[0].startswith_insensitive("p");
7234 if (ValidString)
7235 Fields[0] = Fields[0].drop_front(
7236 Fields[0].startswith_insensitive("cp") ? 2 : 1);
7237
7238 ValidString &= Fields[2].startswith_insensitive("c");
7239 if (ValidString)
7240 Fields[2] = Fields[2].drop_front(1);
7241
7242 if (FiveFields) {
7243 ValidString &= Fields[3].startswith_insensitive("c");
7244 if (ValidString)
7245 Fields[3] = Fields[3].drop_front(1);
7246 }
7247 }
7248
7249 SmallVector<int, 5> Ranges;
7250 if (FiveFields)
7251 Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
7252 else
7253 Ranges.append({15, 7, 15});
7254
7255 for (unsigned i=0; i<Fields.size(); ++i) {
7256 int IntField;
7257 ValidString &= !Fields[i].getAsInteger(10, IntField);
7258 ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
7259 }
7260
7261 if (!ValidString)
7262 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
7263 << Arg->getSourceRange();
7264 } else if (IsAArch64Builtin && Fields.size() == 1) {
7265 // If the register name is one of those that appear in the condition below
7266 // and the special register builtin being used is one of the write builtins,
7267 // then we require that the argument provided for writing to the register
7268 // is an integer constant expression. This is because it will be lowered to
7269 // an MSR (immediate) instruction, so we need to know the immediate at
7270 // compile time.
7271 if (TheCall->getNumArgs() != 2)
7272 return false;
7273
7274 std::string RegLower = Reg.lower();
7275 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" &&
7276 RegLower != "pan" && RegLower != "uao")
7277 return false;
7278
7279 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
7280 }
7281
7282 return false;
7283}
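A sketch of the string forms this validation accepts (editor's illustration, not from SemaChecking.cpp; assumes an AArch64 target, and the encodings shown are arbitrary in-range values):

  unsigned long long example_sysreg(void) {
    unsigned long long v = __builtin_arm_rsr64("1:3:7:4:0"); // accepted: five integer fields within {1,7,15,15,7}
    __builtin_arm_wsr64("daifset", 2);                       // accepted: named register lowered to MSR (immediate), constant in [0,15]
    // __builtin_arm_rsr64("1:3:7:4:9");                     // rejected: err_arm_invalid_specialreg (last field exceeds 7)
    return v;
  }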
7284
7285/// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity.
7286/// Emit an error and return true on failure; return false on success.
7287/// TypeStr is a string containing the type descriptor of the value returned by
7288/// the builtin and the descriptors of the expected type of the arguments.
7289bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, const char *TypeStr) {
7290
7291 assert((TypeStr[0] != '\0') &&
7292 "Invalid types in PPC MMA builtin declaration");
7293
7294 unsigned Mask = 0;
7295 unsigned ArgNum = 0;
7296
7297 // The first type in TypeStr is the type of the value returned by the
7298 // builtin. So we first read that type and change the type of TheCall.
7299 QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
7300 TheCall->setType(type);
7301
7302 while (*TypeStr != '\0') {
7303 Mask = 0;
7304 QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
7305 if (ArgNum >= TheCall->getNumArgs()) {
7306 ArgNum++;
7307 break;
7308 }
7309
7310 Expr *Arg = TheCall->getArg(ArgNum);
7311 QualType ArgType = Arg->getType();
7312
7313 if ((ExpectedType->isVoidPointerType() && !ArgType->isPointerType()) ||
7314 (!ExpectedType->isVoidPointerType() &&
7315 ArgType.getCanonicalType() != ExpectedType))
7316 return Diag(Arg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
7317 << ArgType << ExpectedType << 1 << 0 << 0;
7318
7319 // If the value of the Mask is not 0, we have a constraint in the size of
7320 // the integer argument so here we ensure the argument is a constant that
7321 // is in the valid range.
7322 if (Mask != 0 &&
7323 SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true))
7324 return true;
7325
7326 ArgNum++;
7327 }
7328
7329 // In case we exited early from the previous loop, there are other types to
7330 // read from TypeStr. So we need to read them all to ensure we have the right
7331 // number of arguments in TheCall and if it is not the case, to display a
7332 // better error message.
7333 while (*TypeStr != '\0') {
7334 (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
7335 ArgNum++;
7336 }
7337 if (checkArgCount(*this, TheCall, ArgNum))
7338 return true;
7339
7340 return false;
7341}
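A sketch of the call shape this validates (editor's illustration, not from SemaChecking.cpp; assumes a PowerPC target with MMA and AltiVec support, and uses one member of the MMA builtin family as an example):

  void example_mma(__vector_quad *acc, vector unsigned char a, vector unsigned char b) {
    __builtin_mma_xvf32ger(acc, a, b);   // accepted: argument types match the descriptor string
    // __builtin_mma_xvf32ger(acc, a);   // rejected: checkArgCount reports the wrong argument count
  }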
7342
7343/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
7344/// This checks that the target supports __builtin_longjmp and
7345/// that val is a constant 1.
7346bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
7347 if (!Context.getTargetInfo().hasSjLjLowering())
7348 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported)
7349 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
7350
7351 Expr *Arg = TheCall->getArg(1);
7352 llvm::APSInt Result;
7353
7354 // TODO: This is less than ideal. Overload this to take a value.
7355 if (SemaBuiltinConstantArg(TheCall, 1, Result))
7356 return true;
7357
7358 if (Result != 1)
7359 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val)
7360 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc());
7361
7362 return false;
7363}
7364
7365/// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]).
7366/// This checks that the target supports __builtin_setjmp.
7367bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) {
7368 if (!Context.getTargetInfo().hasSjLjLowering())
7369 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported)
7370 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
7371 return false;
7372}
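A sketch of the constraints enforced by the two functions above (editor's illustration, not from SemaChecking.cpp; these builtins are only available on targets with SjLj lowering):

  static void *env[5];
  int example_sjlj(void) {
    if (__builtin_setjmp(env) == 0)
      __builtin_longjmp(env, 1);   // accepted: val is the constant 1
    // __builtin_longjmp(env, 2);  // rejected: err_builtin_longjmp_invalid_val
    return 0;
  }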
7373
7374namespace {
7375
7376class UncoveredArgHandler {
7377 enum { Unknown = -1, AllCovered = -2 };
7378
7379 signed FirstUncoveredArg = Unknown;
7380 SmallVector<const Expr *, 4> DiagnosticExprs;
7381
7382public:
7383 UncoveredArgHandler() = default;
7384
7385 bool hasUncoveredArg() const {
7386 return (FirstUncoveredArg >= 0);
7387 }
7388
7389 unsigned getUncoveredArg() const {
7390 assert(hasUncoveredArg() && "no uncovered argument");
7391 return FirstUncoveredArg;
7392 }
7393
7394 void setAllCovered() {
7395 // A string has been found with all arguments covered, so clear out
7396 // the diagnostics.
7397 DiagnosticExprs.clear();
7398 FirstUncoveredArg = AllCovered;
7399 }
7400
7401 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) {
7402 assert(NewFirstUncoveredArg >= 0 && "Outside range");
7403
7404 // Don't update if a previous string covers all arguments.
7405 if (FirstUncoveredArg == AllCovered)
7406 return;
7407
7408 // UncoveredArgHandler tracks the highest uncovered argument index
7409 // and with it all the strings that match this index.
7410 if (NewFirstUncoveredArg == FirstUncoveredArg)
7411 DiagnosticExprs.push_back(StrExpr);
7412 else if (NewFirstUncoveredArg > FirstUncoveredArg) {
7413 DiagnosticExprs.clear();
7414 DiagnosticExprs.push_back(StrExpr);
7415 FirstUncoveredArg = NewFirstUncoveredArg;
7416 }
7417 }
7418
7419 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr);
7420};
7421
7422enum StringLiteralCheckType {
7423 SLCT_NotALiteral,
7424 SLCT_UncheckedLiteral,
7425 SLCT_CheckedLiteral
7426};
7427
7428} // namespace
7429
7430static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend,
7431 BinaryOperatorKind BinOpKind,
7432 bool AddendIsRight) {
7433 unsigned BitWidth = Offset.getBitWidth();
7434 unsigned AddendBitWidth = Addend.getBitWidth();
7435 // There might be negative interim results.
7436 if (Addend.isUnsigned()) {
7437 Addend = Addend.zext(++AddendBitWidth);
7438 Addend.setIsSigned(true);
7439 }
7440 // Adjust the bit width of the APSInts.
7441 if (AddendBitWidth > BitWidth) {
7442 Offset = Offset.sext(AddendBitWidth);
7443 BitWidth = AddendBitWidth;
7444 } else if (BitWidth > AddendBitWidth) {
7445 Addend = Addend.sext(BitWidth);
7446 }
7447
7448 bool Ov = false;
7449 llvm::APSInt ResOffset = Offset;
7450 if (BinOpKind == BO_Add)
7451 ResOffset = Offset.sadd_ov(Addend, Ov);
7452 else {
7453 assert(AddendIsRight && BinOpKind == BO_Sub &&
7454 "operator must be add or sub with addend on the right");
7455 ResOffset = Offset.ssub_ov(Addend, Ov);
7456 }
7457
7458 // We add an offset to a pointer here so we should support an offset as big as
7459 // possible.
7460 if (Ov) {
7461 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 &&
7462 "index (intermediate) result too big");
7463 Offset = Offset.sext(2 * BitWidth);
7464 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight);
7465 return;
7466 }
7467
7468 Offset = ResOffset;
7469}
7470
7471namespace {
7472
7473// This is a wrapper class around StringLiteral to support offsetted string
7474// literals as format strings. It takes the offset into account when returning
7475// the string and its length or the source locations to display notes correctly.
7476class FormatStringLiteral {
7477 const StringLiteral *FExpr;
7478 int64_t Offset;
7479
7480 public:
7481 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0)
7482 : FExpr(fexpr), Offset(Offset) {}
7483
7484 StringRef getString() const {
7485 return FExpr->getString().drop_front(Offset);
7486 }
7487
7488 unsigned getByteLength() const {
7489 return FExpr->getByteLength() - getCharByteWidth() * Offset;
7490 }
7491
7492 unsigned getLength() const { return FExpr->getLength() - Offset; }
7493 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); }
7494
7495 StringLiteral::StringKind getKind() const { return FExpr->getKind(); }
7496
7497 QualType getType() const { return FExpr->getType(); }
7498
7499 bool isAscii() const { return FExpr->isAscii(); }
7500 bool isWide() const { return FExpr->isWide(); }
7501 bool isUTF8() const { return FExpr->isUTF8(); }
7502 bool isUTF16() const { return FExpr->isUTF16(); }
7503 bool isUTF32() const { return FExpr->isUTF32(); }
7504 bool isPascal() const { return FExpr->isPascal(); }
7505
7506 SourceLocation getLocationOfByte(
7507 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features,
7508 const TargetInfo &Target, unsigned *StartToken = nullptr,
7509 unsigned *StartTokenByteOffset = nullptr) const {
7510 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target,
7511 StartToken, StartTokenByteOffset);
7512 }
7513
7514 SourceLocation getBeginLoc() const LLVM_READONLY {
7515 return FExpr->getBeginLoc().getLocWithOffset(Offset);
7516 }
7517
7518 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); }
7519};
7520
7521} // namespace
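A sketch of why the offset matters (editor's illustration, not from SemaChecking.cpp; `name` is a hypothetical argument):

  #include <cstdio>
  void example_offset_literal(const char *name) {
    // The format expression is a string literal plus a constant offset; it is
    // checked as "name=%s", and byte locations in notes are shifted accordingly.
    std::printf("id=%d name=%s" + 6, name);
  }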
7522
7523static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
7524 const Expr *OrigFormatExpr,
7525 ArrayRef<const Expr *> Args,
7526 bool HasVAListArg, unsigned format_idx,
7527 unsigned firstDataArg,
7528 Sema::FormatStringType Type,
7529 bool inFunctionCall,
7530 Sema::VariadicCallType CallType,
7531 llvm::SmallBitVector &CheckedVarArgs,
7532 UncoveredArgHandler &UncoveredArg,
7533 bool IgnoreStringsWithoutSpecifiers);
7534
7535// Determine if an expression is a string literal or constant string.
7536// If this function returns false on the arguments to a function expecting a
7537// format string, we will usually need to emit a warning.
7538// True string literals are then checked by CheckFormatString.
7539static StringLiteralCheckType
7540checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
7541 bool HasVAListArg, unsigned format_idx,
7542 unsigned firstDataArg, Sema::FormatStringType Type,
7543 Sema::VariadicCallType CallType, bool InFunctionCall,
7544 llvm::SmallBitVector &CheckedVarArgs,
7545 UncoveredArgHandler &UncoveredArg,
7546 llvm::APSInt Offset,
7547 bool IgnoreStringsWithoutSpecifiers = false) {
7548 if (S.isConstantEvaluated())
7549 return SLCT_NotALiteral;
7550 tryAgain:
7551 assert(Offset.isSigned() && "invalid offset");
7552
7553 if (E->isTypeDependent() || E->isValueDependent())
7554 return SLCT_NotALiteral;
7555
7556 E = E->IgnoreParenCasts();
7557
7558 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull))
7559 // Technically -Wformat-nonliteral does not warn about this case.
7560 // The behavior of printf and friends in this case is implementation
7561 // dependent. Ideally if the format string cannot be null then
7562 // it should have a 'nonnull' attribute in the function prototype.
7563 return SLCT_UncheckedLiteral;
7564
7565 switch (E->getStmtClass()) {
7566 case Stmt::BinaryConditionalOperatorClass:
7567 case Stmt::ConditionalOperatorClass: {
7568 // The expression is a literal if both sub-expressions were, and it was
7569 // completely checked only if both sub-expressions were checked.
7570 const AbstractConditionalOperator *C =
7571 cast<AbstractConditionalOperator>(E);
7572
7573 // Determine whether it is necessary to check both sub-expressions, for
7574 // example, because the condition expression is a constant that can be
7575 // evaluated at compile time.
7576 bool CheckLeft = true, CheckRight = true;
7577
7578 bool Cond;
7579 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(),
7580 S.isConstantEvaluated())) {
7581 if (Cond)
7582 CheckRight = false;
7583 else
7584 CheckLeft = false;
7585 }
7586
7587 // We need to maintain the offsets for the right and the left hand side
7588 // separately to check if every possible indexed expression is a valid
7589 // string literal. They might have different offsets for different string
7590 // literals in the end.
7591 StringLiteralCheckType Left;
7592 if (!CheckLeft)
7593 Left = SLCT_UncheckedLiteral;
7594 else {
7595 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args,
7596 HasVAListArg, format_idx, firstDataArg,
7597 Type, CallType, InFunctionCall,
7598 CheckedVarArgs, UncoveredArg, Offset,
7599 IgnoreStringsWithoutSpecifiers);
7600 if (Left == SLCT_NotALiteral || !CheckRight) {
7601 return Left;
7602 }
7603 }
7604
7605 StringLiteralCheckType Right = checkFormatStringExpr(
7606 S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg,
7607 Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
7608 IgnoreStringsWithoutSpecifiers);
7609
7610 return (CheckLeft && Left < Right) ? Left : Right;
7611 }
7612
7613 case Stmt::ImplicitCastExprClass:
7614 E = cast<ImplicitCastExpr>(E)->getSubExpr();
7615 goto tryAgain;
7616
7617 case Stmt::OpaqueValueExprClass:
7618 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) {
7619 E = src;
7620 goto tryAgain;
7621 }
7622 return SLCT_NotALiteral;
7623
7624 case Stmt::PredefinedExprClass:
7625 // While __func__, etc., are technically not string literals, they
7626 // cannot contain format specifiers and thus are not a security
7627 // liability.
7628 return SLCT_UncheckedLiteral;
7629
7630 case Stmt::DeclRefExprClass: {
7631 const DeclRefExpr *DR = cast<DeclRefExpr>(E);
7632
7633 // As an exception, do not flag errors for variables binding to
7634 // const string literals.
7635 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
7636 bool isConstant = false;
7637 QualType T = DR->getType();
7638
7639 if (const ArrayType *AT = S.Context.getAsArrayType(T)) {
7640 isConstant = AT->getElementType().isConstant(S.Context);
7641 } else if (const PointerType *PT = T->getAs<PointerType>()) {
7642 isConstant = T.isConstant(S.Context) &&
7643 PT->getPointeeType().isConstant(S.Context);
7644 } else if (T->isObjCObjectPointerType()) {
7645 // In ObjC, there is usually no "const ObjectPointer" type,
7646 // so don't check if the pointee type is constant.
7647 isConstant = T.isConstant(S.Context);
7648 }
7649
7650 if (isConstant) {
7651 if (const Expr *Init = VD->getAnyInitializer()) {
7652 // Look through initializers like const char c[] = { "foo" }
7653 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) {
7654 if (InitList->isStringLiteralInit())
7655 Init = InitList->getInit(0)->IgnoreParenImpCasts();
7656 }
7657 return checkFormatStringExpr(S, Init, Args,
7658 HasVAListArg, format_idx,
7659 firstDataArg, Type, CallType,
7660 /*InFunctionCall*/ false, CheckedVarArgs,
7661 UncoveredArg, Offset);
7662 }
7663 }
7664
7665 // For vprintf* functions (i.e., HasVAListArg==true), we add a
7666 // special check to see if the format string is a function parameter
7667 // of the function calling the printf function. If the function
7668 // has an attribute indicating it is a printf-like function, then we
7669 // should suppress warnings concerning non-literals being used in a call
7670 // to a vprintf function. For example:
7671 //
7672 // void
7673 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){
7674 // va_list ap;
7675 // va_start(ap, fmt);
7676 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt".
7677 // ...
7678 // }
7679 if (HasVAListArg) {
7680 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) {
7681 if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) {
7682 int PVIndex = PV->getFunctionScopeIndex() + 1;
7683 for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) {
7684 // adjust for implicit parameter
7685 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
7686 if (MD->isInstance())
7687 ++PVIndex;
7688 // We also check if the formats are compatible.
7689 // We can't pass a 'scanf' string to a 'printf' function.
7690 if (PVIndex == PVFormat->getFormatIdx() &&
7691 Type == S.GetFormatStringType(PVFormat))
7692 return SLCT_UncheckedLiteral;
7693 }
7694 }
7695 }
7696 }
7697 }
7698
7699 return SLCT_NotALiteral;
7700 }
7701
7702 case Stmt::CallExprClass:
7703 case Stmt::CXXMemberCallExprClass: {
7704 const CallExpr *CE = cast<CallExpr>(E);
7705 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
7706 bool IsFirst = true;
7707 StringLiteralCheckType CommonResult;
7708 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) {
7709 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex());
7710 StringLiteralCheckType Result = checkFormatStringExpr(
7711 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
7712 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
7713 IgnoreStringsWithoutSpecifiers);
7714 if (IsFirst) {
7715 CommonResult = Result;
7716 IsFirst = false;
7717 }
7718 }
7719 if (!IsFirst)
7720 return CommonResult;
7721
7722 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
7723 unsigned BuiltinID = FD->getBuiltinID();
7724 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
7725 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
7726 const Expr *Arg = CE->getArg(0);
7727 return checkFormatStringExpr(S, Arg, Args,
7728 HasVAListArg, format_idx,
7729 firstDataArg, Type, CallType,
7730 InFunctionCall, CheckedVarArgs,
7731 UncoveredArg, Offset,
7732 IgnoreStringsWithoutSpecifiers);
7733 }
7734 }
7735 }
7736
7737 return SLCT_NotALiteral;
7738 }
7739 case Stmt::ObjCMessageExprClass: {
7740 const auto *ME = cast<ObjCMessageExpr>(E);
7741 if (const auto *MD = ME->getMethodDecl()) {
7742 if (const auto *FA = MD->getAttr<FormatArgAttr>()) {
7743 // As a special case heuristic, if we're using the method -[NSBundle
7744 // localizedStringForKey:value:table:], ignore any key strings that lack
7745 // format specifiers. The idea is that if the key doesn't have any
7746 // format specifiers then it's probably just a key to map to the
7747 // localized strings. If it does have format specifiers though, then it's
7748 // likely that the text of the key is the format string in the
7749 // programmer's language, and should be checked.
7750 const ObjCInterfaceDecl *IFace;
7751 if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) &&
7752 IFace->getIdentifier()->isStr("NSBundle") &&
7753 MD->getSelector().isKeywordSelector(
7754 {"localizedStringForKey", "value", "table"})) {
7755 IgnoreStringsWithoutSpecifiers = true;
7756 }
7757
7758 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
7759 return checkFormatStringExpr(
7760 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
7761 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
7762 IgnoreStringsWithoutSpecifiers);
7763 }
7764 }
7765
7766 return SLCT_NotALiteral;
7767 }
7768 case Stmt::ObjCStringLiteralClass:
7769 case Stmt::StringLiteralClass: {
7770 const StringLiteral *StrE = nullptr;
7771
7772 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
7773 StrE = ObjCFExpr->getString();
7774 else
7775 StrE = cast<StringLiteral>(E);
7776
7777 if (StrE) {
7778 if (Offset.isNegative() || Offset > StrE->getLength()) {
7779 // TODO: It would be better to have an explicit warning for out of
7780 // bounds literals.
7781 return SLCT_NotALiteral;
7782 }
7783 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
7784 CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx,
7785 firstDataArg, Type, InFunctionCall, CallType,
7786 CheckedVarArgs, UncoveredArg,
7787 IgnoreStringsWithoutSpecifiers);
7788 return SLCT_CheckedLiteral;
7789 }
7790
7791 return SLCT_NotALiteral;
7792 }
7793 case Stmt::BinaryOperatorClass: {
7794 const BinaryOperator *BinOp = cast<BinaryOperator>(E);
7795
7796 // A string literal + an int offset is still a string literal.
7797 if (BinOp->isAdditiveOp()) {
7798 Expr::EvalResult LResult, RResult;
7799
7800 bool LIsInt = BinOp->getLHS()->EvaluateAsInt(
7801 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated());
7802 bool RIsInt = BinOp->getRHS()->EvaluateAsInt(
7803 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated());
7804
7805 if (LIsInt != RIsInt) {
7806 BinaryOperatorKind BinOpKind = BinOp->getOpcode();
7807
7808 if (LIsInt) {
7809 if (BinOpKind == BO_Add) {
7810 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt);
7811 E = BinOp->getRHS();
7812 goto tryAgain;
7813 }
7814 } else {
7815 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt);
7816 E = BinOp->getLHS();
7817 goto tryAgain;
7818 }
7819 }
7820 }
7821
7822 return SLCT_NotALiteral;
7823 }
7824 case Stmt::UnaryOperatorClass: {
7825 const UnaryOperator *UnaOp = cast<UnaryOperator>(E);
7826 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr());
7827 if (UnaOp->getOpcode() == UO_AddrOf && ASE) {
7828 Expr::EvalResult IndexResult;
7829 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context,
7830 Expr::SE_NoSideEffects,
7831 S.isConstantEvaluated())) {
7832 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add,
7833 /*RHS is int*/ true);
7834 E = ASE->getBase();
7835 goto tryAgain;
7836 }
7837 }
7838
7839 return SLCT_NotALiteral;
7840 }
7841
7842 default:
7843 return SLCT_NotALiteral;
7844 }
7845}
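A sketch of the kinds of format expressions this walk classifies (editor's illustration, not from SemaChecking.cpp; all names are hypothetical):

  #include <cstdio>
  void example_format_exprs(bool cond, int n, const char *dynamic_fmt) {
    const char fmt[] = "%d\n";
    std::printf(fmt, n);                      // DeclRefExpr to a const array with an initializer: checked
    std::printf(cond ? "%d\n" : "%i\n", n);   // conditional operator: both arms are checked
    std::printf(dynamic_fmt, n);              // SLCT_NotALiteral: -Wformat-nonliteral
  }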
7846
7847Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
7848 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName())
7849 .Case("scanf", FST_Scanf)
7850 .Cases("printf", "printf0", "syslog", FST_Printf)
7851 .Cases("NSString", "CFString", FST_NSString)
7852 .Case("strftime", FST_Strftime)
7853 .Case("strfmon", FST_Strfmon)
7854 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf)
7855 .Case("freebsd_kprintf", FST_FreeBSDKPrintf)
7856 .Case("os_trace", FST_OSLog)
7857 .Case("os_log", FST_OSLog)
7858 .Default(FST_Unknown);
7859}
7860
7861/// CheckFormatArguments - Check calls to printf and scanf (and similar
7862/// functions) for correct use of format strings.
7863/// Returns true if a format string has been fully checked.
7864bool Sema::CheckFormatArguments(const FormatAttr *Format,
7865 ArrayRef<const Expr *> Args,
7866 bool IsCXXMember,
7867 VariadicCallType CallType,
7868 SourceLocation Loc, SourceRange Range,
7869 llvm::SmallBitVector &CheckedVarArgs) {
7870 FormatStringInfo FSI;
7871 if (getFormatStringInfo(Format, IsCXXMember, &FSI))
7872 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx,
7873 FSI.FirstDataArg, GetFormatStringType(Format),
7874 CallType, Loc, Range, CheckedVarArgs);
7875 return false;
7876}
7877
7878bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args,
7879 bool HasVAListArg, unsigned format_idx,
7880 unsigned firstDataArg, FormatStringType Type,
7881 VariadicCallType CallType,
7882 SourceLocation Loc, SourceRange Range,
7883 llvm::SmallBitVector &CheckedVarArgs) {
7884 // CHECK: printf/scanf-like function is called with no format string.
7885 if (format_idx >= Args.size()) {
7886 Diag(Loc, diag::warn_missing_format_string) << Range;
7887 return false;
7888 }
7889
7890 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts();
7891
7892 // CHECK: format string is not a string literal.
7893 //
7894 // Dynamically generated format strings are difficult to
7895 // automatically vet at compile time. Requiring that format strings
7896 // are string literals: (1) permits the checking of format strings by
7897 // the compiler and thereby (2) can practically remove the source of
7898 // many format string exploits.
7899
7900 // Format string can be either ObjC string (e.g. @"%d") or
7901 // C string (e.g. "%d")
7902 // ObjC string uses the same format specifiers as C string, so we can use
7903 // the same format string checking logic for both ObjC and C strings.
7904 UncoveredArgHandler UncoveredArg;
7905 StringLiteralCheckType CT =
7906 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg,
7907 format_idx, firstDataArg, Type, CallType,
7908 /*IsFunctionCall*/ true, CheckedVarArgs,
7909 UncoveredArg,
7910 /*no string offset*/ llvm::APSInt(64, false) = 0);
7911
7912 // Generate a diagnostic where an uncovered argument is detected.
7913 if (UncoveredArg.hasUncoveredArg()) {
7914 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg;
7915 assert(ArgIdx < Args.size() && "ArgIdx outside bounds");
7916 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]);
7917 }
7918
7919 if (CT != SLCT_NotALiteral)
7920 // Literal format string found, check done!
7921 return CT == SLCT_CheckedLiteral;
7922
7923 // Strftime is particular as it always uses a single 'time' argument,
7924 // so it is safe to pass a non-literal string.
7925 if (Type == FST_Strftime)
7926 return false;
7927
7928 // Do not emit diag when the string param is a macro expansion and the
7929 // format is either NSString or CFString. This is a hack to prevent
7930 // diag when using the NSLocalizedString and CFCopyLocalizedString macros
7931 // which are usually used in place of NS and CF string literals.
7932 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc();
7933 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc))
7934 return false;
7935
7936 // If there are no arguments specified, warn with -Wformat-security, otherwise
7937 // warn only with -Wformat-nonliteral.
7938 if (Args.size() == firstDataArg) {
7939 Diag(FormatLoc, diag::warn_format_nonliteral_noargs)
7940 << OrigFormatExpr->getSourceRange();
7941 switch (Type) {
7942 default:
7943 break;
7944 case FST_Kprintf:
7945 case FST_FreeBSDKPrintf:
7946 case FST_Printf:
7947 case FST_Syslog:
7948 Diag(FormatLoc, diag::note_format_security_fixit)
7949 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", ");
7950 break;
7951 case FST_NSString:
7952 Diag(FormatLoc, diag::note_format_security_fixit)
7953 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", ");
7954 break;
7955 }
7956 } else {
7957 Diag(FormatLoc, diag::warn_format_nonliteral)
7958 << OrigFormatExpr->getSourceRange();
7959 }
7960 return false;
7961}
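A sketch of the two diagnostic paths at the end of this function (editor's illustration, not from SemaChecking.cpp; `user_msg` and `value` are hypothetical):

  #include <cstdio>
  void example_nonliteral(const char *user_msg, int value) {
    std::printf(user_msg);          // no data arguments: -Wformat-security, fix-it suggests std::printf("%s", user_msg)
    std::printf(user_msg, value);   // with data arguments: -Wformat-nonliteral
  }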
7962
7963namespace {
7964
7965class CheckFormatHandler : public analyze_format_string::FormatStringHandler {
7966protected:
7967 Sema &S;
7968 const FormatStringLiteral *FExpr;
7969 const Expr *OrigFormatExpr;
7970 const Sema::FormatStringType FSType;
7971 const unsigned FirstDataArg;
7972 const unsigned NumDataArgs;
7973 const char *Beg; // Start of format string.
7974 const bool HasVAListArg;
7975 ArrayRef<const Expr *> Args;
7976 unsigned FormatIdx;
7977 llvm::SmallBitVector CoveredArgs;
7978 bool usesPositionalArgs = false;
7979 bool atFirstArg = true;
7980 bool inFunctionCall;
7981 Sema::VariadicCallType CallType;
7982 llvm::SmallBitVector &CheckedVarArgs;
7983 UncoveredArgHandler &UncoveredArg;
7984
7985public:
7986 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr,
7987 const Expr *origFormatExpr,
7988 const Sema::FormatStringType type, unsigned firstDataArg,
7989 unsigned numDataArgs, const char *beg, bool hasVAListArg,
7990 ArrayRef<const Expr *> Args, unsigned formatIdx,
7991 bool inFunctionCall, Sema::VariadicCallType callType,
7992 llvm::SmallBitVector &CheckedVarArgs,
7993 UncoveredArgHandler &UncoveredArg)
7994 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type),
7995 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg),
7996 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx),
7997 inFunctionCall(inFunctionCall), CallType(callType),
7998 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) {
7999 CoveredArgs.resize(numDataArgs);
8000 CoveredArgs.reset();
8001 }
8002
8003 void DoneProcessing();
8004
8005 void HandleIncompleteSpecifier(const char *startSpecifier,
8006 unsigned specifierLen) override;
8007
8008 void HandleInvalidLengthModifier(
8009 const analyze_format_string::FormatSpecifier &FS,
8010 const analyze_format_string::ConversionSpecifier &CS,
8011 const char *startSpecifier, unsigned specifierLen,
8012 unsigned DiagID);
8013
8014 void HandleNonStandardLengthModifier(
8015 const analyze_format_string::FormatSpecifier &FS,
8016 const char *startSpecifier, unsigned specifierLen);
8017
8018 void HandleNonStandardConversionSpecifier(
8019 const analyze_format_string::ConversionSpecifier &CS,
8020 const char *startSpecifier, unsigned specifierLen);
8021
8022 void HandlePosition(const char *startPos, unsigned posLen) override;
8023
8024 void HandleInvalidPosition(const char *startSpecifier,
8025 unsigned specifierLen,
8026 analyze_format_string::PositionContext p) override;
8027
8028 void HandleZeroPosition(const char *startPos, unsigned posLen) override;
8029
8030 void HandleNullChar(const char *nullCharacter) override;
8031
8032 template <typename Range>
8033 static void
8034 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr,
8035 const PartialDiagnostic &PDiag, SourceLocation StringLoc,
8036 bool IsStringLocation, Range StringRange,
8037 ArrayRef<FixItHint> Fixit = None);
8038
8039protected:
8040 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc,
8041 const char *startSpec,
8042 unsigned specifierLen,
8043 const char *csStart, unsigned csLen);
8044
8045 void HandlePositionalNonpositionalArgs(SourceLocation Loc,
8046 const char *startSpec,
8047 unsigned specifierLen);
8048
8049 SourceRange getFormatStringRange();
8050 CharSourceRange getSpecifierRange(const char *startSpecifier,
8051 unsigned specifierLen);
8052 SourceLocation getLocationOfByte(const char *x);
8053
8054 const Expr *getDataArg(unsigned i) const;
8055
8056 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS,
8057 const analyze_format_string::ConversionSpecifier &CS,
8058 const char *startSpecifier, unsigned specifierLen,
8059 unsigned argIndex);
8060
8061 template <typename Range>
8062 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc,
8063 bool IsStringLocation, Range StringRange,
8064 ArrayRef<FixItHint> Fixit = None);
8065};
8066
8067} // namespace
8068
8069SourceRange CheckFormatHandler::getFormatStringRange() {
8070 return OrigFormatExpr->getSourceRange();
8071}
8072
8073CharSourceRange CheckFormatHandler::
8074getSpecifierRange(const char *startSpecifier, unsigned specifierLen) {
8075 SourceLocation Start = getLocationOfByte(startSpecifier);
8076 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1);
8077
8078 // Advance the end SourceLocation by one due to half-open ranges.
8079 End = End.getLocWithOffset(1);
8080
8081 return CharSourceRange::getCharRange(Start, End);
8082}
8083
8084SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) {
8085 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(),
8086 S.getLangOpts(), S.Context.getTargetInfo());
8087}
8088
8089void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier,
8090 unsigned specifierLen){
8091 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier),
8092 getLocationOfByte(startSpecifier),
8093 /*IsStringLocation*/true,
8094 getSpecifierRange(startSpecifier, specifierLen));
8095}
8096
8097void CheckFormatHandler::HandleInvalidLengthModifier(
8098 const analyze_format_string::FormatSpecifier &FS,
8099 const analyze_format_string::ConversionSpecifier &CS,
8100 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) {
8101 using namespace analyze_format_string;
8102
8103 const LengthModifier &LM = FS.getLengthModifier();
8104 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());
8105
8106 // See if we know how to fix this length modifier.
8107 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
8108 if (FixedLM) {
8109 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
8110 getLocationOfByte(LM.getStart()),
8111 /*IsStringLocation*/true,
8112 getSpecifierRange(startSpecifier, specifierLen));
8113
8114 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
8115 << FixedLM->toString()
8116 << FixItHint::CreateReplacement(LMRange, FixedLM->toString());
8117
8118 } else {
8119 FixItHint Hint;
8120 if (DiagID == diag::warn_format_nonsensical_length)
8121 Hint = FixItHint::CreateRemoval(LMRange);
8122
8123 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
8124 getLocationOfByte(LM.getStart()),
8125 /*IsStringLocation*/true,
8126 getSpecifierRange(startSpecifier, specifierLen),
8127 Hint);
8128 }
8129}
8130
8131void CheckFormatHandler::HandleNonStandardLengthModifier(
8132 const analyze_format_string::FormatSpecifier &FS,
8133 const char *startSpecifier, unsigned specifierLen) {
8134 using namespace analyze_format_string;
8135
8136 const LengthModifier &LM = FS.getLengthModifier();
8137 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());
8138
8139 // See if we know how to fix this length modifier.
8140 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
8141 if (FixedLM) {
8142 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
8143 << LM.toString() << 0,
8144 getLocationOfByte(LM.getStart()),
8145 /*IsStringLocation*/true,
8146 getSpecifierRange(startSpecifier, specifierLen));
8147
8148 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
8149 << FixedLM->toString()
8150 << FixItHint::CreateReplacement(LMRange, FixedLM->toString());
8151
8152 } else {
8153 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
8154 << LM.toString() << 0,
8155 getLocationOfByte(LM.getStart()),
8156 /*IsStringLocation*/true,
8157 getSpecifierRange(startSpecifier, specifierLen));
8158 }
8159}
8160
8161void CheckFormatHandler::HandleNonStandardConversionSpecifier(
8162 const analyze_format_string::ConversionSpecifier &CS,
8163 const char *startSpecifier, unsigned specifierLen) {
8164 using namespace analyze_format_string;
8165
8166 // See if we know how to fix this conversion specifier.
8167 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier();
8168 if (FixedCS) {
8169 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
8170 << CS.toString() << /*conversion specifier*/1,
8171 getLocationOfByte(CS.getStart()),
8172 /*IsStringLocation*/true,
8173 getSpecifierRange(startSpecifier, specifierLen));
8174
8175 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength());
8176 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier)
8177 << FixedCS->toString()
8178 << FixItHint::CreateReplacement(CSRange, FixedCS->toString());
8179 } else {
8180 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
8181 << CS.toString() << /*conversion specifier*/1,
8182 getLocationOfByte(CS.getStart()),
8183 /*IsStringLocation*/true,
8184 getSpecifierRange(startSpecifier, specifierLen));
8185 }
8186}
8187
8188void CheckFormatHandler::HandlePosition(const char *startPos,
8189 unsigned posLen) {
8190 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg),
8191 getLocationOfByte(startPos),
8192 /*IsStringLocation*/true,
8193 getSpecifierRange(startPos, posLen));
8194}
8195
8196void
8197CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen,
8198 analyze_format_string::PositionContext p) {
8199 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier)
8200 << (unsigned) p,
8201 getLocationOfByte(startPos), /*IsStringLocation*/true,
8202 getSpecifierRange(startPos, posLen));
8203}
8204
8205void CheckFormatHandler::HandleZeroPosition(const char *startPos,
8206 unsigned posLen) {
8207 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier),
8208 getLocationOfByte(startPos),
8209 /*IsStringLocation*/true,
8210 getSpecifierRange(startPos, posLen));
8211}
8212
8213void CheckFormatHandler::HandleNullChar(const char *nullCharacter) {
8214 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) {
8215 // The presence of a null character is likely an error.
8216 EmitFormatDiagnostic(
8217 S.PDiag(diag::warn_printf_format_string_contains_null_char),
8218 getLocationOfByte(nullCharacter), /*IsStringLocation*/true,
8219 getFormatStringRange());
8220 }
8221}
8222
8223// Note that this may return NULL if there was an error parsing or building
8224// one of the argument expressions.
8225const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
8226 return Args[FirstDataArg + i];
8227}
8228
8229void CheckFormatHandler::DoneProcessing() {
8230 // Does the number of data arguments exceed the number of
8231 // format conversions in the format string?
8232 if (!HasVAListArg) {
8233 // Find any arguments that weren't covered.
8234 CoveredArgs.flip();
8235 signed notCoveredArg = CoveredArgs.find_first();
8236 if (notCoveredArg >= 0) {
8237 assert((unsigned)notCoveredArg < NumDataArgs);
8238 UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
8239 } else {
8240 UncoveredArg.setAllCovered();
8241 }
8242 }
8243}
8244
8245void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
8246 const Expr *ArgExpr) {
8247 assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
8248 "Invalid state");
8249
8250 if (!ArgExpr)
8251 return;
8252
8253 SourceLocation Loc = ArgExpr->getBeginLoc();
8254
8255 if (S.getSourceManager().isInSystemMacro(Loc))
8256 return;
8257
8258 PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
8259 for (auto E : DiagnosticExprs)
8260 PDiag << E->getSourceRange();
8261
8262 CheckFormatHandler::EmitFormatDiagnostic(
8263 S, IsFunctionCall, DiagnosticExprs[0],
8264 PDiag, Loc, /*IsStringLocation*/false,
8265 DiagnosticExprs[0]->getSourceRange());
8266}
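A sketch of what triggers this diagnostic (editor's illustration, not from SemaChecking.cpp):

  #include <cstdio>
  void example_uncovered(int a, int b) {
    std::printf("%d\n", a, b);   // 'b' is never consumed by the format: warn_printf_data_arg_not_used
  }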
8267
8268bool
8269CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
8270 SourceLocation Loc,
8271 const char *startSpec,
8272 unsigned specifierLen,
8273 const char *csStart,
8274 unsigned csLen) {
8275 bool keepGoing = true;
8276 if (argIndex < NumDataArgs) {
8277 // Consider the argument covered, even though the specifier doesn't
8278 // make sense.
8279 CoveredArgs.set(argIndex);
8280 }
8281 else {
8282 // If argIndex exceeds the number of data arguments we
8283 // don't issue a warning because that is just a cascade of warnings (and
8284 // they may have intended '%%' anyway). We don't want to continue processing
8285 // the format string after this point, however, as we will likely just get
8286 // gibberish when trying to match arguments.
8287 keepGoing = false;
8288 }
8289
8290 StringRef Specifier(csStart, csLen);
8291
8292 // If the specifier is non-printable, it could be the first byte of a UTF-8
8293 // sequence. In that case, print the UTF-8 code point. If not, print the byte
8294 // hex value.
8295 std::string CodePointStr;
8296 if (!llvm::sys::locale::isPrint(*csStart)) {
8297 llvm::UTF32 CodePoint;
8298 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart);
8299 const llvm::UTF8 *E =
8300 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen);
8301 llvm::ConversionResult Result =
8302 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion);
8303
8304 if (Result != llvm::conversionOK) {
8305 unsigned char FirstChar = *csStart;
8306 CodePoint = (llvm::UTF32)FirstChar;
8307 }
8308
8309 llvm::raw_string_ostream OS(CodePointStr);
8310 if (CodePoint < 256)
8311 OS << "\\x" << llvm::format("%02x", CodePoint);
8312 else if (CodePoint <= 0xFFFF)
8313 OS << "\\u" << llvm::format("%04x", CodePoint);
8314 else
8315 OS << "\\U" << llvm::format("%08x", CodePoint);
8316 OS.flush();
8317 Specifier = CodePointStr;
8318 }
8319
8320 EmitFormatDiagnostic(
8321 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc,
8322 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen));
8323
8324 return keepGoing;
8325}
8326
8327void
8328CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc,
8329 const char *startSpec,
8330 unsigned specifierLen) {
8331 EmitFormatDiagnostic(
8332 S.PDiag(diag::warn_format_mix_positional_nonpositional_args),
8333 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen));
8334}
8335
8336bool
8337CheckFormatHandler::CheckNumArgs(
8338 const analyze_format_string::FormatSpecifier &FS,
8339 const analyze_format_string::ConversionSpecifier &CS,
8340 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) {
8341
8342 if (argIndex >= NumDataArgs) {
8343 PartialDiagnostic PDiag = FS.usesPositionalArg()
8344 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args)
8345 << (argIndex+1) << NumDataArgs)
8346 : S.PDiag(diag::warn_printf_insufficient_data_args);
8347 EmitFormatDiagnostic(
8348 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true,
8349 getSpecifierRange(startSpecifier, specifierLen));
8350
8351 // Since more arguments than conversion tokens are given, by extension
8352 // all arguments are covered, so mark this as so.
8353 UncoveredArg.setAllCovered();
8354 return false;
8355 }
8356 return true;
8357}
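A sketch of the insufficient-argument diagnostics this check emits (editor's illustration, not from SemaChecking.cpp):

  #include <cstdio>
  void example_missing_args(int n) {
    std::printf("%d %d\n", n);       // second %d has no argument: warn_printf_insufficient_data_args
    std::printf("%2$s %1$d\n", n);   // %2$s names a missing argument: warn_printf_positional_arg_exceeds_data_args
  }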
8358
8359template<typename Range>
8360void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
8361 SourceLocation Loc,
8362 bool IsStringLocation,
8363 Range StringRange,
8364 ArrayRef<FixItHint> FixIt) {
8365 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag,
8366 Loc, IsStringLocation, StringRange, FixIt);
8367}
8368
8369/// If the format string is not within the function call, emit a note
8370/// so that the function call and string are in diagnostic messages.
8371///
8372/// \param InFunctionCall if true, the format string is within the function
8373/// call and only one diagnostic message will be produced. Otherwise, an
8374/// extra note will be emitted pointing to location of the format string.
8375///
8376/// \param ArgumentExpr the expression that is passed as the format string
8377/// argument in the function call. Used for getting locations when two
8378/// diagnostics are emitted.
8379///
8380/// \param PDiag the callee should already have provided any strings for the
8381/// diagnostic message. This function only adds locations and fixits
8382/// to diagnostics.
8383///
8384/// \param Loc primary location for diagnostic. If two diagnostics are
8385/// required, one will be at Loc and a new SourceLocation will be created for
8386/// the other one.
8387///
8388/// \param IsStringLocation if true, Loc points to the format string and should be
8389/// used for the note. Otherwise, Loc points to the argument list and will
8390/// be used with PDiag.
8391///
8392/// \param StringRange some or all of the string to highlight. This is
8393/// templated so it can accept either a CharSourceRange or a SourceRange.
8394///
8395/// \param FixIt optional fix it hint for the format string.
8396template <typename Range>
8397void CheckFormatHandler::EmitFormatDiagnostic(
8398 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
8399 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
8400 Range StringRange, ArrayRef<FixItHint> FixIt) {
8401 if (InFunctionCall) {
8402 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
8403 D << StringRange;
8404 D << FixIt;
8405 } else {
8406 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
8407 << ArgumentExpr->getSourceRange();
8408
8409 const Sema::SemaDiagnosticBuilder &Note =
8410 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
8411 diag::note_format_string_defined);
8412
8413 Note << StringRange;
8414 Note << FixIt;
8415 }
8416}
8417
8418//===--- CHECK: Printf format string checking ------------------------------===//
8419
8420namespace {
8421
8422class CheckPrintfHandler : public CheckFormatHandler {
8423public:
8424 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
8425 const Expr *origFormatExpr,
8426 const Sema::FormatStringType type, unsigned firstDataArg,
8427 unsigned numDataArgs, bool isObjC, const char *beg,
8428 bool hasVAListArg, ArrayRef<const Expr *> Args,
8429 unsigned formatIdx, bool inFunctionCall,
8430 Sema::VariadicCallType CallType,
8431 llvm::SmallBitVector &CheckedVarArgs,
8432 UncoveredArgHandler &UncoveredArg)
8433 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
8434 numDataArgs, beg, hasVAListArg, Args, formatIdx,
8435 inFunctionCall, CallType, CheckedVarArgs,
8436 UncoveredArg) {}
8437
8438 bool isObjCContext() const { return FSType == Sema::FST_NSString; }
8439
8440 /// Returns true if '%@' specifiers are allowed in the format string.
8441 bool allowsObjCArg() const {
8442 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog ||
8443 FSType == Sema::FST_OSTrace;
8444 }
8445
8446 bool HandleInvalidPrintfConversionSpecifier(
8447 const analyze_printf::PrintfSpecifier &FS,
8448 const char *startSpecifier,
8449 unsigned specifierLen) override;
8450
8451 void handleInvalidMaskType(StringRef MaskType) override;
8452
8453 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
8454 const char *startSpecifier,
8455 unsigned specifierLen) override;
8456 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
8457 const char *StartSpecifier,
8458 unsigned SpecifierLen,
8459 const Expr *E);
8460
8461 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k,
8462 const char *startSpecifier, unsigned specifierLen);
8463 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS,
8464 const analyze_printf::OptionalAmount &Amt,
8465 unsigned type,
8466 const char *startSpecifier, unsigned specifierLen);
8467 void HandleFlag(const analyze_printf::PrintfSpecifier &FS,
8468 const analyze_printf::OptionalFlag &flag,
8469 const char *startSpecifier, unsigned specifierLen);
8470 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS,
8471 const analyze_printf::OptionalFlag &ignoredFlag,
8472 const analyze_printf::OptionalFlag &flag,
8473 const char *startSpecifier, unsigned specifierLen);
8474 bool checkForCStrMembers(const analyze_printf::ArgType &AT,
8475 const Expr *E);
8476
8477 void HandleEmptyObjCModifierFlag(const char *startFlag,
8478 unsigned flagLen) override;
8479
8480 void HandleInvalidObjCModifierFlag(const char *startFlag,
8481 unsigned flagLen) override;
8482
8483 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart,
8484 const char *flagsEnd,
8485 const char *conversionPosition)
8486 override;
8487};
8488
8489} // namespace
8490
8491bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier(
8492 const analyze_printf::PrintfSpecifier &FS,
8493 const char *startSpecifier,
8494 unsigned specifierLen) {
8495 const analyze_printf::PrintfConversionSpecifier &CS =
8496 FS.getConversionSpecifier();
8497
8498 return HandleInvalidConversionSpecifier(FS.getArgIndex(),
8499 getLocationOfByte(CS.getStart()),
8500 startSpecifier, specifierLen,
8501 CS.getStart(), CS.getLength());
8502}
8503
8504void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) {
8505 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size);
8506}
8507
8508bool CheckPrintfHandler::HandleAmount(
8509 const analyze_format_string::OptionalAmount &Amt,
8510 unsigned k, const char *startSpecifier,
8511 unsigned specifierLen) {
8512 if (Amt.hasDataArgument()) {
8513 if (!HasVAListArg) {
8514 unsigned argIndex = Amt.getArgIndex();
8515 if (argIndex >= NumDataArgs) {
8516 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg)
8517 << k,
8518 getLocationOfByte(Amt.getStart()),
8519 /*IsStringLocation*/true,
8520 getSpecifierRange(startSpecifier, specifierLen));
8521 // Don't do any more checking. We will just emit
8522 // spurious errors.
8523 return false;
8524 }
8525
8526 // Type check the data argument. It should be an 'int'.
8527 // Although not in conformance with C99, we also allow the argument to be
8528 // an 'unsigned int' as that is a reasonably safe case. GCC also
8529 // doesn't emit a warning for that case.
8530 CoveredArgs.set(argIndex);
8531 const Expr *Arg = getDataArg(argIndex);
8532 if (!Arg)
8533 return false;
8534
8535 QualType T = Arg->getType();
8536
8537 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context);
8538 assert(AT.isValid());
8539
8540 if (!AT.matchesType(S.Context, T)) {
8541 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type)
8542 << k << AT.getRepresentativeTypeName(S.Context)
8543 << T << Arg->getSourceRange(),
8544 getLocationOfByte(Amt.getStart()),
8545 /*IsStringLocation*/true,
8546 getSpecifierRange(startSpecifier, specifierLen));
8547 // Don't do any more checking. We will just emit
8548 // spurious errors.
8549 return false;
8550 }
8551 }
8552 }
8553 return true;
8554}
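A sketch of the '*' width/precision check above (editor's illustration, not from SemaChecking.cpp; `width` and `x` are hypothetical):

  #include <cstdio>
  void example_star(int width, double x) {
    std::printf("%*f\n", width, x);   // accepted: the '*' width argument is an int
    std::printf("%.*f\n", x, x);      // warn_printf_asterisk_wrong_type: the precision argument is a double
  }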
8555
8556void CheckPrintfHandler::HandleInvalidAmount(
8557 const analyze_printf::PrintfSpecifier &FS,
8558 const analyze_printf::OptionalAmount &Amt,
8559 unsigned type,
8560 const char *startSpecifier,
8561 unsigned specifierLen) {
8562 const analyze_printf::PrintfConversionSpecifier &CS =
8563 FS.getConversionSpecifier();
8564
8565 FixItHint fixit =
8566 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant
8567 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(),
8568 Amt.getConstantLength()))
8569 : FixItHint();
8570
8571 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount)
8572 << type << CS.toString(),
8573 getLocationOfByte(Amt.getStart()),
8574 /*IsStringLocation*/true,
8575 getSpecifierRange(startSpecifier, specifierLen),
8576 fixit);
8577}
8578
8579void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS,
8580 const analyze_printf::OptionalFlag &flag,
8581 const char *startSpecifier,
8582 unsigned specifierLen) {
8583 // Warn about pointless flag with a fixit removal.
8584 const analyze_printf::PrintfConversionSpecifier &CS =
8585 FS.getConversionSpecifier();
8586 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag)
8587 << flag.toString() << CS.toString(),
8588 getLocationOfByte(flag.getPosition()),
8589 /*IsStringLocation*/true,
8590 getSpecifierRange(startSpecifier, specifierLen),
8591 FixItHint::CreateRemoval(
8592 getSpecifierRange(flag.getPosition(), 1)));
8593}
8594
8595void CheckPrintfHandler::HandleIgnoredFlag(
8596 const analyze_printf::PrintfSpecifier &FS,
8597 const analyze_printf::OptionalFlag &ignoredFlag,
8598 const analyze_printf::OptionalFlag &flag,
8599 const char *startSpecifier,
8600 unsigned specifierLen) {
8601 // Warn about ignored flag with a fixit removal.
8602 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag)
8603 << ignoredFlag.toString() << flag.toString(),
8604 getLocationOfByte(ignoredFlag.getPosition()),
8605 /*IsStringLocation*/true,
8606 getSpecifierRange(startSpecifier, specifierLen),
8607 FixItHint::CreateRemoval(
8608 getSpecifierRange(ignoredFlag.getPosition(), 1)));
8609}
8610
8611void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag,
8612 unsigned flagLen) {
8613 // Warn about an empty flag.
8614 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag),
8615 getLocationOfByte(startFlag),
8616 /*IsStringLocation*/true,
8617 getSpecifierRange(startFlag, flagLen));
8618}
8619
8620void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag,
8621 unsigned flagLen) {
8622 // Warn about an invalid flag.
8623 auto Range = getSpecifierRange(startFlag, flagLen);
8624 StringRef flag(startFlag, flagLen);
8625 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
8626 getLocationOfByte(startFlag),
8627 /*IsStringLocation*/true,
8628 Range, FixItHint::CreateRemoval(Range));
8629}
8630
8631void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
8632 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) {
8633 // Warn about using '[...]' without a '@' conversion.
8634 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1);
8635 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
8636 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1),
8637 getLocationOfByte(conversionPosition),
8638 /*IsStringLocation*/true,
8639 Range, FixItHint::CreateRemoval(Range));
8640}
8641
8642// Determines if the specified type is a C++ class or struct containing
8643// a member with the specified name and kind (e.g. a CXXMethodDecl named
8644// "c_str()").
8645template<typename MemberKind>
8646static llvm::SmallPtrSet<MemberKind*, 1>
8647CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
8648 const RecordType *RT = Ty->getAs<RecordType>();
8649 llvm::SmallPtrSet<MemberKind*, 1> Results;
8650
8651 if (!RT)
8652 return Results;
8653 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
8654 if (!RD || !RD->getDefinition())
8655 return Results;
8656
8657 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
8658 Sema::LookupMemberName);
8659 R.suppressDiagnostics();
8660
8661 // We just need to include all members of the right kind turned up by the
8662 // filter, at this point.
8663 if (S.LookupQualifiedName(R, RT->getDecl()))
8664 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
8665 NamedDecl *decl = (*I)->getUnderlyingDecl();
8666 if (MemberKind *FK = dyn_cast<MemberKind>(decl))
8667 Results.insert(FK);
8668 }
8669 return Results;
8670}
8671
8672/// Check if we could call '.c_str()' on an object.
8673///
8674/// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
8675/// allow the call, or if it would be ambiguous).
8676bool Sema::hasCStrMethod(const Expr *E) {
8677 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
8678
8679 MethodSet Results =
8680 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
8681 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
8682 MI != ME; ++MI)
8683 if ((*MI)->getMinRequiredArguments() == 0)
8684 return true;
8685 return false;
8686}
8687
8688// Check if a (w)string was passed when a (w)char* was needed, and offer a
8689// better diagnostic if so. AT is assumed to be valid.
8690// Returns true when a c_str() conversion method is found.
8691bool CheckPrintfHandler::checkForCStrMembers(
8692 const analyze_printf::ArgType &AT, const Expr *E) {
8693 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
8694
8695 MethodSet Results =
8696 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());
8697
8698 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
8699 MI != ME; ++MI) {
8700 const CXXMethodDecl *Method = *MI;
8701 if (Method->getMinRequiredArguments() == 0 &&
8702 AT.matchesType(S.Context, Method->getReturnType())) {
8703 // FIXME: Suggest parens if the expression needs them.
8704 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc());
8705 S.Diag(E->getBeginLoc(), diag::note_printf_c_str)
8706 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()");
8707 return true;
8708 }
8709 }
8710
8711 return false;
8712}
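// Editorial sketch, not part of SemaChecking.cpp: the situation
// checkForCStrMembers targets. Passing an object that has a zero-argument
// c_str() method (std::string here) where '%s' expects 'char *' is rejected
// as a non-POD variadic argument, and this helper adds a note whose fixit
// appends ".c_str()". The offending call is left commented out because the
// non-POD-vararg diagnostic defaults to an error.
#include <cstdio>
#include <string>
void demo_cstr(const std::string &Name) {
  // std::printf("%s\n", Name);         // diagnosed; note suggests Name.c_str()
  std::printf("%s\n", Name.c_str());    // accepted
}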
8713
8714bool
8715CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
8716 &FS,
8717 const char *startSpecifier,
8718 unsigned specifierLen) {
8719 using namespace analyze_format_string;
8720 using namespace analyze_printf;
8721
8722 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier();
8723
8724 if (FS.consumesDataArgument()) {
8725 if (atFirstArg) {
8726 atFirstArg = false;
8727 usesPositionalArgs = FS.usesPositionalArg();
8728 }
8729 else if (usesPositionalArgs != FS.usesPositionalArg()) {
8730 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
8731 startSpecifier, specifierLen);
8732 return false;
8733 }
8734 }
8735
8736 // First check if the field width, precision, and conversion specifier
8737 // have matching data arguments.
8738 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0,
8739 startSpecifier, specifierLen)) {
8740 return false;
8741 }
8742
8743 if (!HandleAmount(FS.getPrecision(), /* precision */ 1,
8744 startSpecifier, specifierLen)) {
8745 return false;
8746 }
8747
8748 if (!CS.consumesDataArgument()) {
8749 // FIXME: Technically specifying a precision or field width here
8750 // makes no sense. Worth issuing a warning at some point.
8751 return true;
8752 }
8753
8754 // Consume the argument.
8755 unsigned argIndex = FS.getArgIndex();
8756 if (argIndex < NumDataArgs) {
8757 // The check to see if the argIndex is valid will come later.
8758 // We set the bit here because we may exit early from this
8759 // function if we encounter some other error.
8760 CoveredArgs.set(argIndex);
8761 }
8762
8763 // FreeBSD kernel extensions.
8764 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg ||
8765 CS.getKind() == ConversionSpecifier::FreeBSDDArg) {
8766 // We need at least two arguments.
8767 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1))
8768 return false;
8769
8770 // Claim the second argument.
8771 CoveredArgs.set(argIndex + 1);
8772
8773 const Expr *Ex = getDataArg(argIndex);
8774 if (CS.getKind() == ConversionSpecifier::FreeBSDDArg) {
8775 // Type check the first argument (pointer for %D)
8776 const analyze_printf::ArgType &AT = ArgType::CPointerTy;
8777 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType()))
8778 EmitFormatDiagnostic(
8779 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
8780 << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
8781 << false << Ex->getSourceRange(),
8782 Ex->getBeginLoc(), /*IsStringLocation*/false,
8783 getSpecifierRange(startSpecifier, specifierLen));
8784 } else {
8785 // Check the length modifier for %b
8786 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
8787 S.getLangOpts()))
8788 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
8789 diag::warn_format_nonsensical_length);
8790 else if (!FS.hasStandardLengthModifier())
8791 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
8792 else if (!FS.hasStandardLengthConversionCombination())
8793 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
8794 diag::warn_format_non_standard_conversion_spec);
8795
8796 // Type check the first argument of %b
8797 if (!checkFormatExpr(FS, startSpecifier, specifierLen, Ex))
8798 return false;
8799 }
8800
8801 // Type check the second argument (char * for both %b and %D)
8802 Ex = getDataArg(argIndex + 1);
8803 const analyze_printf::ArgType &AT2 = ArgType::CStrTy;
8804 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType()))
8805 EmitFormatDiagnostic(
8806 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
8807 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType()
8808 << false << Ex->getSourceRange(),
8809 Ex->getBeginLoc(), /*IsStringLocation*/ false,
8810 getSpecifierRange(startSpecifier, specifierLen));
8811
8812 return true;
8813 }
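// Editorial note, not part of SemaChecking.cpp: the FreeBSD kernel extensions
// handled above each consume two data arguments, which is why the block claims
// both argIndex and argIndex + 1. Roughly (a sketch of kernel-style calls,
// assuming a printf-like function checked as FreeBSD kernel printf):
//
//   printf("flags=%b\n", flags, "\20\2BUSY\1READY"); // %b: int value + bit-description string
//   printf("data=%D\n", ptr, "x ");                  // %D: pointer + 'char *' format
//
// The first argument is checked as a pointer for %D (or run through
// checkFormatExpr for %b), and the second argument must be a 'char *'.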
8814
8815 // Check for using an Objective-C specific conversion specifier
8816 // in a non-ObjC literal.
8817 if (!allowsObjCArg() && CS.isObjCArg()) {
8818 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
8819 specifierLen);
8820 }
8821
8822 // %P can only be used with os_log.
8823 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) {
8824 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
8825 specifierLen);
8826 }
8827
8828 // %n is not allowed with os_log.
8829 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) {
8830 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg),
8831 getLocationOfByte(CS.getStart()),
8832 /*IsStringLocation*/ false,
8833 getSpecifierRange(startSpecifier, specifierLen));
8834
8835 return true;
8836 }
8837
8838 // %n is not allowed anywhere
8839 if (CS.getKind() == ConversionSpecifier::nArg) {
8840 EmitFormatDiagnostic(S.PDiag(diag::warn_format_narg),
8841 getLocationOfByte(CS.getStart()),
8842 /*IsStringLocation*/ false,
8843 getSpecifierRange(startSpecifier, specifierLen));
8844 return true;
8845 }
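// Editorial note, not part of SemaChecking.cpp: per the unconditional check
// above, any use of '%n' is diagnosed. A sketch of the rejected pattern:
//
//   int written = 0;
//   std::printf("%d%n\n", value, &written);  // warning: '%n' flagged by the check above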
8846
8847 // Only scalars are allowed for os_trace.
8848 if (FSType == Sema::FST_OSTrace &&
8849 (CS.getKind() == ConversionSpecifier::PArg ||
8850 CS.getKind() == ConversionSpecifier::sArg ||
8851 CS.getKind() == ConversionSpecifier::ObjCObjArg)) {
8852 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
8853 specifierLen);
8854 }
8855
8856 // Check for use of public/private annotation outside of os_log().
8857 if (FSType != Sema::FST_OSLog) {
8858 if (FS.isPublic().isSet()) {
8859 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
8860 << "public",
8861 getLocationOfByte(FS.isPublic().getPosition()),
8862 /*IsStringLocation*/ false,
8863 getSpecifierRange(startSpecifier, specifierLen));
8864 }
8865 if (FS.isPrivate().isSet()) {
8866 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
8867 << "private",
8868 getLocationOfByte(FS.isPrivate().getPosition()),
8869 /*IsStringLocation*/ false,
8870 getSpecifierRange(startSpecifier, specifierLen));
8871 }
8872 }
8873
8874 // Check for invalid use of field width
8875 if (!FS.hasValidFieldWidth()) {
8876 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0,
8877 startSpecifier, specifierLen);
8878 }
8879
8880 // Check for invalid use of precision
8881 if (!FS.hasValidPrecision()) {
8882 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1,
8883 startSpecifier, specifierLen);
8884 }
8885
8886 // Precision is mandatory for %P specifier.
8887 if (CS.getKind() == ConversionSpecifier::PArg &&
8888 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) {
8889 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision),
8890 getLocationOfByte(startSpecifier),
8891 /*IsStringLocation*/ false,
8892 getSpecifierRange(startSpecifier, specifierLen));
8893 }
8894
8895 // Check each flag does not conflict with any other component.
8896 if (!FS.hasValidThousandsGroupingPrefix())
8897 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen);
8898 if (!FS.hasValidLeadingZeros())
8899 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen);
8900 if (!FS.hasValidPlusPrefix())
8901 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen);
8902 if (!FS.hasValidSpacePrefix())
8903 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen);
8904 if (!FS.hasValidAlternativeForm())
8905 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen);
8906 if (!FS.hasValidLeftJustified())
8907 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen);
8908
8909 // Check that flags are not ignored by another flag
8910 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+'
8911 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(),
8912 startSpecifier, specifierLen);
8913 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-'
8914 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(),
8915 startSpecifier, specifierLen);
8916
8917 // Check the length modifier is valid with the given conversion specifier.
8918 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
8919 S.getLangOpts()))
8920 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
8921 diag::warn_format_nonsensical_length);
8922 else if (!FS.hasStandardLengthModifier())
8923 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
8924 else if (!FS.hasStandardLengthConversionCombination())
8925 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
8926 diag::warn_format_non_standard_conversion_spec);
8927
8928 if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
8929 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
8930
8931 // The remaining checks depend on the data arguments.
8932 if (HasVAListArg)
8933 return true;
8934
8935 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
8936 return false;
8937
8938 const Expr *Arg = getDataArg(argIndex);
8939 if (!Arg)
8940 return true;
8941
8942 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg);
8943}
8944
8945static bool requiresParensToAddCast(const Expr *E) {
8946 // FIXME: We should have a general way to reason about operator
8947 // precedence and whether parens are actually needed here.
8948 // Take care of a few common cases where they aren't.
8949 const Expr *Inside = E->IgnoreImpCasts();
8950 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside))
8951 Inside = POE->getSyntacticForm()->IgnoreImpCasts();
8952
8953 switch (Inside->getStmtClass()) {
8954 case Stmt::ArraySubscriptExprClass:
8955 case Stmt::CallExprClass:
8956 case Stmt::CharacterLiteralClass:
8957 case Stmt::CXXBoolLiteralExprClass:
8958 case Stmt::DeclRefExprClass:
8959 case Stmt::FloatingLiteralClass:
8960 case Stmt::IntegerLiteralClass:
8961 case Stmt::MemberExprClass:
8962 case Stmt::ObjCArrayLiteralClass:
8963 case Stmt::ObjCBoolLiteralExprClass:
8964 case Stmt::ObjCBoxedExprClass:
8965 case Stmt::ObjCDictionaryLiteralClass:
8966 case Stmt::ObjCEncodeExprClass:
8967 case Stmt::ObjCIvarRefExprClass:
8968 case Stmt::ObjCMessageExprClass:
8969 case Stmt::ObjCPropertyRefExprClass:
8970 case Stmt::ObjCStringLiteralClass:
8971 case Stmt::ObjCSubscriptRefExprClass:
8972 case Stmt::ParenExprClass:
8973 case Stmt::StringLiteralClass:
8974 case Stmt::UnaryOperatorClass:
8975 return false;
8976 default:
8977 return true;
8978 }
8979}
8980
8981static std::pair<QualType, StringRef>
8982shouldNotPrintDirectly(const ASTContext &Context,
8983 QualType IntendedTy,
8984 const Expr *E) {
8985 // Use a 'while' to peel off layers of typedefs.
8986 QualType TyTy = IntendedTy;
8987 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) {
8988 StringRef Name = UserTy->getDecl()->getName();
8989 QualType CastTy = llvm::StringSwitch<QualType>(Name)
8990 .Case("CFIndex", Context.getNSIntegerType())
8991 .Case("NSInteger", Context.getNSIntegerType())
8992 .Case("NSUInteger", Context.getNSUIntegerType())
8993 .Case("SInt32", Context.IntTy)
8994 .Case("UInt32", Context.UnsignedIntTy)
8995 .Default(QualType());
8996
8997 if (!CastTy.isNull())
8998 return std::make_pair(CastTy, Name);
8999
9000 TyTy = UserTy->desugar();
9001 }
9002
9003 // Strip parens if necessary.
9004 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
9005 return shouldNotPrintDirectly(Context,
9006 PE->getSubExpr()->getType(),
9007 PE->getSubExpr());
9008
9009 // If this is a conditional expression, then its result type is constructed
9010  // via the usual arithmetic conversions, so the typedef sugar we are looking
9011  // for may not be present. Recurse into the operands to check for NSInteger
9012  // & Co. usage there.
9013 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
9014 QualType TrueTy, FalseTy;
9015 StringRef TrueName, FalseName;
9016
9017 std::tie(TrueTy, TrueName) =
9018 shouldNotPrintDirectly(Context,
9019 CO->getTrueExpr()->getType(),
9020 CO->getTrueExpr());
9021 std::tie(FalseTy, FalseName) =
9022 shouldNotPrintDirectly(Context,
9023 CO->getFalseExpr()->getType(),
9024 CO->getFalseExpr());
9025
9026 if (TrueTy == FalseTy)
9027 return std::make_pair(TrueTy, TrueName);
9028 else if (TrueTy.isNull())
9029 return std::make_pair(FalseTy, FalseName);
9030 else if (FalseTy.isNull())
9031 return std::make_pair(TrueTy, TrueName);
9032 }
9033
9034 return std::make_pair(QualType(), StringRef());
9035}
9036
9037/// Return true if \p ICE is an implicit argument promotion of an arithmetic
9038/// type. Bit-field 'promotions' from a higher ranked type to a lower ranked
9039/// type do not count.
9040static bool
9041isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) {
9042 QualType From = ICE->getSubExpr()->getType();
9043 QualType To = ICE->getType();
9044 // It's an integer promotion if the destination type is the promoted
9045 // source type.
9046 if (ICE->getCastKind() == CK_IntegralCast &&
9047 From->isPromotableIntegerType() &&
9048 S.Context.getPromotedIntegerType(From) == To)
9049 return true;
9050 // Look through vector types, since we do default argument promotion for
9051 // those in OpenCL.
9052 if (const auto *VecTy = From->getAs<ExtVectorType>())
9053 From = VecTy->getElementType();
9054 if (const auto *VecTy = To->getAs<ExtVectorType>())
9055 To = VecTy->getElementType();
9056 // It's a floating promotion if the source type is a lower rank.
9057 return ICE->getCastKind() == CK_FloatingCast &&
9058 S.Context.getFloatingTypeOrder(From, To) < 0;
9059}
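// Editorial sketch, not part of SemaChecking.cpp: the promotion this helper
// recognizes. The 'short' argument below is promoted to 'int' by the variadic
// call rules; because checkFormatExpr treats that implicit cast as an argument
// promotion, it looks through it and accepts '%hd' rather than reporting an
// int/short mismatch.
#include <cstdio>
void demo_promotion(short Count) {
  std::printf("%hd\n", Count);  // accepted: the promotion to int is looked through
}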
9060
9061bool
9062CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
9063 const char *StartSpecifier,
9064 unsigned SpecifierLen,
9065 const Expr *E) {
9066 using namespace analyze_format_string;
9067 using namespace analyze_printf;
9068
9069 // Now type check the data expression that matches the
9070 // format specifier.
9071 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext());
9072 if (!AT.isValid())
9073 return true;
9074
9075 QualType ExprTy = E->getType();
9076 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) {
9077 ExprTy = TET->getUnderlyingExpr()->getType();
9078 }
9079
9080 // Diagnose attempts to print a boolean value as a character. Unlike other
9081 // -Wformat diagnostics, this is fine from a type perspective, but it still
9082 // doesn't make sense.
9083 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg &&
9084 E->isKnownToHaveBooleanValue()) {
9085 const CharSourceRange &CSR =
9086 getSpecifierRange(StartSpecifier, SpecifierLen);
9087 SmallString<4> FSString;
9088 llvm::raw_svector_ostream os(FSString);
9089 FS.toString(os);
9090 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character)
9091 << FSString,
9092 E->getExprLoc(), false, CSR);
9093 return true;
9094 }
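  // Editorial note, not part of SemaChecking.cpp: what the check above catches
  // (a sketch). Printing a boolean with '%c' type-checks, but is almost
  // certainly not what the author meant:
  //
  //   bool Ready = true;
  //   std::printf("%c\n", Ready);  // warning: using '%c' with a boolean value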
9095
9096 analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy);
9097 if (Match == analyze_printf::ArgType::Match)
9098 return true;
9099
9100 // Look through argument promotions for our error message's reported type.
9101 // This includes the integral and floating promotions, but excludes array
9102 // and function pointer decay (seeing that an argument intended to be a
9103 // string has type 'char [6]' is probably more confusing than 'char *') and
9104 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type).
9105 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
9106 if (isArithmeticArgumentPromotion(S, ICE)) {
9107 E = ICE->getSubExpr();
9108 ExprTy = E->getType();
9109
9110 // Check if we didn't match because of an implicit cast from a 'char'
9111 // or 'short' to an 'int'. This is done because printf is a varargs
9112 // function.
9113 if (ICE->getType() == S.Context.IntTy ||
9114 ICE->getType() == S.Context.UnsignedIntTy) {
9115 // All further checking is done on the subexpression
9116 const analyze_printf::ArgType::MatchKind ImplicitMatch =
9117 AT.matchesType(S.Context, ExprTy);
9118 if (ImplicitMatch == analyze_printf::ArgType::Match)
9119 return true;
9120 if (ImplicitMatch == ArgType::NoMatchPedantic ||
9121 ImplicitMatch == ArgType::NoMatchTypeConfusion)
9122 Match = ImplicitMatch;
9123 }
9124 }
9125 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) {
9126 // Special case for 'a', which has type 'int' in C.
9127 // Note, however, that we do /not/ want to treat multibyte constants like
9128 // 'MooV' as characters! This form is deprecated but still exists. In
9129    // addition, don't treat the expression as having type 'char' if a one-byte
9130    // length modifier is provided.
9131 if (ExprTy == S.Context.IntTy &&
9132 FS.getLengthModifier().getKind() != LengthModifier::AsChar)
9133 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue()))
9134 ExprTy = S.Context.CharTy;
9135 }
9136
9137 // Look through enums to their underlying type.
9138 bool IsEnum = false;
9139 if (auto EnumTy = ExprTy->getAs<EnumType>()) {
9140 ExprTy = EnumTy->getDecl()->getIntegerType();
9141 IsEnum = true;
9142 }
9143
9144 // %C in an Objective-C context prints a unichar, not a wchar_t.
9145 // If the argument is an integer of some kind, believe the %C and suggest
9146 // a cast instead of changing the conversion specifier.
9147 QualType IntendedTy = ExprTy;
9148 if (isObjCContext() &&
9149 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) {
9150 if (ExprTy->isIntegralOrUnscopedEnumerationType() &&
9151 !ExprTy->isCharType()) {
9152 // 'unichar' is defined as a typedef of unsigned short, but we should
9153 // prefer using the typedef if it is visible.
9154 IntendedTy = S.Context.UnsignedShortTy;
9155
9156 // While we are here, check if the value is an IntegerLiteral that happens
9157 // to be within the valid range.
9158 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) {
9159 const llvm::APInt &V = IL->getValue();
9160 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy))
9161 return true;
9162 }
9163
9164 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(),
9165 Sema::LookupOrdinaryName);
9166 if (S.LookupName(Result, S.getCurScope())) {
9167 NamedDecl *ND = Result.getFoundDecl();
9168 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND))
9169 if (TD->getUnderlyingType() == IntendedTy)
9170 IntendedTy = S.Context.getTypedefType(TD);
9171 }
9172 }
9173 }
9174
9175 // Special-case some of Darwin's platform-independence types by suggesting
9176 // casts to primitive types that are known to be large enough.
9177 bool ShouldNotPrintDirectly = false; StringRef CastTyName;
9178 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) {
9179 QualType CastTy;
9180 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E);
9181 if (!CastTy.isNull()) {
9182 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int
9183 // (long in ASTContext). Only complain to pedants.
9184 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") &&
9185 (AT.isSizeT() || AT.isPtrdiffT()) &&
9186 AT.matchesType(S.Context, CastTy))
9187 Match = ArgType::NoMatchPedantic;
9188 IntendedTy = CastTy;
9189 ShouldNotPrintDirectly = true;
9190 }
9191 }
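  // Editorial note, not part of SemaChecking.cpp: an illustrative Darwin-only
  // case for the code above (a sketch; NSInteger is the Foundation typedef and
  // GetCount() is hypothetical). On a 64-bit Darwin target NSInteger is 'long',
  // so printing it with '%d' is flagged and the suggested fix is a cast plus a
  // corrected specifier:
  //
  //   NSInteger Count = GetCount();
  //   printf("%d\n", Count);         // warning: value should not be printed directly
  //   printf("%ld\n", (long)Count);  // suggested form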
9192
9193 // We may be able to offer a FixItHint if it is a supported type.
9194 PrintfSpecifier fixedFS = FS;
9195 bool Success =
9196 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext());
9197
9198 if (Success) {
9199 // Get the fix string from the fixed format specifier
9200 SmallString<16> buf;
9201 llvm::raw_svector_ostream os(buf);
9202 fixedFS.toString(os);
9203
9204 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen);
9205
9206 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) {
9207 unsigned Diag;
9208 switch (Match) {
9209      case ArgType::Match: llvm_unreachable("expected non-matching");
9210 case ArgType::NoMatchPedantic:
9211 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
9212 break;
9213 case ArgType::NoMatchTypeConfusion:
9214 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion;
9215 break;
9216 case ArgType::NoMatch:
9217 Diag = diag::warn_format_conversion_argument_type_mismatch;
9218 break;
9219 }
9220
9221 // In this case, the specifier is wrong and should be changed to match
9222 // the argument.
9223 EmitFormatDiagnostic(S.PDiag(Diag)
9224 << AT.getRepresentativeTypeName(S.Context)
9225 << IntendedTy << IsEnum << E->getSourceRange(),
9226 E->getBeginLoc(),
9227 /*IsStringLocation*/ false, SpecRange,
9228 FixItHint::CreateReplacement(SpecRange, os.str()));
9229 } else {
9230 // The canonical type for formatting this value is different from the
9231 // actual type of the expression. (This occurs, for example, with Darwin's
9232 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but
9233 // should be printed as 'long' for 64-bit compatibility.)
9234 // Rather than emitting a normal format/argument mismatch, we want to
9235 // add a cast to the recommended type (and correct the format string
9236 // if necessary).
9237 SmallString<16> CastBuf;
9238 llvm::raw_svector_ostream CastFix(CastBuf);
9239 CastFix << "(";
9240 IntendedTy.print(CastFix, S.Context.getPrintingPolicy());
9241 CastFix << ")";
9242
9243 SmallVector<FixItHint,4> Hints;
9244 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly)
9245 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str()));
9246
9247 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) {
9248 // If there's already a cast present, just replace it.
9249 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc());
9250 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str()));
9251
9252 } else if (!requiresParensToAddCast(E)) {
9253 // If the expression has high enough precedence,
9254 // just write the C-style cast.
9255 Hints.push_back(
9256 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));
9257 } else {
9258 // Otherwise, add parens around the expression as well as the cast.
9259 CastFix << "(";
9260 Hints.push_back(
9261 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));
9262
9263 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc());
9264 Hints.push_back(FixItHint::CreateInsertion(After, ")"));
9265 }
9266
9267 if (ShouldNotPrintDirectly) {
9268 // The expression has a type that should not be printed directly.
9269 // We extract the name from the typedef because we don't want to show
9270 // the underlying type in the diagnostic.
9271 StringRef Name;
9272 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy))
9273 Name = TypedefTy->getDecl()->getName();
9274 else
9275 Name = CastTyName;
9276 unsigned Diag = Match == ArgType::NoMatchPedantic
9277 ? diag::warn_format_argument_needs_cast_pedantic
9278 : diag::warn_format_argument_needs_cast;
9279 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum
9280 << E->getSourceRange(),
9281 E->getBeginLoc(), /*IsStringLocation=*/false,
9282 SpecRange, Hints);
9283 } else {
9284 // In this case, the expression could be printed using a different
9285 // specifier, but we've decided that the specifier is probably correct
9286 // and we should cast instead. Just use the normal warning message.
9287 EmitFormatDiagnostic(
9288 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
9289 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum
9290 << E->getSourceRange(),
9291 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints);
9292 }
9293 }
9294 } else {
9295 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier,
9296 SpecifierLen);
9297 // Since the warning for passing non-POD types to variadic functions
9298 // was deferred until now, we emit a warning for non-POD
9299 // arguments here.
9300 switch (S.isValidVarArgType(ExprTy)) {
9301 case Sema::VAK_Valid:
9302 case Sema::VAK_ValidInCXX11: {
9303 unsigned Diag;
9304 switch (Match) {
9305      case ArgType::Match: llvm_unreachable("expected non-matching");
9306 case ArgType::NoMatchPedantic:
9307 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
9308 break;
9309 case ArgType::NoMatchTypeConfusion:
9310 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion;
9311 break;
9312 case ArgType::NoMatch:
9313 Diag = diag::warn_format_conversion_argument_type_mismatch;
9314 break;
9315 }
9316
9317 EmitFormatDiagnostic(
9318 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy
9319 << IsEnum << CSR << E->getSourceRange(),
9320 E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
9321 break;
9322 }
9323 case Sema::VAK_Undefined:
9324 case Sema::VAK_MSVCUndefined:
9325 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string)
9326 << S.getLangOpts().CPlusPlus11 << ExprTy
9327 << CallType
9328 << AT.getRepresentativeTypeName(S.Context) << CSR
9329 << E->getSourceRange(),
9330 E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
9331 checkForCStrMembers(AT, E);
9332 break;
9333
9334 case Sema::VAK_Invalid:
9335 if (ExprTy->isObjCObjectType())
9336 EmitFormatDiagnostic(
9337 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format)
9338 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
9339 << AT.getRepresentativeTypeName(S.Context) << CSR
9340 << E->getSourceRange(),
9341 E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
9342 else
9343 // FIXME: If this is an initializer list, suggest removing the braces
9344 // or inserting a cast to the target type.
9345 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
9346 << isa<InitListExpr>(E) << ExprTy << CallType
9347 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
9348 break;
9349 }
9350
9351    assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
9352           "format string specifier index out of range");
9353 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
9354 }
9355
9356 return true;
9357}
9358
9359//===--- CHECK: Scanf format string checking ------------------------------===//
9360
9361namespace {
9362
9363class CheckScanfHandler : public CheckFormatHandler {
9364public:
9365 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
9366 const Expr *origFormatExpr, Sema::FormatStringType type,
9367 unsigned firstDataArg, unsigned numDataArgs,
9368 const char *beg, bool hasVAListArg,
9369 ArrayRef<const Expr *> Args, unsigned formatIdx,
9370 bool inFunctionCall, Sema::VariadicCallType CallType,
9371 llvm::SmallBitVector &CheckedVarArgs,
9372 UncoveredArgHandler &UncoveredArg)
9373 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
9374 numDataArgs, beg, hasVAListArg, Args, formatIdx,
9375 inFunctionCall, CallType, CheckedVarArgs,
9376 UncoveredArg) {}
9377
9378 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
9379 const char *startSpecifier,
9380 unsigned specifierLen) override;
9381
9382 bool HandleInvalidScanfConversionSpecifier(
9383 const analyze_scanf::ScanfSpecifier &FS,
9384 const char *startSpecifier,
9385 unsigned specifierLen) override;
9386
9387 void HandleIncompleteScanList(const char *start, const char *end) override;
9388};
9389
9390} // namespace
9391
9392void CheckScanfHandler::HandleIncompleteScanList(const char *start,
9393 const char *end) {
9394 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
9395 getLocationOfByte(end), /*IsStringLocation*/true,
9396 getSpecifierRange(start, end - start));
9397}
9398
9399bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
9400 const analyze_scanf::ScanfSpecifier &FS,
9401 const char *startSpecifier,
9402 unsigned specifierLen) {
9403 const analyze_scanf::ScanfConversionSpecifier &CS =
9404 FS.getConversionSpecifier();
9405
9406 return HandleInvalidConversionSpecifier(FS.getArgIndex(),
9407 getLocationOfByte(CS.getStart()),
9408 startSpecifier, specifierLen,
9409 CS.getStart(), CS.getLength());
9410}
9411
9412bool CheckScanfHandler::HandleScanfSpecifier(
9413 const analyze_scanf::ScanfSpecifier &FS,
9414 const char *startSpecifier,
9415 unsigned specifierLen) {
9416 using namespace analyze_scanf;
9417 using namespace analyze_format_string;
9418
9419 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();
9420
9421 // Handle case where '%' and '*' don't consume an argument. These shouldn't
9422 // be used to decide if we are using positional arguments consistently.
9423 if (FS.consumesDataArgument()) {
9424 if (atFirstArg) {
9425 atFirstArg = false;
9426 usesPositionalArgs = FS.usesPositionalArg();
9427 }
9428 else if (usesPositionalArgs != FS.usesPositionalArg()) {
9429 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
9430 startSpecifier, specifierLen);
9431 return false;
9432 }
9433 }
9434
9435  // Check if the field width is non-zero.
9436 const OptionalAmount &Amt = FS.getFieldWidth();
9437 if (Amt.getHowSpecified() == OptionalAmount::Constant) {
9438 if (Amt.getConstantAmount() == 0) {
9439 const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
9440 Amt.getConstantLength());
9441 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
9442 getLocationOfByte(Amt.getStart()),
9443 /*IsStringLocation*/true, R,
9444 FixItHint::CreateRemoval(R));
9445 }
9446 }
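  // Editorial note, not part of SemaChecking.cpp: the zero-width case flagged
  // above (a sketch). A field width of zero in a scanf format is meaningless,
  // and the fixit removes it:
  //
  //   int Value;
  //   std::scanf("%0d", &Value);  // warning: zero field width is unused; fixit -> "%d"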
9447
9448 if (!FS.consumesDataArgument()) {
9449 // FIXME: Technically specifying a precision or field width here
9450 // makes no sense. Worth issuing a warning at some point.
9451 return true;
9452 }
9453
9454 // Consume the argument.
9455 unsigned argIndex = FS.getArgIndex();
9456 if (argIndex < NumDataArgs) {
9457 // The check to see if the argIndex is valid will come later.
9458 // We set the bit here because we may exit early from this
9459 // function if we encounter some other error.
9460 CoveredArgs.set(argIndex);
9461 }
9462
9463 // Check the length modifier is valid with the given conversion specifier.
9464 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
9465 S.getLangOpts()))
9466 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
9467 diag::warn_format_nonsensical_length);
9468 else if (!FS.hasStandardLengthModifier())
9469 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
9470 else if (!FS.hasStandardLengthConversionCombination())
9471 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
9472 diag::warn_format_non_standard_conversion_spec);
9473
9474 if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
9475 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
9476
9477 // The remaining checks depend on the data arguments.
9478 if (HasVAListArg)
9479 return true;
9480
9481 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
9482 return false;
9483
9484 // Check that the argument type matches the format specifier.
9485 const Expr *Ex = getDataArg(argIndex);
9486 if (!Ex)
9487 return true;
9488
9489 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context);
9490
9491 if (!AT.isValid()) {
9492 return true;
9493 }
9494
9495 analyze_format_string::ArgType::MatchKind Match =
9496 AT.matchesType(S.Context, Ex->getType());
9497 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
9498 if (Match == analyze_format_string::ArgType::Match)
9499 return true;
9500
9501 ScanfSpecifier fixedFS = FS;
9502 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(),
9503 S.getLangOpts(), S.Context);
9504
9505 unsigned Diag =
9506 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic
9507 : diag::warn_format_conversion_argument_type_mismatch;
9508
9509 if (Success) {
9510 // Get the fix string from the fixed format specifier.
9511 SmallString<128> buf;
9512 llvm::raw_svector_ostream os(buf);
9513 fixedFS.toString(os);
9514
9515 EmitFormatDiagnostic(
9516 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context)
9517 << Ex->getType() << false << Ex->getSourceRange(),
9518 Ex->getBeginLoc(),
9519 /*IsStringLocation*/ false,
9520 getSpecifierRange(startSpecifier, specifierLen),
9521 FixItHint::CreateReplacement(
9522 getSpecifierRange(startSpecifier, specifierLen), os.str()));
9523 } else {
9524 EmitFormatDiagnostic(S.PDiag(Diag)
9525 << AT.getRepresentativeTypeName(S.Context)
9526 << Ex->getType() << false << Ex->getSourceRange(),
9527 Ex->getBeginLoc(),
9528 /*IsStringLocation*/ false,
9529 getSpecifierRange(startSpecifier, specifierLen));
9530 }
9531
9532 return true;
9533}
9534
9535static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
9536 const Expr *OrigFormatExpr,
9537 ArrayRef<const Expr *> Args,
9538 bool HasVAListArg, unsigned format_idx,
9539 unsigned firstDataArg,
9540 Sema::FormatStringType Type,
9541 bool inFunctionCall,
9542 Sema::VariadicCallType CallType,
9543 llvm::SmallBitVector &CheckedVarArgs,
9544 UncoveredArgHandler &UncoveredArg,
9545 bool IgnoreStringsWithoutSpecifiers) {
9546 // CHECK: is the format string a wide literal?
9547 if (!FExpr->isAscii() && !FExpr->isUTF8()) {
9548 CheckFormatHandler::EmitFormatDiagnostic(
9549 S, inFunctionCall, Args[format_idx],
9550 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(),
9551 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
9552 return;
9553 }
9554
9555 // Str - The format string. NOTE: this is NOT null-terminated!
9556 StringRef StrRef = FExpr->getString();
9557 const char *Str = StrRef.data();
9558 // Account for cases where the string literal is truncated in a declaration.
9559 const ConstantArrayType *T =
9560 S.Context.getAsConstantArrayType(FExpr->getType());
9561  assert(T && "String literal not of constant array type!");
9562 size_t TypeSize = T->getSize().getZExtValue();
9563 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
9564 const unsigned numDataArgs = Args.size() - firstDataArg;
9565
9566 if (IgnoreStringsWithoutSpecifiers &&
9567 !analyze_format_string::parseFormatStringHasFormattingSpecifiers(
9568 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo()))
9569 return;
9570
9571 // Emit a warning if the string literal is truncated and does not contain an
9572 // embedded null character.
9573 if (TypeSize <= StrRef.size() &&
9574 StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) {
9575 CheckFormatHandler::EmitFormatDiagnostic(
9576 S, inFunctionCall, Args[format_idx],
9577 S.PDiag(diag::warn_printf_format_string_not_null_terminated),
9578 FExpr->getBeginLoc(),
9579 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange());
9580 return;
9581 }
9582
9583 // CHECK: empty format string?
9584 if (StrLen == 0 && numDataArgs > 0) {
9585 CheckFormatHandler::EmitFormatDiagnostic(
9586 S, inFunctionCall, Args[format_idx],
9587 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
9588 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
9589 return;
9590 }
9591
9592 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
9593 Type == Sema::FST_Kprintf || Type == Sema::FST_FreeBSDKPrintf ||
9594 Type == Sema::FST_OSLog || Type == Sema::FST_OSTrace ||
9595 Type == Sema::FST_Syslog) {
9596 CheckPrintfHandler H(
9597 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
9598 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
9599 HasVAListArg, Args, format_idx, inFunctionCall, CallType,
9600 CheckedVarArgs, UncoveredArg);
9601
9602 if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
9603 S.getLangOpts(),
9604 S.Context.getTargetInfo(),
9605 Type == Sema::FST_Kprintf || Type == Sema::FST_FreeBSDKPrintf))
9606 H.DoneProcessing();
9607 } else if (Type == Sema::FST_Scanf) {
9608 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
9609 numDataArgs, Str, HasVAListArg, Args, format_idx,
9610 inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);
9611
9612 if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
9613 S.getLangOpts(),
9614 S.Context.getTargetInfo()))
9615 H.DoneProcessing();
9616 } // TODO: handle other formats
9617}
9618
9619bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
9620 // Str - The format string. NOTE: this is NOT null-terminated!
9621 StringRef StrRef = FExpr->getString();
9622 const char *Str = StrRef.data();
9623 // Account for cases where the string literal is truncated in a declaration.
9624 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
9625  assert(T && "String literal not of constant array type!");
9626 size_t TypeSize = T->getSize().getZExtValue();
9627 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
9628 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
9629 getLangOpts(),
9630 Context.getTargetInfo());
9631}
9632
9633//===--- CHECK: Warn on use of wrong absolute value function. -------------===//
9634
9635 // Returns the related absolute value function that is larger, or 0 if one
9636// does not exist.
9637static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) {
9638 switch (AbsFunction) {
9639 default:
9640 return 0;
9641
9642 case Builtin::BI__builtin_abs:
9643 return Builtin::BI__builtin_labs;
9644 case Builtin::BI__builtin_labs:
9645 return Builtin::BI__builtin_llabs;
9646 case Builtin::BI__builtin_llabs:
9647 return 0;
9648
9649 case Builtin::BI__builtin_fabsf:
9650 return Builtin::BI__builtin_fabs;
9651 case Builtin::BI__builtin_fabs:
9652 return Builtin::BI__builtin_fabsl;
9653 case Builtin::BI__builtin_fabsl:
9654 return 0;
9655
9656 case Builtin::BI__builtin_cabsf:
9657 return Builtin::BI__builtin_cabs;
9658 case Builtin::BI__builtin_cabs:
9659 return Builtin::BI__builtin_cabsl;
9660 case Builtin::BI__builtin_cabsl:
9661 return 0;
9662
9663 case Builtin::BIabs:
9664 return Builtin::BIlabs;
9665 case Builtin::BIlabs:
9666 return Builtin::BIllabs;
9667 case Builtin::BIllabs:
9668 return 0;
9669
9670 case Builtin::BIfabsf:
9671 return Builtin::BIfabs;
9672 case Builtin::BIfabs:
9673 return Builtin::BIfabsl;
9674 case Builtin::BIfabsl:
9675 return 0;
9676
9677 case Builtin::BIcabsf:
9678 return Builtin::BIcabs;
9679 case Builtin::BIcabs:
9680 return Builtin::BIcabsl;
9681 case Builtin::BIcabsl:
9682 return 0;
9683 }
9684}
9685
9686// Returns the argument type of the absolute value function.
9687static QualType getAbsoluteValueArgumentType(ASTContext &Context,
9688 unsigned AbsType) {
9689 if (AbsType == 0)
9690 return QualType();
9691
9692 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
9693 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error);
9694 if (Error != ASTContext::GE_None)
9695 return QualType();
9696
9697 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>();
9698 if (!FT)
9699 return QualType();
9700
9701 if (FT->getNumParams() != 1)
9702 return QualType();
9703
9704 return FT->getParamType(0);
9705}
9706
9707// Returns the best absolute value function, or zero, based on type and
9708// current absolute value function.
9709static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType,
9710 unsigned AbsFunctionKind) {
9711 unsigned BestKind = 0;
9712 uint64_t ArgSize = Context.getTypeSize(ArgType);
9713 for (unsigned Kind = AbsFunctionKind; Kind != 0;
9714 Kind = getLargerAbsoluteValueFunction(Kind)) {
9715 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind);
9716 if (Context.getTypeSize(ParamType) >= ArgSize) {
9717 if (BestKind == 0)
9718 BestKind = Kind;
9719 else if (Context.hasSameType(ParamType, ArgType)) {
9720 BestKind = Kind;
9721 break;
9722 }
9723 }
9724 }
9725 return BestKind;
9726}
9727
9728enum AbsoluteValueKind {
9729 AVK_Integer,
9730 AVK_Floating,
9731 AVK_Complex
9732};
9733
9734static AbsoluteValueKind getAbsoluteValueKind(QualType T) {
9735 if (T->isIntegralOrEnumerationType())
9736 return AVK_Integer;
9737 if (T->isRealFloatingType())
9738 return AVK_Floating;
9739 if (T->isAnyComplexType())
9740 return AVK_Complex;
9741
9742  llvm_unreachable("Type not integer, floating, or complex");
9743}
9744
9745// Changes the absolute value function to a different type. Preserves whether
9746// the function is a builtin.
9747static unsigned changeAbsFunction(unsigned AbsKind,
9748 AbsoluteValueKind ValueKind) {
9749 switch (ValueKind) {
9750 case AVK_Integer:
9751 switch (AbsKind) {
9752 default:
9753 return 0;
9754 case Builtin::BI__builtin_fabsf:
9755 case Builtin::BI__builtin_fabs:
9756 case Builtin::BI__builtin_fabsl:
9757 case Builtin::BI__builtin_cabsf:
9758 case Builtin::BI__builtin_cabs:
9759 case Builtin::BI__builtin_cabsl:
9760 return Builtin::BI__builtin_abs;
9761 case Builtin::BIfabsf:
9762 case Builtin::BIfabs:
9763 case Builtin::BIfabsl:
9764 case Builtin::BIcabsf:
9765 case Builtin::BIcabs:
9766 case Builtin::BIcabsl:
9767 return Builtin::BIabs;
9768 }
9769 case AVK_Floating:
9770 switch (AbsKind) {
9771 default:
9772 return 0;
9773 case Builtin::BI__builtin_abs:
9774 case Builtin::BI__builtin_labs:
9775 case Builtin::BI__builtin_llabs:
9776 case Builtin::BI__builtin_cabsf:
9777 case Builtin::BI__builtin_cabs:
9778 case Builtin::BI__builtin_cabsl:
9779 return Builtin::BI__builtin_fabsf;
9780 case Builtin::BIabs:
9781 case Builtin::BIlabs:
9782 case Builtin::BIllabs:
9783 case Builtin::BIcabsf:
9784 case Builtin::BIcabs:
9785 case Builtin::BIcabsl:
9786 return Builtin::BIfabsf;
9787 }
9788 case AVK_Complex:
9789 switch (AbsKind) {
9790 default:
9791 return 0;
9792 case Builtin::BI__builtin_abs:
9793 case Builtin::BI__builtin_labs:
9794 case Builtin::BI__builtin_llabs:
9795 case Builtin::BI__builtin_fabsf:
9796 case Builtin::BI__builtin_fabs:
9797 case Builtin::BI__builtin_fabsl:
9798 return Builtin::BI__builtin_cabsf;
9799 case Builtin::BIabs:
9800 case Builtin::BIlabs:
9801 case Builtin::BIllabs:
9802 case Builtin::BIfabsf:
9803 case Builtin::BIfabs:
9804 case Builtin::BIfabsl:
9805 return Builtin::BIcabsf;
9806 }
9807 }
9808  llvm_unreachable("Unable to convert function");
9809}
9810
9811static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) {
9812 const IdentifierInfo *FnInfo = FDecl->getIdentifier();
9813 if (!FnInfo)
9814 return 0;
9815
9816 switch (FDecl->getBuiltinID()) {
9817 default:
9818 return 0;
9819 case Builtin::BI__builtin_abs:
9820 case Builtin::BI__builtin_fabs:
9821 case Builtin::BI__builtin_fabsf:
9822 case Builtin::BI__builtin_fabsl:
9823 case Builtin::BI__builtin_labs:
9824 case Builtin::BI__builtin_llabs:
9825 case Builtin::BI__builtin_cabs:
9826 case Builtin::BI__builtin_cabsf:
9827 case Builtin::BI__builtin_cabsl:
9828 case Builtin::BIabs:
9829 case Builtin::BIlabs:
9830 case Builtin::BIllabs:
9831 case Builtin::BIfabs:
9832 case Builtin::BIfabsf:
9833 case Builtin::BIfabsl:
9834 case Builtin::BIcabs:
9835 case Builtin::BIcabsf:
9836 case Builtin::BIcabsl:
9837 return FDecl->getBuiltinID();
9838 }
9839  llvm_unreachable("Unknown Builtin type");
9840}
9841
9842// If the replacement is valid, emit a note with replacement function.
9843// Additionally, suggest including the proper header if not already included.
9844static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range,
9845 unsigned AbsKind, QualType ArgType) {
9846 bool EmitHeaderHint = true;
9847 const char *HeaderName = nullptr;
9848 const char *FunctionName = nullptr;
9849 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) {
9850 FunctionName = "std::abs";
9851 if (ArgType->isIntegralOrEnumerationType()) {
9852 HeaderName = "cstdlib";
9853 } else if (ArgType->isRealFloatingType()) {
9854 HeaderName = "cmath";
9855 } else {
9856      llvm_unreachable("Invalid Type");
9857 }
9858
9859 // Lookup all std::abs
9860 if (NamespaceDecl *Std = S.getStdNamespace()) {
9861 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName);
9862 R.suppressDiagnostics();
9863 S.LookupQualifiedName(R, Std);
9864
9865 for (const auto *I : R) {
9866 const FunctionDecl *FDecl = nullptr;
9867 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) {
9868 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl());
9869 } else {
9870 FDecl = dyn_cast<FunctionDecl>(I);
9871 }
9872 if (!FDecl)
9873 continue;
9874
9875 // Found std::abs(), check that they are the right ones.
9876 if (FDecl->getNumParams() != 1)
9877 continue;
9878
9879 // Check that the parameter type can handle the argument.
9880 QualType ParamType = FDecl->getParamDecl(0)->getType();
9881 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) &&
9882 S.Context.getTypeSize(ArgType) <=
9883 S.Context.getTypeSize(ParamType)) {
9884 // Found a function, don't need the header hint.
9885 EmitHeaderHint = false;
9886 break;
9887 }
9888 }
9889 }
9890 } else {
9891 FunctionName = S.Context.BuiltinInfo.getName(AbsKind);
9892 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind);
9893
9894 if (HeaderName) {
9895 DeclarationName DN(&S.Context.Idents.get(FunctionName));
9896 LookupResult R(S, DN, Loc, Sema::LookupAnyName);
9897 R.suppressDiagnostics();
9898 S.LookupName(R, S.getCurScope());
9899
9900 if (R.isSingleResult()) {
9901 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl());
9902 if (FD && FD->getBuiltinID() == AbsKind) {
9903 EmitHeaderHint = false;
9904 } else {
9905 return;
9906 }
9907 } else if (!R.empty()) {
9908 return;
9909 }
9910 }
9911 }
9912
9913 S.Diag(Loc, diag::note_replace_abs_function)
9914 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName);
9915
9916 if (!HeaderName)
9917 return;
9918
9919 if (!EmitHeaderHint)
9920 return;
9921
9922 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName
9923 << FunctionName;
9924}
9925
9926template <std::size_t StrLen>
9927static bool IsStdFunction(const FunctionDecl *FDecl,
9928 const char (&Str)[StrLen]) {
9929 if (!FDecl)
9930 return false;
9931 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str))
9932 return false;
9933 if (!FDecl->isInStdNamespace())
9934 return false;
9935
9936 return true;
9937}
9938
9939// Warn when using the wrong abs() function.
9940void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
9941 const FunctionDecl *FDecl) {
9942 if (Call->getNumArgs() != 1)
9943 return;
9944
9945 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
9946 bool IsStdAbs = IsStdFunction(FDecl, "abs");
9947 if (AbsKind == 0 && !IsStdAbs)
9948 return;
9949
9950 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType();
9951 QualType ParamType = Call->getArg(0)->getType();
9952
9953 // Unsigned types cannot be negative. Suggest removing the absolute value
9954 // function call.
9955 if (ArgType->isUnsignedIntegerType()) {
9956 const char *FunctionName =
9957 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
9958 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
9959 Diag(Call->getExprLoc(), diag::note_remove_abs)
9960 << FunctionName
9961 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
9962 return;
9963 }
9964
9965  // Taking the absolute value of a pointer is very suspicious; they probably
9966 // wanted to index into an array, dereference a pointer, call a function, etc.
9967 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
9968 unsigned DiagType = 0;
9969 if (ArgType->isFunctionType())
9970 DiagType = 1;
9971 else if (ArgType->isArrayType())
9972 DiagType = 2;
9973
9974 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
9975 return;
9976 }
9977
9978 // std::abs has overloads which prevent most of the absolute value problems
9979 // from occurring.
9980 if (IsStdAbs)
9981 return;
9982
9983 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
9984 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);
9985
9986 // The argument and parameter are the same kind. Check if they are the right
9987 // size.
9988 if (ArgValueKind == ParamValueKind) {
9989 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
9990 return;
9991
9992 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
9993 Diag(Call->getExprLoc(), diag::warn_abs_too_small)
9994 << FDecl << ArgType << ParamType;
9995
9996 if (NewAbsKind == 0)
9997 return;
9998
9999 emitReplacement(*this, Call->getExprLoc(),
10000 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
10001 return;
10002 }
10003
10004 // ArgValueKind != ParamValueKind
10005 // The wrong type of absolute value function was used. Attempt to find the
10006 // proper one.
10007 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
10008 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
10009 if (NewAbsKind == 0)
10010 return;
10011
10012 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
10013 << FDecl << ParamValueKind << ArgValueKind;
10014
10015 emitReplacement(*this, Call->getExprLoc(),
10016 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
10017}
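// Editorial sketch, not part of SemaChecking.cpp: two calls that exercise
// CheckAbsoluteValueFunction above. Both compile, but each draws a warning:
// the first (on an LP64 target) because the argument is wider than the 'int'
// parameter of __builtin_abs, after which emitReplacement suggests 'std::abs'
// since this is C++; the second because an absolute value of an unsigned type
// has no effect.
long demo_abs_small(long V) {
  return __builtin_abs(V);   // warning: argument type wider than the parameter
}
unsigned demo_abs_unsigned(unsigned V) {
  return __builtin_abs(V);   // warning: absolute value of unsigned has no effect
}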
10018
10019//===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
10020void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
10021 const FunctionDecl *FDecl) {
10022 if (!Call || !FDecl) return;
10023
10024 // Ignore template specializations and macros.
10025 if (inTemplateInstantiation()) return;
10026 if (Call->getExprLoc().isMacroID()) return;
10027
10028  // Only care about the single-template-argument, two-parameter form of std::max.
10029 if (Call->getNumArgs() != 2) return;
10030 if (!IsStdFunction(FDecl, "max")) return;
10031 const auto * ArgList = FDecl->getTemplateSpecializationArgs();
10032 if (!ArgList) return;
10033 if (ArgList->size() != 1) return;
10034
10035 // Check that template type argument is unsigned integer.
10036 const auto& TA = ArgList->get(0);
10037 if (TA.getKind() != TemplateArgument::Type) return;
10038 QualType ArgType = TA.getAsType();
10039 if (!ArgType->isUnsignedIntegerType()) return;
10040
10041 // See if either argument is a literal zero.
10042 auto IsLiteralZeroArg = [](const Expr* E) -> bool {
10043 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E);
10044 if (!MTE) return false;
10045 const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr());
10046 if (!Num) return false;
10047 if (Num->getValue() != 0) return false;
10048 return true;
10049 };
10050
10051 const Expr *FirstArg = Call->getArg(0);
10052 const Expr *SecondArg = Call->getArg(1);
10053 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg);
10054 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg);
10055
10056 // Only warn when exactly one argument is zero.
10057 if (IsFirstArgZero == IsSecondArgZero) return;
10058
10059 SourceRange FirstRange = FirstArg->getSourceRange();
10060 SourceRange SecondRange = SecondArg->getSourceRange();
10061
10062 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange;
10063
10064 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero)
10065 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange;
10066
10067 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)".
10068 SourceRange RemovalRange;
10069 if (IsFirstArgZero) {
10070 RemovalRange = SourceRange(FirstRange.getBegin(),
10071 SecondRange.getBegin().getLocWithOffset(-1));
10072 } else {
10073 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()),
10074 SecondRange.getEnd());
10075 }
10076
10077 Diag(Call->getExprLoc(), diag::note_remove_max_call)
10078 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange())
10079 << FixItHint::CreateRemoval(RemovalRange);
10080}
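// Editorial sketch, not part of SemaChecking.cpp: the pattern
// CheckMaxUnsignedZero warns about. With an unsigned template argument,
// std::max(0u, n) is always just 'n', so the note's fixits reduce the call
// to "(n)".
#include <algorithm>
unsigned demo_max(unsigned N) {
  return std::max(0u, N);  // warning: max with unsigned zero is always the other argument
}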
10081
10082//===--- CHECK: Standard memory functions ---------------------------------===//
10083
10084/// Takes the expression passed to the size_t parameter of functions
10085/// such as memcmp, strncat, etc. and warns if it's a comparison.
10086///
10087/// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`.
10088static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E,
10089 IdentifierInfo *FnName,
10090 SourceLocation FnLoc,
10091 SourceLocation RParenLoc) {
10092 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E);
10093 if (!Size)
10094 return false;
10095
10096 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||:
10097 if (!Size->isComparisonOp() && !Size->isLogicalOp())
10098 return false;
10099
10100 SourceRange SizeRange = Size->getSourceRange();
10101 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison)
10102 << SizeRange << FnName;
10103 S.Diag(FnLoc, diag::note_memsize_comparison_paren)
10104 << FnName
10105 << FixItHint::CreateInsertion(
10106 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")")
10107 << FixItHint::CreateRemoval(RParenLoc);
10108 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence)
10109 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(")
10110 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()),
10111 ")");
10112
10113 return true;
10114}
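// Editorial sketch, not part of SemaChecking.cpp: the typo class this check
// catches. The comparison below is applied to sizeof(a) instead of to the
// result of memcmp, so the size argument is actually the boolean
// 'sizeof(a) > 0'.
#include <cstring>
bool demo_memsize(const int (&a)[4], const int (&b)[4]) {
  return std::memcmp(&a, &b, sizeof(a) > 0);  // warning: size argument is a comparison
  // intended: std::memcmp(&a, &b, sizeof(a)) > 0
}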
10115
10116/// Determine whether the given type is or contains a dynamic class type
10117/// (e.g., whether it has a vtable).
10118static const CXXRecordDecl *getContainedDynamicClass(QualType T,
10119 bool &IsContained) {
10120 // Look through array types while ignoring qualifiers.
10121 const Type *Ty = T->getBaseElementTypeUnsafe();
10122 IsContained = false;
10123
10124 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
10125 RD = RD ? RD->getDefinition() : nullptr;
10126 if (!RD || RD->isInvalidDecl())
10127 return nullptr;
10128
10129 if (RD->isDynamicClass())
10130 return RD;
10131
10132 // Check all the fields. If any bases were dynamic, the class is dynamic.
10133 // It's impossible for a class to transitively contain itself by value, so
10134 // infinite recursion is impossible.
10135 for (auto *FD : RD->fields()) {
10136 bool SubContained;
10137 if (const CXXRecordDecl *ContainedRD =
10138 getContainedDynamicClass(FD->getType(), SubContained)) {
10139 IsContained = true;
10140 return ContainedRD;
10141 }
10142 }
10143
10144 return nullptr;
10145}
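Editorial sketch (not part of the file): a type that is not itself dynamic but contains one by value; for Wrapper the function returns the HasVTable definition and sets IsContained.

  struct HasVTable { virtual ~HasVTable(); };
  struct Wrapper   { HasVTable Member; };  // contained dynamic class found via the field scan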
10146
10147static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) {
10148 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E))
10149 if (Unary->getKind() == UETT_SizeOf)
10150 return Unary;
10151 return nullptr;
10152}
10153
10154/// If E is a sizeof expression, returns its argument expression,
10155/// otherwise returns NULL.
10156static const Expr *getSizeOfExprArg(const Expr *E) {
10157 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
10158 if (!SizeOf->isArgumentType())
10159 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts();
10160 return nullptr;
10161}
10162
10163/// If E is a sizeof expression, returns its argument type.
10164static QualType getSizeOfArgType(const Expr *E) {
10165 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
10166 return SizeOf->getTypeOfArgument();
10167 return QualType();
10168}
10169
10170namespace {
10171
10172struct SearchNonTrivialToInitializeField
10173 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> {
10174 using Super =
10175 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>;
10176
10177 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {}
10178
10179 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT,
10180 SourceLocation SL) {
10181 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
10182 asDerived().visitArray(PDIK, AT, SL);
10183 return;
10184 }
10185
10186 Super::visitWithKind(PDIK, FT, SL);
10187 }
10188
10189 void visitARCStrong(QualType FT, SourceLocation SL) {
10190 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
10191 }
10192 void visitARCWeak(QualType FT, SourceLocation SL) {
10193 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
10194 }
10195 void visitStruct(QualType FT, SourceLocation SL) {
10196 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
10197 visit(FD->getType(), FD->getLocation());
10198 }
10199 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK,
10200 const ArrayType *AT, SourceLocation SL) {
10201 visit(getContext().getBaseElementType(AT), SL);
10202 }
10203 void visitTrivial(QualType FT, SourceLocation SL) {}
10204
10205 static void diag(QualType RT, const Expr *E, Sema &S) {
10206 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation());
10207 }
10208
10209 ASTContext &getContext() { return S.getASTContext(); }
10210
10211 const Expr *E;
10212 Sema &S;
10213};
10214
10215struct SearchNonTrivialToCopyField
10216 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> {
10217 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>;
10218
10219 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {}
10220
10221 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT,
10222 SourceLocation SL) {
10223 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
10224 asDerived().visitArray(PCK, AT, SL);
10225 return;
10226 }
10227
10228 Super::visitWithKind(PCK, FT, SL);
10229 }
10230
10231 void visitARCStrong(QualType FT, SourceLocation SL) {
10232 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
10233 }
10234 void visitARCWeak(QualType FT, SourceLocation SL) {
10235 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
10236 }
10237 void visitStruct(QualType FT, SourceLocation SL) {
10238 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
10239 visit(FD->getType(), FD->getLocation());
10240 }
10241 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT,
10242 SourceLocation SL) {
10243 visit(getContext().getBaseElementType(AT), SL);
10244 }
10245 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT,
10246 SourceLocation SL) {}
10247 void visitTrivial(QualType FT, SourceLocation SL) {}
10248 void visitVolatileTrivial(QualType FT, SourceLocation SL) {}
10249
10250 static void diag(QualType RT, const Expr *E, Sema &S) {
10251 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation());
10252 }
10253
10254 ASTContext &getContext() { return S.getASTContext(); }
10255
10256 const Expr *E;
10257 Sema &S;
10258};
10259
10260}
10261
10262/// Detect if \c SizeofExpr is likely to calculate the size of an object.
10263static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) {
10264 SizeofExpr = SizeofExpr->IgnoreParenImpCasts();
10265
10266 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) {
10267 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add)
10268 return false;
10269
10270 return doesExprLikelyComputeSize(BO->getLHS()) ||
10271 doesExprLikelyComputeSize(BO->getRHS());
10272 }
10273
10274 return getAsSizeOfExpr(SizeofExpr) != nullptr;
10275}
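Editorial note on the heuristic above: only multiplication and addition are looked through, and some operand must ultimately be a sizeof. For example:

  // Likely size computations:    sizeof(buf),  sizeof(*p) * n,  n * sizeof(T) + 8
  // Not treated as size values:  n,            n * m,           strlen(s) + 1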
10276
10277/// Check if the ArgLoc originated from a macro passed to the call at CallLoc.
10278///
10279/// \code
10280/// #define MACRO 0
10281/// foo(MACRO);
10282/// foo(0);
10283/// \endcode
10284///
10285/// This should return true for the first call to foo, but not for the second
10286/// (regardless of whether foo is a macro or function).
10287static bool isArgumentExpandedFromMacro(SourceManager &SM,
10288 SourceLocation CallLoc,
10289 SourceLocation ArgLoc) {
10290 if (!CallLoc.isMacroID())
10291 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc);
10292
10293 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) !=
10294 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc));
10295}
10296
10297/// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the
10298/// last two arguments transposed.
10299static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) {
10300 if (BId != Builtin::BImemset && BId != Builtin::BIbzero)
10301 return;
10302
10303 const Expr *SizeArg =
10304 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts();
10305
10306 auto isLiteralZero = [](const Expr *E) {
10307 return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0;
10308 };
10309
10310 // If we're memsetting or bzeroing 0 bytes, then this is likely an error.
10311 SourceLocation CallLoc = Call->getRParenLoc();
10312 SourceManager &SM = S.getSourceManager();
10313 if (isLiteralZero(SizeArg) &&
10314 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) {
10315
10316 SourceLocation DiagLoc = SizeArg->getExprLoc();
10317
10318 // Some platforms #define bzero to __builtin_memset. See if this is the
10319 // case, and if so, emit a better diagnostic.
10320 if (BId == Builtin::BIbzero ||
10321 (CallLoc.isMacroID() && Lexer::getImmediateMacroName(
10322 CallLoc, SM, S.getLangOpts()) == "bzero")) {
10323 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size);
10324 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence);
10325 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) {
10326 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0;
10327 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0;
10328 }
10329 return;
10330 }
10331
10332 // If the second argument to a memset is a sizeof expression and the third
10333 // isn't, this is also likely an error. This should catch
10334 // 'memset(buf, sizeof(buf), 0xff)'.
10335 if (BId == Builtin::BImemset &&
10336 doesExprLikelyComputeSize(Call->getArg(1)) &&
10337 !doesExprLikelyComputeSize(Call->getArg(2))) {
10338 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc();
10339 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1;
10340 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1;
10341 return;
10342 }
10343}
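Editorial illustration (not from the analyzed file) of the transposed-argument mistakes this routine diagnoses, assuming a local buffer buf:

  memset(buf, sizeof(buf), 0);     // zero-length memset: size and value swapped
  memset(buf, sizeof(buf), 0xff);  // second argument is a sizeof, third is not
  memset(buf, 0xff, sizeof(buf));  // intended call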
10344
10345/// Check for dangerous or invalid arguments to memset().
10346///
10347/// This issues warnings on known problematic, dangerous or unspecified
10348/// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp'
10349/// function calls.
10350///
10351/// \param Call The call expression to diagnose.
10352void Sema::CheckMemaccessArguments(const CallExpr *Call,
10353 unsigned BId,
10354 IdentifierInfo *FnName) {
10355 assert(BId != 0)((void)0);
10356
10357 // It is possible to have a non-standard definition of memset. Validate
10358 // we have enough arguments, and if not, abort further checking.
10359 unsigned ExpectedNumArgs =
10360 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3);
10361 if (Call->getNumArgs() < ExpectedNumArgs)
10362 return;
10363
10364 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero ||
10365 BId == Builtin::BIstrndup ? 1 : 2);
10366 unsigned LenArg =
10367 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2);
10368 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts();
10369
10370 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName,
10371 Call->getBeginLoc(), Call->getRParenLoc()))
10372 return;
10373
10374 // Catch cases like 'memset(buf, sizeof(buf), 0)'.
10375 CheckMemaccessSize(*this, BId, Call);
10376
10377 // We have special checking when the length is a sizeof expression.
10378 QualType SizeOfArgTy = getSizeOfArgType(LenExpr);
10379 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr);
10380 llvm::FoldingSetNodeID SizeOfArgID;
10381
10382 // Although widely used, 'bzero' is not a standard function. Be more strict
10383 // with the argument types before allowing diagnostics and only allow the
10384 // form bzero(ptr, sizeof(...)).
10385 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType();
10386 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>())
10387 return;
10388
10389 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) {
10390 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts();
10391 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange();
10392
10393 QualType DestTy = Dest->getType();
10394 QualType PointeeTy;
10395 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) {
10396 PointeeTy = DestPtrTy->getPointeeType();
10397
10398 // Never warn about void type pointers. This can be used to suppress
10399 // false positives.
10400 if (PointeeTy->isVoidType())
10401 continue;
10402
10403 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by
10404 // actually comparing the expressions for equality. Because computing the
10405 // expression IDs can be expensive, we only do this if the diagnostic is
10406 // enabled.
10407 if (SizeOfArg &&
10408 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess,
10409 SizeOfArg->getExprLoc())) {
10410 // We only compute IDs for expressions if the warning is enabled, and
10411 // cache the sizeof arg's ID.
10412 if (SizeOfArgID == llvm::FoldingSetNodeID())
10413 SizeOfArg->Profile(SizeOfArgID, Context, true);
10414 llvm::FoldingSetNodeID DestID;
10415 Dest->Profile(DestID, Context, true);
10416 if (DestID == SizeOfArgID) {
10417 // TODO: For strncpy() and friends, this could suggest sizeof(dst)
10418 // over sizeof(src) as well.
10419 unsigned ActionIdx = 0; // Default is to suggest dereferencing.
10420 StringRef ReadableName = FnName->getName();
10421
10422 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest))
10423 if (UnaryOp->getOpcode() == UO_AddrOf)
10424 ActionIdx = 1; // If it's an address-of operator, just remove it.
10425 if (!PointeeTy->isIncompleteType() &&
10426 (Context.getTypeSize(PointeeTy) == Context.getCharWidth()))
10427 ActionIdx = 2; // If the pointee's size is sizeof(char),
10428 // suggest an explicit length.
10429
10430 // If the function is defined as a builtin macro, do not show macro
10431 // expansion.
10432 SourceLocation SL = SizeOfArg->getExprLoc();
10433 SourceRange DSR = Dest->getSourceRange();
10434 SourceRange SSR = SizeOfArg->getSourceRange();
10435 SourceManager &SM = getSourceManager();
10436
10437 if (SM.isMacroArgExpansion(SL)) {
10438 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts);
10439 SL = SM.getSpellingLoc(SL);
10440 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()),
10441 SM.getSpellingLoc(DSR.getEnd()));
10442 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()),
10443 SM.getSpellingLoc(SSR.getEnd()));
10444 }
10445
10446 DiagRuntimeBehavior(SL, SizeOfArg,
10447 PDiag(diag::warn_sizeof_pointer_expr_memaccess)
10448 << ReadableName
10449 << PointeeTy
10450 << DestTy
10451 << DSR
10452 << SSR);
10453 DiagRuntimeBehavior(SL, SizeOfArg,
10454 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note)
10455 << ActionIdx
10456 << SSR);
10457
10458 break;
10459 }
10460 }
10461
10462 // Also check for cases where the sizeof argument is the exact same
10463 // type as the memory argument, and where it points to a user-defined
10464 // record type.
10465 if (SizeOfArgTy != QualType()) {
10466 if (PointeeTy->isRecordType() &&
10467 Context.typesAreCompatible(SizeOfArgTy, DestTy)) {
10468 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest,
10469 PDiag(diag::warn_sizeof_pointer_type_memaccess)
10470 << FnName << SizeOfArgTy << ArgIdx
10471 << PointeeTy << Dest->getSourceRange()
10472 << LenExpr->getSourceRange());
10473 break;
10474 }
10475 }
10476 } else if (DestTy->isArrayType()) {
10477 PointeeTy = DestTy;
10478 }
10479
10480 if (PointeeTy == QualType())
10481 continue;
10482
10483 // Always complain about dynamic classes.
10484 bool IsContained;
10485 if (const CXXRecordDecl *ContainedRD =
10486 getContainedDynamicClass(PointeeTy, IsContained)) {
10487
10488 unsigned OperationType = 0;
10489 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp;
10490 // "overwritten" if we're warning about the destination for any call
10491 // but memcmp; otherwise a verb appropriate to the call.
10492 if (ArgIdx != 0 || IsCmp) {
10493 if (BId == Builtin::BImemcpy)
10494 OperationType = 1;
10495 else if(BId == Builtin::BImemmove)
10496 OperationType = 2;
10497 else if (IsCmp)
10498 OperationType = 3;
10499 }
10500
10501 DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
10502 PDiag(diag::warn_dyn_class_memaccess)
10503 << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName
10504 << IsContained << ContainedRD << OperationType
10505 << Call->getCallee()->getSourceRange());
10506 } else if (PointeeTy.hasNonTrivialObjCLifetime() &&
10507 BId != Builtin::BImemset)
10508 DiagRuntimeBehavior(
10509 Dest->getExprLoc(), Dest,
10510 PDiag(diag::warn_arc_object_memaccess)
10511 << ArgIdx << FnName << PointeeTy
10512 << Call->getCallee()->getSourceRange());
10513 else if (const auto *RT = PointeeTy->getAs<RecordType>()) {
10514 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) &&
10515 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) {
10516 DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
10517 PDiag(diag::warn_cstruct_memaccess)
10518 << ArgIdx << FnName << PointeeTy << 0);
10519 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this);
10520 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) &&
10521 RT->getDecl()->isNonTrivialToPrimitiveCopy()) {
10522 DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
10523 PDiag(diag::warn_cstruct_memaccess)
10524 << ArgIdx << FnName << PointeeTy << 1);
10525 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this);
10526 } else {
10527 continue;
10528 }
10529 } else
10530 continue;
10531
10532 DiagRuntimeBehavior(
10533 Dest->getExprLoc(), Dest,
10534 PDiag(diag::note_bad_memaccess_silence)
10535 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)"));
10536 break;
10537 }
10538}
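A sketch (editorial, not part of SemaChecking.cpp) of the sizeof-of-pointer case handled above, where the length argument measures the pointer instead of the pointee; struct S and zero() are hypothetical.

  struct S { int x[16]; };
  void zero(struct S *p) {
    memset(p, 0, sizeof(p));   // warn_sizeof_pointer_expr_memaccess: size of the pointer
    memset(p, 0, sizeof(*p));  // suggested fix: dereference inside the sizeof
  }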
10539
10540// A little helper routine: ignore addition and subtraction of integer literals.
10541// This intentionally does not ignore all integer constant expressions because
10542// we don't want to remove sizeof().
10543static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) {
10544 Ex = Ex->IgnoreParenCasts();
10545
10546 while (true) {
10547 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex);
10548 if (!BO || !BO->isAdditiveOp())
10549 break;
10550
10551 const Expr *RHS = BO->getRHS()->IgnoreParenCasts();
10552 const Expr *LHS = BO->getLHS()->IgnoreParenCasts();
10553
10554 if (isa<IntegerLiteral>(RHS))
10555 Ex = LHS;
10556 else if (isa<IntegerLiteral>(LHS))
10557 Ex = RHS;
10558 else
10559 break;
10560 }
10561
10562 return Ex;
10563}
10564
10565static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty,
10566 ASTContext &Context) {
10567 // Only handle constant-size arrays or VLAs, but not flexible array members.
10568 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) {
10569 // Only issue the FIXIT for arrays of size > 1.
10570 if (CAT->getSize().getSExtValue() <= 1)
10571 return false;
10572 } else if (!Ty->isVariableArrayType()) {
10573 return false;
10574 }
10575 return true;
10576}
10577
10578// Warn if the user has made the 'size' argument to strlcpy or strlcat
10579// be the size of the source, instead of the destination.
10580void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
10581 IdentifierInfo *FnName) {
10582
10583 // Don't crash if the user has the wrong number of arguments
10584 unsigned NumArgs = Call->getNumArgs();
10585 if ((NumArgs != 3) && (NumArgs != 4))
10586 return;
10587
10588 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context);
10589 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context);
10590 const Expr *CompareWithSrc = nullptr;
10591
10592 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName,
10593 Call->getBeginLoc(), Call->getRParenLoc()))
10594 return;
10595
10596 // Look for 'strlcpy(dst, x, sizeof(x))'
10597 if (const Expr *Ex = getSizeOfExprArg(SizeArg))
10598 CompareWithSrc = Ex;
10599 else {
10600 // Look for 'strlcpy(dst, x, strlen(x))'
10601 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) {
10602 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen &&
10603 SizeCall->getNumArgs() == 1)
10604 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context);
10605 }
10606 }
10607
10608 if (!CompareWithSrc)
10609 return;
10610
10611 // Determine if the argument to sizeof/strlen is equal to the source
10612 // argument. In principle there's all kinds of things you could do
10613 // here, for instance creating an == expression and evaluating it with
10614 // EvaluateAsBooleanCondition, but this uses a more direct technique:
10615 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg);
10616 if (!SrcArgDRE)
10617 return;
10618
10619 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc);
10620 if (!CompareWithSrcDRE ||
10621 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl())
10622 return;
10623
10624 const Expr *OriginalSizeArg = Call->getArg(2);
10625 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size)
10626 << OriginalSizeArg->getSourceRange() << FnName;
10627
10628 // Output a FIXIT hint if the destination is an array (rather than a
10629 // pointer to an array). This could be enhanced to handle some
10630 // pointers if we know the actual size, like if DstArg is 'array+2'
10631 // we could say 'sizeof(array)-2'.
10632 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts();
10633 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context))
10634 return;
10635
10636 SmallString<128> sizeString;
10637 llvm::raw_svector_ostream OS(sizeString);
10638 OS << "sizeof(";
10639 DstArg->printPretty(OS, nullptr, getPrintingPolicy());
10640 OS << ")";
10641
10642 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size)
10643 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(),
10644 OS.str());
10645}
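Editorial illustration of the wrong-size patterns recognized above and the fix-it replacement, assuming a char *src parameter and a fixed-size destination array dst:

  char dst[64];
  strlcpy(dst, src, sizeof(src));  // warn_strlcpycat_wrong_size: size of the source pointer
  strlcpy(dst, src, strlen(src));  // same mistake spelled with strlen
  strlcpy(dst, src, sizeof(dst));  // replacement suggested by the fix-it note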
10646
10647/// Check if two expressions refer to the same declaration.
10648static bool referToTheSameDecl(const Expr *E1, const Expr *E2) {
10649 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1))
10650 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2))
10651 return D1->getDecl() == D2->getDecl();
10652 return false;
10653}
10654
10655static const Expr *getStrlenExprArg(const Expr *E) {
10656 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
10657 const FunctionDecl *FD = CE->getDirectCallee();
10658 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen)
10659 return nullptr;
10660 return CE->getArg(0)->IgnoreParenCasts();
10661 }
10662 return nullptr;
10663}
10664
10665// Warn on anti-patterns as the 'size' argument to strncat.
10666// The correct size argument should look like the following:
10667//   strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
10668void Sema::CheckStrncatArguments(const CallExpr *CE,
10669 IdentifierInfo *FnName) {
10670 // Don't crash if the user has the wrong number of arguments.
10671 if (CE->getNumArgs() < 3)
10672 return;
10673 const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
10674 const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
10675 const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();
10676
10677 if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
10678 CE->getRParenLoc()))
10679 return;
10680
10681 // Identify common expressions, which are wrongly used as the size argument
10682 // to strncat and may lead to buffer overflows.
10683 unsigned PatternType = 0;
10684 if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
10685 // - sizeof(dst)
10686 if (referToTheSameDecl(SizeOfArg, DstArg))
10687 PatternType = 1;
10688 // - sizeof(src)
10689 else if (referToTheSameDecl(SizeOfArg, SrcArg))
10690 PatternType = 2;
10691 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
10692 if (BE->getOpcode() == BO_Sub) {
10693 const Expr *L = BE->getLHS()->IgnoreParenCasts();
10694 const Expr *R = BE->getRHS()->IgnoreParenCasts();
10695 // - sizeof(dst) - strlen(dst)
10696 if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
10697 referToTheSameDecl(DstArg, getStrlenExprArg(R)))
10698 PatternType = 1;
10699 // - sizeof(src) - (anything)
10700 else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
10701 PatternType = 2;
10702 }
10703 }
10704
10705 if (PatternType == 0)
10706 return;
10707
10708 // Generate the diagnostic.
10709 SourceLocation SL = LenArg->getBeginLoc();
10710 SourceRange SR = LenArg->getSourceRange();
10711 SourceManager &SM = getSourceManager();
10712
10713 // If the function is defined as a builtin macro, do not show macro expansion.
10714 if (SM.isMacroArgExpansion(SL)) {
10715 SL = SM.getSpellingLoc(SL);
10716 SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
10717 SM.getSpellingLoc(SR.getEnd()));
10718 }
10719
10720 // Check if the destination is an array (rather than a pointer to an array).
10721 QualType DstTy = DstArg->getType();
10722 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy,
10723 Context);
10724 if (!isKnownSizeArray) {
10725 if (PatternType == 1)
10726 Diag(SL, diag::warn_strncat_wrong_size) << SR;
10727 else
10728 Diag(SL, diag::warn_strncat_src_size) << SR;
10729 return;
10730 }
10731
10732 if (PatternType == 1)
10733 Diag(SL, diag::warn_strncat_large_size) << SR;
10734 else
10735 Diag(SL, diag::warn_strncat_src_size) << SR;
10736
10737 SmallString<128> sizeString;
10738 llvm::raw_svector_ostream OS(sizeString);
10739 OS << "sizeof(";
10740 DstArg->printPretty(OS, nullptr, getPrintingPolicy());
10741 OS << ") - ";
10742 OS << "strlen(";
10743 DstArg->printPretty(OS, nullptr, getPrintingPolicy());
10744 OS << ") - 1";
10745
10746 Diag(SL, diag::note_strncat_wrong_size)
10747 << FixItHint::CreateReplacement(SR, OS.str());
10748}
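Editorial illustration of the strncat size anti-patterns flagged above and the replacement suggested by the note, assuming dst is a fixed-size array:

  char dst[64];
  strncat(dst, src, sizeof(dst));                    // pattern 1: leaves no room for the terminator
  strncat(dst, src, sizeof(src));                    // pattern 2: size of the source
  strncat(dst, src, sizeof(dst) - strlen(dst) - 1);  // suggested size expression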
10749
10750namespace {
10751void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName,
10752 const UnaryOperator *UnaryExpr, const Decl *D) {
10753 if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) {
10754 S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object)
10755 << CalleeName << 0 /*object: */ << cast<NamedDecl>(D);
10756 return;
10757 }
10758}
10759
10760void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName,
10761 const UnaryOperator *UnaryExpr) {
10762 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) {
10763 const Decl *D = Lvalue->getDecl();
10764 if (isa<DeclaratorDecl>(D))
10765 if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType())
10766 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D);
10767 }
10768
10769 if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr()))
10770 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr,
10771 Lvalue->getMemberDecl());
10772}
10773
10774void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName,
10775 const UnaryOperator *UnaryExpr) {
10776 const auto *Lambda = dyn_cast<LambdaExpr>(
10777 UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens());
10778 if (!Lambda)
10779 return;
10780
10781 S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object)
10782 << CalleeName << 2 /*object: lambda expression*/;
10783}
10784
10785void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName,
10786 const DeclRefExpr *Lvalue) {
10787 const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl());
10788 if (Var == nullptr)
10789 return;
10790
10791 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object)
10792 << CalleeName << 0 /*object: */ << Var;
10793}
10794
10795void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName,
10796 const CastExpr *Cast) {
10797 SmallString<128> SizeString;
10798 llvm::raw_svector_ostream OS(SizeString);
10799
10800 clang::CastKind Kind = Cast->getCastKind();
10801 if (Kind == clang::CK_BitCast &&
10802 !Cast->getSubExpr()->getType()->isFunctionPointerType())
10803 return;
10804 if (Kind == clang::CK_IntegralToPointer &&
10805 !isa<IntegerLiteral>(
10806 Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens()))
10807 return;
10808
10809 switch (Cast->getCastKind()) {
10810 case clang::CK_BitCast:
10811 case clang::CK_IntegralToPointer:
10812 case clang::CK_FunctionToPointerDecay:
10813 OS << '\'';
10814 Cast->printPretty(OS, nullptr, S.getPrintingPolicy());
10815 OS << '\'';
10816 break;
10817 default:
10818 return;
10819 }
10820
10821 S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object)
10822 << CalleeName << 0 /*object: */ << OS.str();
10823}
10824} // namespace
10825
10826/// Alerts the user that they are attempting to free a non-malloc'd object.
10827void Sema::CheckFreeArguments(const CallExpr *E) {
10828 const std::string CalleeName =
10829 dyn_cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString();
12. Assuming the object is not a 'FunctionDecl'
13. Called C++ object pointer is null
10830
10831 { // Prefer something that doesn't involve a cast to make things simpler.
10832 const Expr *Arg = E->getArg(0)->IgnoreParenCasts();
10833 if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg))
10834 switch (UnaryExpr->getOpcode()) {
10835 case UnaryOperator::Opcode::UO_AddrOf:
10836 return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr);
10837 case UnaryOperator::Opcode::UO_Plus:
10838 return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr);
10839 default:
10840 break;
10841 }
10842
10843 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg))
10844 if (Lvalue->getType()->isArrayType())
10845 return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue);
10846
10847 if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) {
10848 Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object)
10849 << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier();
10850 return;
10851 }
10852
10853 if (isa<BlockExpr>(Arg)) {
10854 Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object)
10855 << CalleeName << 1 /*object: block*/;
10856 return;
10857 }
10858 }
10859 // Maybe the cast was important, check after the other cases.
10860 if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0)))
10861 return CheckFreeArgumentsCast(*this, CalleeName, Cast);
10862}
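A sketch (editorial) of calls that reach the warn_free_nonheap_object paths above, assuming <stdlib.h> is included and g is a hypothetical function:

  void g(void);
  int x;
  int buf[8];
  free(&x);         // address-of a local variable
  free(buf);        // stack array decaying to a pointer
  free((void *)g);  // function pointer reached through the cast check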
10863
10864void
10865Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
10866 SourceLocation ReturnLoc,
10867 bool isObjCMethod,
10868 const AttrVec *Attrs,
10869 const FunctionDecl *FD) {
10870 // Check if the return value is null but should not be.
10871 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
10872 (!isObjCMethod && isNonNullType(Context, lhsType))) &&
10873 CheckNonNullExpr(*this, RetValExp))
10874 Diag(ReturnLoc, diag::warn_null_ret)
10875 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();
10876
10877 // C++11 [basic.stc.dynamic.allocation]p4:
10878 // If an allocation function declared with a non-throwing
10879 // exception-specification fails to allocate storage, it shall return
10880 // a null pointer. Any other allocation function that fails to allocate
10881 // storage shall indicate failure only by throwing an exception [...]
10882 if (FD) {
10883 OverloadedOperatorKind Op = FD->getOverloadedOperator();
10884 if (Op == OO_New || Op == OO_Array_New) {
10885 const FunctionProtoType *Proto
10886 = FD->getType()->castAs<FunctionProtoType>();
10887 if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
10888 CheckNonNullExpr(*this, RetValExp))
10889 Diag(ReturnLoc, diag::warn_operator_new_returns_null)
10890 << FD << getLangOpts().CPlusPlus11;
10891 }
10892 }
10893
10894 // PPC MMA non-pointer types are not allowed as a return type. Checking the type
10895 // here prevents the user from using a PPC MMA type as a trailing return type.
10896 if (Context.getTargetInfo().getTriple().isPPC64())
10897 CheckPPCMMAType(RetValExp->getType(), ReturnLoc);
10898}
10899
10900//===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//
10901
10902/// Check for comparisons of floating point operands using != and ==.
10903/// Issue a warning if these are not self-comparisons, as they are not likely
10904/// to do what the programmer intended.
10905void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
10906 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
10907 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();
10908
10909 // Special case: check for x == x (which is OK).
10910 // Do not emit warnings for such cases.
10911 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
10912 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
10913 if (DRL->getDecl() == DRR->getDecl())
10914 return;
10915
10916 // Special case: check for comparisons against literals that can be exactly
10917 // represented by APFloat. In such cases, do not emit a warning. This
10918 // is a heuristic: comparisons against such literals are often used to
10919 // detect if a value in a variable has not changed. This clearly can
10920 // lead to false negatives.
10921 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
10922 if (FLL->isExact())
10923 return;
10924 } else
10925 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
10926 if (FLR->isExact())
10927 return;
10928
10929 // Check for comparisons involving calls to builtin functions.
10930 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
10931 if (CL->getBuiltinCallee())
10932 return;
10933
10934 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
10935 if (CR->getBuiltinCallee())
10936 return;
10937
10938 // Emit the diagnostic.
10939 Diag(Loc, diag::warn_floatingpoint_eq)
10940 << LHS->getSourceRange() << RHS->getSourceRange();
10941}
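Editorial example of the float-equality check above, including the self-comparison and exactly-representable-literal exemptions; x and y are hypothetical doubles:

  if (x == y)   {}  // warn_floatingpoint_eq
  if (x == x)   {}  // same declaration on both sides: suppressed
  if (x == 1.0) {}  // literal converts exactly: suppressed
  if (x == 0.1) {}  // 0.1 is inexact in binary floating point: still warned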
10942
10943//===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
10944//===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//
10945
10946namespace {
10947
10948/// Structure recording the 'active' range of an integer-valued
10949/// expression.
10950struct IntRange {
10951 /// The number of bits active in the int. Note that this includes exactly one
10952 /// sign bit if !NonNegative.
10953 unsigned Width;
10954
10955 /// True if the int is known not to have negative values. If so, all leading
10956 /// bits before Width are known zero, otherwise they are known to be the
10957 /// same as the MSB within Width.
10958 bool NonNegative;
10959
10960 IntRange(unsigned Width, bool NonNegative)
10961 : Width(Width), NonNegative(NonNegative) {}
10962
10963 /// Number of bits excluding the sign bit.
10964 unsigned valueBits() const {
10965 return NonNegative ? Width : Width - 1;
10966 }
10967
10968 /// Returns the range of the bool type.
10969 static IntRange forBoolType() {
10970 return IntRange(1, true);
10971 }
10972
10973 /// Returns the range of an opaque value of the given integral type.
10974 static IntRange forValueOfType(ASTContext &C, QualType T) {
10975 return forValueOfCanonicalType(C,
10976 T->getCanonicalTypeInternal().getTypePtr());
10977 }
10978
10979 /// Returns the range of an opaque value of a canonical integral type.
10980 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) {
10981 assert(T->isCanonicalUnqualified())((void)0);
10982
10983 if (const VectorType *VT = dyn_cast<VectorType>(T))
10984 T = VT->getElementType().getTypePtr();
10985 if (const ComplexType *CT = dyn_cast<ComplexType>(T))
10986 T = CT->getElementType().getTypePtr();
10987 if (const AtomicType *AT = dyn_cast<AtomicType>(T))
10988 T = AT->getValueType().getTypePtr();
10989
10990 if (!C.getLangOpts().CPlusPlus) {
10991 // For enum types in C code, use the underlying datatype.
10992 if (const EnumType *ET = dyn_cast<EnumType>(T))
10993 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr();
10994 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) {
10995 // For enum types in C++, use the known bit width of the enumerators.
10996 EnumDecl *Enum = ET->getDecl();
10997 // In C++11, enums can have a fixed underlying type. Use this type to
10998 // compute the range.
10999 if (Enum->isFixed()) {
11000 return IntRange(C.getIntWidth(QualType(T, 0)),
11001 !ET->isSignedIntegerOrEnumerationType());
11002 }
11003
11004 unsigned NumPositive = Enum->getNumPositiveBits();
11005 unsigned NumNegative = Enum->getNumNegativeBits();
11006
11007 if (NumNegative == 0)
11008 return IntRange(NumPositive, true/*NonNegative*/);
11009 else
11010 return IntRange(std::max(NumPositive + 1, NumNegative),
11011 false/*NonNegative*/);
11012 }
11013
11014 if (const auto *EIT = dyn_cast<ExtIntType>(T))
11015 return IntRange(EIT->getNumBits(), EIT->isUnsigned());
11016
11017 const BuiltinType *BT = cast<BuiltinType>(T);
11018 assert(BT->isInteger())((void)0);
11019
11020 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
11021 }
11022
11023 /// Returns the "target" range of a canonical integral type, i.e.
11024 /// the range of values expressible in the type.
11025 ///
11026 /// This matches forValueOfCanonicalType except that enums have the
11027 /// full range of their type, not the range of their enumerators.
11028 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) {
11029 assert(T->isCanonicalUnqualified())((void)0);
11030
11031 if (const VectorType *VT = dyn_cast<VectorType>(T))
11032 T = VT->getElementType().getTypePtr();
11033 if (const ComplexType *CT = dyn_cast<ComplexType>(T))
11034 T = CT->getElementType().getTypePtr();
11035 if (const AtomicType *AT = dyn_cast<AtomicType>(T))
11036 T = AT->getValueType().getTypePtr();
11037 if (const EnumType *ET = dyn_cast<EnumType>(T))
11038 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();
11039
11040 if (const auto *EIT = dyn_cast<ExtIntType>(T))
11041 return IntRange(EIT->getNumBits(), EIT->isUnsigned());
11042
11043 const BuiltinType *BT = cast<BuiltinType>(T);
11044 assert(BT->isInteger())((void)0);
11045
11046 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
11047 }
11048
11049 /// Returns the supremum of two ranges: i.e. their conservative merge.
11050 static IntRange join(IntRange L, IntRange R) {
11051 bool Unsigned = L.NonNegative && R.NonNegative;
11052 return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned,
11053 L.NonNegative && R.NonNegative);
11054 }
11055
11056 /// Return the range of a bitwise-AND of the two ranges.
11057 static IntRange bit_and(IntRange L, IntRange R) {
11058 unsigned Bits = std::max(L.Width, R.Width);
11059 bool NonNegative = false;
11060 if (L.NonNegative) {
11061 Bits = std::min(Bits, L.Width);
11062 NonNegative = true;
11063 }
11064 if (R.NonNegative) {
11065 Bits = std::min(Bits, R.Width);
11066 NonNegative = true;
11067 }
11068 return IntRange(Bits, NonNegative);
11069 }
11070
11071 /// Return the range of a sum of the two ranges.
11072 static IntRange sum(IntRange L, IntRange R) {
11073 bool Unsigned = L.NonNegative && R.NonNegative;
11074 return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned,
11075 Unsigned);
11076 }
11077
11078 /// Return the range of a difference of the two ranges.
11079 static IntRange difference(IntRange L, IntRange R) {
11080 // We need a 1-bit-wider range if:
11081 // 1) LHS can be negative: least value can be reduced.
11082 // 2) RHS can be negative: greatest value can be increased.
11083 bool CanWiden = !L.NonNegative || !R.NonNegative;
11084 bool Unsigned = L.NonNegative && R.Width == 0;
11085 return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden +
11086 !Unsigned,
11087 Unsigned);
11088 }
11089
11090 /// Return the range of a product of the two ranges.
11091 static IntRange product(IntRange L, IntRange R) {
11092 // If both LHS and RHS can be negative, we can form
11093 // -2^L * -2^R = 2^(L + R)
11094 // which requires L + R + 1 value bits to represent.
11095 bool CanWiden = !L.NonNegative && !R.NonNegative;
11096 bool Unsigned = L.NonNegative && R.NonNegative;
11097 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned,
11098 Unsigned);
11099 }
11100
11101 /// Return the range of a remainder operation between the two ranges.
11102 static IntRange rem(IntRange L, IntRange R) {
11103 // The result of a remainder can't be larger than the result of
11104 // either side. The sign of the result is the sign of the LHS.
11105 bool Unsigned = L.NonNegative;
11106 return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned,
11107 Unsigned);
11108 }
11109};
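A small worked example (editorial) of the width arithmetic defined above, combining an 8-bit non-negative range with an 8-bit signed range:

  // L = IntRange(8, /*NonNegative=*/true)   ~ 0..255      (valueBits() == 8)
  // R = IntRange(8, /*NonNegative=*/false)  ~ -128..127   (valueBits() == 7)
  // join(L, R)       -> IntRange(9,  false)   covers -128..255
  // sum(L, R)        -> IntRange(10, false)   covers -128..382
  // difference(L, R) -> IntRange(10, false)   covers -127..383
  // product(L, R)    -> IntRange(16, false)   covers -32640..32385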
11110
11111} // namespace
11112
11113static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
11114 unsigned MaxWidth) {
11115 if (value.isSigned() && value.isNegative())
11116 return IntRange(value.getMinSignedBits(), false);
11117
11118 if (value.getBitWidth() > MaxWidth)
11119 value = value.trunc(MaxWidth);
11120
11121 // isNonNegative() just checks the sign bit without considering
11122 // signedness.
11123 return IntRange(value.getActiveBits(), true);
11124}
11125
11126static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
11127 unsigned MaxWidth) {
11128 if (result.isInt())
11129 return GetValueRange(C, result.getInt(), MaxWidth);
11130
11131 if (result.isVector()) {
11132 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth);
11133 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
11134 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth);
11135 R = IntRange::join(R, El);
11136 }
11137 return R;
11138 }
11139
11140 if (result.isComplexInt()) {
11141 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth);
11142 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth);
11143 return IntRange::join(R, I);
11144 }
11145
11146 // This can happen with lossless casts to intptr_t of "based" lvalues.
11147 // Assume it might use arbitrary bits.
11148 // FIXME: The only reason we need to pass the type in here is to get
11149 // the sign right on this one case. It would be nice if APValue
11150 // preserved this.
11151 assert(result.isLValue() || result.isAddrLabelDiff())((void)0);
11152 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType());
11153}
11154
11155static QualType GetExprType(const Expr *E) {
11156 QualType Ty = E->getType();
11157 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>())
11158 Ty = AtomicRHS->getValueType();
11159 return Ty;
11160}
11161
11162/// Pseudo-evaluate the given integer expression, estimating the
11163/// range of values it might take.
11164///
11165/// \param MaxWidth The width to which the value will be truncated.
11166/// \param Approximate If \c true, return a likely range for the result: in
11167/// particular, assume that arithmetic on narrower types doesn't leave
11168/// those types. If \c false, return a range including all possible
11169/// result values.
11170static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
11171 bool InConstantContext, bool Approximate) {
11172 E = E->IgnoreParens();
11173
11174 // Try a full evaluation first.
11175 Expr::EvalResult result;
11176 if (E->EvaluateAsRValue(result, C, InConstantContext))
11177 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth);
11178
11179 // I think we only want to look through implicit casts here; if the
11180 // user has an explicit widening cast, we should treat the value as
11181 // being of the new, wider type.
11182 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) {
11183 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
11184 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext,
11185 Approximate);
11186
11187 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));
11188
11189 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast ||
11190 CE->getCastKind() == CK_BooleanToSignedIntegral;
11191
11192 // Assume that non-integer casts can span the full range of the type.
11193 if (!isIntegerCast)
11194 return OutputTypeRange;
11195
11196 IntRange SubRange = GetExprRange(C, CE->getSubExpr(),
11197 std::min(MaxWidth, OutputTypeRange.Width),
11198 InConstantContext, Approximate);
11199
11200 // Bail out if the subexpr's range is as wide as the cast type.
11201 if (SubRange.Width >= OutputTypeRange.Width)
11202 return OutputTypeRange;
11203
11204 // Otherwise, we take the smaller width, and we're non-negative if
11205 // either the output type or the subexpr is.
11206 return IntRange(SubRange.Width,
11207 SubRange.NonNegative || OutputTypeRange.NonNegative);
11208 }
11209
11210 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
11211 // If we can fold the condition, just take that operand.
11212 bool CondResult;
11213 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
11214 return GetExprRange(C,
11215 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(),
11216 MaxWidth, InConstantContext, Approximate);
11217
11218 // Otherwise, conservatively merge.
11219 // GetExprRange requires an integer expression, but a throw expression
11220 // results in a void type.
11221 Expr *E = CO->getTrueExpr();
11222 IntRange L = E->getType()->isVoidType()
11223 ? IntRange{0, true}
11224 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
11225 E = CO->getFalseExpr();
11226 IntRange R = E->getType()->isVoidType()
11227 ? IntRange{0, true}
11228 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
11229 return IntRange::join(L, R);
11230 }
11231
11232 if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
11233 IntRange (*Combine)(IntRange, IntRange) = IntRange::join;
11234
11235 switch (BO->getOpcode()) {
11236 case BO_Cmp:
11237 llvm_unreachable("builtin <=> should have class type")__builtin_unreachable();
11238
11239 // Boolean-valued operations are single-bit and positive.
11240 case BO_LAnd:
11241 case BO_LOr:
11242 case BO_LT:
11243 case BO_GT:
11244 case BO_LE:
11245 case BO_GE:
11246 case BO_EQ:
11247 case BO_NE:
11248 return IntRange::forBoolType();
11249
11250 // The type of the assignments is the type of the LHS, so the RHS
11251 // is not necessarily the same type.
11252 case BO_MulAssign:
11253 case BO_DivAssign:
11254 case BO_RemAssign:
11255 case BO_AddAssign:
11256 case BO_SubAssign:
11257 case BO_XorAssign:
11258 case BO_OrAssign:
11259 // TODO: bitfields?
11260 return IntRange::forValueOfType(C, GetExprType(E));
11261
11262 // Simple assignments just pass through the RHS, which will have
11263 // been coerced to the LHS type.
11264 case BO_Assign:
11265 // TODO: bitfields?
11266 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
11267 Approximate);
11268
11269 // Operations with opaque sources are black-listed.
11270 case BO_PtrMemD:
11271 case BO_PtrMemI:
11272 return IntRange::forValueOfType(C, GetExprType(E));
11273
11274 // Bitwise-and uses the *infimum* of the two source ranges.
11275 case BO_And:
11276 case BO_AndAssign:
11277 Combine = IntRange::bit_and;
11278 break;
11279
11280 // Left shift gets black-listed based on a judgement call.
11281 case BO_Shl:
11282 // ...except that we want to treat '1 << (blah)' as logically
11283 // positive. It's an important idiom.
11284 if (IntegerLiteral *I
11285 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) {
11286 if (I->getValue() == 1) {
11287 IntRange R = IntRange::forValueOfType(C, GetExprType(E));
11288 return IntRange(R.Width, /*NonNegative*/ true);
11289 }
11290 }
11291 LLVM_FALLTHROUGH[[gnu::fallthrough]];
11292
11293 case BO_ShlAssign:
11294 return IntRange::forValueOfType(C, GetExprType(E));
11295
11296 // Right shift by a constant can narrow its left argument.
11297 case BO_Shr:
11298 case BO_ShrAssign: {
11299 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext,
11300 Approximate);
11301
11302 // If the shift amount is a positive constant, drop the width by
11303 // that much.
11304 if (Optional<llvm::APSInt> shift =
11305 BO->getRHS()->getIntegerConstantExpr(C)) {
11306 if (shift->isNonNegative()) {
11307 unsigned zext = shift->getZExtValue();
11308 if (zext >= L.Width)
11309 L.Width = (L.NonNegative ? 0 : 1);
11310 else
11311 L.Width -= zext;
11312 }
11313 }
11314
11315 return L;
11316 }
11317
11318 // Comma acts as its right operand.
11319 case BO_Comma:
11320 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
11321 Approximate);
11322
11323 case BO_Add:
11324 if (!Approximate)
11325 Combine = IntRange::sum;
11326 break;
11327
11328 case BO_Sub:
11329 if (BO->getLHS()->getType()->isPointerType())
11330 return IntRange::forValueOfType(C, GetExprType(E));
11331 if (!Approximate)
11332 Combine = IntRange::difference;
11333 break;
11334
11335 case BO_Mul:
11336 if (!Approximate)
11337 Combine = IntRange::product;
11338 break;
11339
11340 // The width of a division result is mostly determined by the size
11341 // of the LHS.
11342 case BO_Div: {
11343 // Don't 'pre-truncate' the operands.
11344 unsigned opWidth = C.getIntWidth(GetExprType(E));
11345 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext,
11346 Approximate);
11347
11348 // If the divisor is constant, use that.
11349 if (Optional<llvm::APSInt> divisor =
11350 BO->getRHS()->getIntegerConstantExpr(C)) {
11351 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor))
11352 if (log2 >= L.Width)
11353 L.Width = (L.NonNegative ? 0 : 1);
11354 else
11355 L.Width = std::min(L.Width - log2, MaxWidth);
11356 return L;
11357 }
11358
11359 // Otherwise, just use the LHS's width.
11360 // FIXME: This is wrong if the LHS could be its minimal value and the RHS
11361 // could be -1.
11362 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext,
11363 Approximate);
11364 return IntRange(L.Width, L.NonNegative && R.NonNegative);
11365 }
11366
11367 case BO_Rem:
11368 Combine = IntRange::rem;
11369 break;
11370
11371 // The default behavior is okay for these.
11372 case BO_Xor:
11373 case BO_Or:
11374 break;
11375 }
11376
11377 // Combine the two ranges, but limit the result to the type in which we
11378 // performed the computation.
11379 QualType T = GetExprType(E);
11380 unsigned opWidth = C.getIntWidth(T);
11381 IntRange L =
11382 GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate);
11383 IntRange R =
11384 GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate);
11385 IntRange C = Combine(L, R);
11386 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType();
11387 C.Width = std::min(C.Width, MaxWidth);
11388 return C;
11389 }
11390
11391 if (const auto *UO = dyn_cast<UnaryOperator>(E)) {
11392 switch (UO->getOpcode()) {
11393 // Boolean-valued operations are white-listed.
11394 case UO_LNot:
11395 return IntRange::forBoolType();
11396
11397 // Operations with opaque sources are black-listed.
11398 case UO_Deref:
11399 case UO_AddrOf: // should be impossible
11400 return IntRange::forValueOfType(C, GetExprType(E));
11401
11402 default:
11403 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext,
11404 Approximate);
11405 }
11406 }
11407
11408 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
11409 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext,
11410 Approximate);
11411
11412 if (const auto *BitField = E->getSourceBitField())
11413 return IntRange(BitField->getBitWidthValue(C),
11414 BitField->getType()->isUnsignedIntegerOrEnumerationType());
11415
11416 return IntRange::forValueOfType(C, GetExprType(E));
11417}
11418
11419static IntRange GetExprRange(ASTContext &C, const Expr *E,
11420 bool InConstantContext, bool Approximate) {
11421 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext,
11422 Approximate);
11423}
11424
11425/// Checks whether the given value, which currently has the given
11426/// source semantics, has the same value when coerced through the
11427/// target semantics.
11428static bool IsSameFloatAfterCast(const llvm::APFloat &value,
11429 const llvm::fltSemantics &Src,
11430 const llvm::fltSemantics &Tgt) {
11431 llvm::APFloat truncated = value;
11432
11433 bool ignored;
11434 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored);
11435 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored);
11436
11437 return truncated.bitwiseIsEqual(value);
11438}
11439
11440/// Checks whether the given value, which currently has the given
11441/// source semantics, has the same value when coerced through the
11442/// target semantics.
11443///
11444/// The value might be a vector of floats (or a complex number).
11445static bool IsSameFloatAfterCast(const APValue &value,
11446 const llvm::fltSemantics &Src,
11447 const llvm::fltSemantics &Tgt) {
11448 if (value.isFloat())
11449 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt);
11450
11451 if (value.isVector()) {
11452 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i)
11453 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt))
11454 return false;
11455 return true;
11456 }
11457
11458 assert(value.isComplexFloat())((void)0);
11459 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) &&
11460 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt));
11461}
11462
11463static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC,
11464 bool IsListInit = false);
11465
11466static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) {
11467 // Suppress cases where we are comparing against an enum constant.
11468 if (const DeclRefExpr *DR =
11469 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
11470 if (isa<EnumConstantDecl>(DR->getDecl()))
11471 return true;
11472
11473 // Suppress cases where the value is expanded from a macro, unless that macro
11474 // is how a language represents a boolean literal. This is the case in both C
11475 // and Objective-C.
11476 SourceLocation BeginLoc = E->getBeginLoc();
11477 if (BeginLoc.isMacroID()) {
11478 StringRef MacroName = Lexer::getImmediateMacroName(
11479 BeginLoc, S.getSourceManager(), S.getLangOpts());
11480 return MacroName != "YES" && MacroName != "NO" &&
11481 MacroName != "true" && MacroName != "false";
11482 }
11483
11484 return false;
11485}
11486
11487static bool isKnownToHaveUnsignedValue(Expr *E) {
11488 return E->getType()->isIntegerType() &&
11489 (!E->getType()->isSignedIntegerType() ||
11490 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType());
11491}
11492
11493namespace {
11494/// The promoted range of values of a type. In general this has the
11495/// following structure:
11496///
11497/// |-----------| . . . |-----------|
11498/// ^ ^ ^ ^
11499/// Min HoleMin HoleMax Max
11500///
11501/// ... where there is only a hole if a signed type is promoted to unsigned
11502/// (in which case Min and Max are the smallest and largest representable
11503/// values).
11504struct PromotedRange {
11505 // Min, or HoleMax if there is a hole.
11506 llvm::APSInt PromotedMin;
11507 // Max, or HoleMin if there is a hole.
11508 llvm::APSInt PromotedMax;
11509
11510 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) {
11511 if (R.Width == 0)
11512 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned);
11513 else if (R.Width >= BitWidth && !Unsigned) {
11514 // Promotion made the type *narrower*. This happens when promoting
11515 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'.
11516 // Treat all values of 'signed int' as being in range for now.
11517 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned);
11518 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned);
11519 } else {
11520 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative)
11521 .extOrTrunc(BitWidth);
11522 PromotedMin.setIsUnsigned(Unsigned);
11523
11524 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative)
11525 .extOrTrunc(BitWidth);
11526 PromotedMax.setIsUnsigned(Unsigned);
11527 }
11528 }
11529
11530 // Determine whether this range is contiguous (has no hole).
11531 bool isContiguous() const { return PromotedMin <= PromotedMax; }
11532
11533 // Where a constant value is within the range.
11534 enum ComparisonResult {
11535 LT = 0x1,
11536 LE = 0x2,
11537 GT = 0x4,
11538 GE = 0x8,
11539 EQ = 0x10,
11540 NE = 0x20,
11541 InRangeFlag = 0x40,
11542
11543 Less = LE | LT | NE,
11544 Min = LE | InRangeFlag,
11545 InRange = InRangeFlag,
11546 Max = GE | InRangeFlag,
11547 Greater = GE | GT | NE,
11548
11549 OnlyValue = LE | GE | EQ | InRangeFlag,
11550 InHole = NE
11551 };
11552
11553 ComparisonResult compare(const llvm::APSInt &Value) const {
11554 assert(Value.getBitWidth() == PromotedMin.getBitWidth() &&((void)0)
11555 Value.isUnsigned() == PromotedMin.isUnsigned())((void)0);
11556 if (!isContiguous()) {
11557 assert(Value.isUnsigned() && "discontiguous range for signed compare")((void)0);
11558 if (Value.isMinValue()) return Min;
11559 if (Value.isMaxValue()) return Max;
11560 if (Value >= PromotedMin) return InRange;
11561 if (Value <= PromotedMax) return InRange;
11562 return InHole;
11563 }
11564
11565 switch (llvm::APSInt::compareValues(Value, PromotedMin)) {
11566 case -1: return Less;
11567 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min;
11568 case 1:
11569 switch (llvm::APSInt::compareValues(Value, PromotedMax)) {
11570 case -1: return InRange;
11571 case 0: return Max;
11572 case 1: return Greater;
11573 }
11574 }
11575
11576 llvm_unreachable("impossible compare result")__builtin_unreachable();
11577 }
11578
11579 static llvm::Optional<StringRef>
11580 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) {
11581 if (Op == BO_Cmp) {
11582 ComparisonResult LTFlag = LT, GTFlag = GT;
11583 if (ConstantOnRHS) std::swap(LTFlag, GTFlag);
11584
11585 if (R & EQ) return StringRef("'std::strong_ordering::equal'");
11586 if (R & LTFlag) return StringRef("'std::strong_ordering::less'");
11587 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'");
11588 return llvm::None;
11589 }
11590
11591 ComparisonResult TrueFlag, FalseFlag;
11592 if (Op == BO_EQ) {
11593 TrueFlag = EQ;
11594 FalseFlag = NE;
11595 } else if (Op == BO_NE) {
11596 TrueFlag = NE;
11597 FalseFlag = EQ;
11598 } else {
11599 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) {
11600 TrueFlag = LT;
11601 FalseFlag = GE;
11602 } else {
11603 TrueFlag = GT;
11604 FalseFlag = LE;
11605 }
11606 if (Op == BO_GE || Op == BO_LE)
11607 std::swap(TrueFlag, FalseFlag);
11608 }
11609 if (R & TrueFlag)
11610 return StringRef("true");
11611 if (R & FalseFlag)
11612 return StringRef("false");
11613 return llvm::None;
11614 }
11615};
11616}
11617
11618static bool HasEnumType(Expr *E) {
11619 // Strip off implicit integral promotions.
11620 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
11621 if (ICE->getCastKind() != CK_IntegralCast &&
11622 ICE->getCastKind() != CK_NoOp)
11623 break;
11624 E = ICE->getSubExpr();
11625 }
11626
11627 return E->getType()->isEnumeralType();
11628}
11629
11630static int classifyConstantValue(Expr *Constant) {
11631 // The values of this enumeration are used in the diagnostics
11632 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare.
11633 enum ConstantValueKind {
11634 Miscellaneous = 0,
11635 LiteralTrue,
11636 LiteralFalse
11637 };
11638 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant))
11639 return BL->getValue() ? ConstantValueKind::LiteralTrue
11640 : ConstantValueKind::LiteralFalse;
11641 return ConstantValueKind::Miscellaneous;
11642}
11643
11644static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
11645 Expr *Constant, Expr *Other,
11646 const llvm::APSInt &Value,
11647 bool RhsConstant) {
11648 if (S.inTemplateInstantiation())
11649 return false;
11650
11651 Expr *OriginalOther = Other;
11652
11653 Constant = Constant->IgnoreParenImpCasts();
11654 Other = Other->IgnoreParenImpCasts();
11655
11656 // Suppress warnings on tautological comparisons between values of the same
11657 // enumeration type. There are only two ways we could warn on this:
11658 // - If the constant is outside the range of representable values of
11659 // the enumeration. In such a case, we should warn about the cast
11660 // to enumeration type, not about the comparison.
11661 // - If the constant is the maximum / minimum in-range value. For an
11662 // enumeration type, such comparisons can be meaningful and useful.
11663 if (Constant->getType()->isEnumeralType() &&
11664 S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
11665 return false;
11666
11667 IntRange OtherValueRange = GetExprRange(
11668 S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false);
11669
11670 QualType OtherT = Other->getType();
11671 if (const auto *AT = OtherT->getAs<AtomicType>())
11672 OtherT = AT->getValueType();
11673 IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT);
11674
11675 // Special case for ObjC BOOL on targets where it's a typedef for a signed char
11676 // (namely, macOS). FIXME: IntRange::forValueOfType should do this.
11677 bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
11678 S.NSAPIObj->isObjCBOOLType(OtherT) &&
11679 OtherT->isSpecificBuiltinType(BuiltinType::SChar);
11680
11681 // Whether we're treating Other as being a bool because of the form of the
11682 // expression despite it having another type (typically 'int' in C).
11683 bool OtherIsBooleanDespiteType =
11684 !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
11685 if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
11686 OtherTypeRange = OtherValueRange = IntRange::forBoolType();
11687
11688 // Check if all values in the range of possible values of this expression
11689 // lead to the same comparison outcome.
11690 PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
11691 Value.isUnsigned());
11692 auto Cmp = OtherPromotedValueRange.compare(Value);
11693 auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
11694 if (!Result)
11695 return false;
11696
11697 // Also consider the range determined by the type alone. This allows us to
11698 // classify the warning under the proper diagnostic group.
11699 bool TautologicalTypeCompare = false;
11700 {
11701 PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
11702 Value.isUnsigned());
11703 auto TypeCmp = OtherPromotedTypeRange.compare(Value);
11704 if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp,
11705 RhsConstant)) {
11706 TautologicalTypeCompare = true;
11707 Cmp = TypeCmp;
11708 Result = TypeResult;
11709 }
11710 }
11711
11712 // Don't warn if the non-constant operand actually always evaluates to the
11713 // same value.
11714 if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
11715 return false;
11716
11717 // Suppress the diagnostic for an in-range comparison if the constant comes
11718 // from a macro or enumerator. We don't want to diagnose
11719 //
11720 // some_long_value <= INT_MAX
11721 //
11722 // when sizeof(int) == sizeof(long).
11723 bool InRange = Cmp & PromotedRange::InRangeFlag;
11724 if (InRange && IsEnumConstOrFromMacro(S, Constant))
11725 return false;
11726
11727 // A comparison of an unsigned bit-field against 0 is really a type problem,
11728 // even though at the type level the bit-field might promote to 'signed int'.
11729 if (Other->refersToBitField() && InRange && Value == 0 &&
11730 Other->getType()->isUnsignedIntegerOrEnumerationType())
11731 TautologicalTypeCompare = true;
11732
11733 // If this is a comparison to an enum constant, include that
11734 // constant in the diagnostic.
11735 const EnumConstantDecl *ED = nullptr;
11736 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant))
11737 ED = dyn_cast<EnumConstantDecl>(DR->getDecl());
11738
11739 // Should be enough for uint128 (39 decimal digits)
11740 SmallString<64> PrettySourceValue;
11741 llvm::raw_svector_ostream OS(PrettySourceValue);
11742 if (ED) {
11743 OS << '\'' << *ED << "' (" << Value << ")";
11744 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>(
11745 Constant->IgnoreParenImpCasts())) {
11746 OS << (BL->getValue() ? "YES" : "NO");
11747 } else {
11748 OS << Value;
11749 }
11750
11751 if (!TautologicalTypeCompare) {
11752 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range)
11753 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative
11754 << E->getOpcodeStr() << OS.str() << *Result
11755 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
11756 return true;
11757 }
11758
11759 if (IsObjCSignedCharBool) {
11760 S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
11761 S.PDiag(diag::warn_tautological_compare_objc_bool)
11762 << OS.str() << *Result);
11763 return true;
11764 }
11765
11766 // FIXME: We use a somewhat different formatting for the in-range cases and
11767 // cases involving boolean values for historical reasons. We should pick a
11768 // consistent way of presenting these diagnostics.
11769 if (!InRange || Other->isKnownToHaveBooleanValue()) {
11770
11771 S.DiagRuntimeBehavior(
11772 E->getOperatorLoc(), E,
11773 S.PDiag(!InRange ? diag::warn_out_of_range_compare
11774 : diag::warn_tautological_bool_compare)
11775 << OS.str() << classifyConstantValue(Constant) << OtherT
11776 << OtherIsBooleanDespiteType << *Result
11777 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange());
11778 } else {
11779 bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy;
11780 unsigned Diag =
11781 (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0)
11782 ? (HasEnumType(OriginalOther)
11783 ? diag::warn_unsigned_enum_always_true_comparison
11784 : IsCharTy ? diag::warn_unsigned_char_always_true_comparison
11785 : diag::warn_unsigned_always_true_comparison)
11786 : diag::warn_tautological_constant_compare;
11787
11788 S.Diag(E->getOperatorLoc(), Diag)
11789 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result
11790 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
11791 }
11792
11793 return true;
11794}
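
To make the paths above concrete, here is a hypothetical user-code snippet (an editor's illustration, not part of SemaChecking.cpp) showing the two common outcomes: an always-false out-of-range compare and an always-true unsigned compare against 0.

// tautology.cpp -- hypothetical example; the function and names are made up
int check(unsigned char c, unsigned n) {
  if (c > 300)   // constant lies outside [0, 255]: comparison is always false
    return 1;
  if (n >= 0)    // unsigned expression compared >= 0: comparison is always true
    return 2;
  return 0;
}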
11795
11796/// Analyze the operands of the given comparison. Implements the
11797/// fallback case from AnalyzeComparison.
11798static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
11799 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
11800 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
11801}
11802
11803/// Implements -Wsign-compare.
11804///
11805/// \param E the binary operator to check for warnings
11806static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
11807 // The type the comparison is being performed in.
11808 QualType T = E->getLHS()->getType();
11809
11810 // Only analyze comparison operators where both sides have been converted to
11811 // the same type.
11812 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType()))
11813 return AnalyzeImpConvsInComparison(S, E);
11814
11815 // Don't analyze value-dependent comparisons directly.
11816 if (E->isValueDependent())
11817 return AnalyzeImpConvsInComparison(S, E);
11818
11819 Expr *LHS = E->getLHS();
11820 Expr *RHS = E->getRHS();
11821
11822 if (T->isIntegralType(S.Context)) {
11823 Optional<llvm::APSInt> RHSValue = RHS->getIntegerConstantExpr(S.Context);
11824 Optional<llvm::APSInt> LHSValue = LHS->getIntegerConstantExpr(S.Context);
11825
11826 // We don't care about expressions whose result is a constant.
11827 if (RHSValue && LHSValue)
11828 return AnalyzeImpConvsInComparison(S, E);
11829
11830 // We only care about expressions where just one side is literal
11831 if ((bool)RHSValue ^ (bool)LHSValue) {
11832 // Is the constant on the RHS or LHS?
11833 const bool RhsConstant = (bool)RHSValue;
11834 Expr *Const = RhsConstant ? RHS : LHS;
11835 Expr *Other = RhsConstant ? LHS : RHS;
11836 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue;
11837
11838 // Check whether an integer constant comparison results in a value
11839 // of 'true' or 'false'.
11840 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant))
11841 return AnalyzeImpConvsInComparison(S, E);
11842 }
11843 }
11844
11845 if (!T->hasUnsignedIntegerRepresentation()) {
11846 // We don't do anything special if this isn't an unsigned integral
11847 // comparison: we're only interested in integral comparisons, and
11848 // signed comparisons only happen in cases we don't care to warn about.
11849 return AnalyzeImpConvsInComparison(S, E);
11850 }
11851
11852 LHS = LHS->IgnoreParenImpCasts();
11853 RHS = RHS->IgnoreParenImpCasts();
11854
11855 if (!S.getLangOpts().CPlusPlus) {
11856 // Avoid warning about comparison of integers with different signs when
11857 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of
11858 // the type of `E`.
11859 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType()))
11860 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
11861 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType()))
11862 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
11863 }
11864
11865 // Check to see if one of the (unmodified) operands is of different
11866 // signedness.
11867 Expr *signedOperand, *unsignedOperand;
11868 if (LHS->getType()->hasSignedIntegerRepresentation()) {
11869 assert(!RHS->getType()->hasSignedIntegerRepresentation() &&
11870 "unsigned comparison between two signed integer expressions?");
11871 signedOperand = LHS;
11872 unsignedOperand = RHS;
11873 } else if (RHS->getType()->hasSignedIntegerRepresentation()) {
11874 signedOperand = RHS;
11875 unsignedOperand = LHS;
11876 } else {
11877 return AnalyzeImpConvsInComparison(S, E);
11878 }
11879
11880 // Otherwise, calculate the effective range of the signed operand.
11881 IntRange signedRange = GetExprRange(
11882 S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true);
11883
11884 // Go ahead and analyze implicit conversions in the operands. Note
11885 // that we skip the implicit conversions on both sides.
11886 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
11887 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());
11888
11889 // If the signed range is non-negative, -Wsign-compare won't fire.
11890 if (signedRange.NonNegative)
11891 return;
11892
11893 // For (in)equality comparisons, if the unsigned operand is a
11894 // constant which cannot collide with a overflowed signed operand,
11895 // then reinterpreting the signed operand as unsigned will not
11896 // change the result of the comparison.
11897 if (E->isEqualityOp()) {
11898 unsigned comparisonWidth = S.Context.getIntWidth(T);
11899 IntRange unsignedRange =
11900 GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(),
11901 /*Approximate*/ true);
11902
11903 // We should never be unable to prove that the unsigned operand is
11904 // non-negative.
11905 assert(unsignedRange.NonNegative && "unsigned range includes negative?");
11906
11907 if (unsignedRange.Width < comparisonWidth)
11908 return;
11909 }
11910
11911 S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
11912 S.PDiag(diag::warn_mixed_sign_comparison)
11913 << LHS->getType() << RHS->getType()
11914 << LHS->getSourceRange() << RHS->getSourceRange());
11915}
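
Purely as an editor's illustration (not from this file), the canonical -Wsign-compare case that AnalyzeComparison diagnoses, with a hypothetical function:

// signcompare.cpp -- hypothetical example
bool less(int i, unsigned u) {
  // i is converted to unsigned before the compare; a negative i wraps to a
  // large value, so clang warns about comparing integers of different signs.
  return i < u;
}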
11916
11917/// Analyzes an attempt to assign the given value to a bitfield.
11918///
11919/// Returns true if there was something fishy about the attempt.
11920static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
11921 SourceLocation InitLoc) {
11922 assert(Bitfield->isBitField());
11923 if (Bitfield->isInvalidDecl())
11924 return false;
11925
11926 // White-list bool bitfields.
11927 QualType BitfieldType = Bitfield->getType();
11928 if (BitfieldType->isBooleanType())
11929 return false;
11930
11931 if (BitfieldType->isEnumeralType()) {
11932 EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl();
11933 // If the underlying enum type was not explicitly specified as an unsigned
11934 // type and the enum contains only positive values, MSVC++ will cause an
11935 // inconsistency by storing this as a signed type.
11936 if (S.getLangOpts().CPlusPlus11 &&
11937 !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
11938 BitfieldEnumDecl->getNumPositiveBits() > 0 &&
11939 BitfieldEnumDecl->getNumNegativeBits() == 0) {
11940 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
11941 << BitfieldEnumDecl;
11942 }
11943 }
11944
11945 if (Bitfield->getType()->isBooleanType())
11946 return false;
11947
11948 // Ignore value- or type-dependent expressions.
11949 if (Bitfield->getBitWidth()->isValueDependent() ||
11950 Bitfield->getBitWidth()->isTypeDependent() ||
11951 Init->isValueDependent() ||
11952 Init->isTypeDependent())
11953 return false;
11954
11955 Expr *OriginalInit = Init->IgnoreParenImpCasts();
11956 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);
11957
11958 Expr::EvalResult Result;
11959 if (!OriginalInit->EvaluateAsInt(Result, S.Context,
11960 Expr::SE_AllowSideEffects)) {
11961 // The RHS is not constant. If the RHS has an enum type, make sure the
11962 // bitfield is wide enough to hold all the values of the enum without
11963 // truncation.
11964 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
11965 EnumDecl *ED = EnumTy->getDecl();
11966 bool SignedBitfield = BitfieldType->isSignedIntegerType();
11967
11968 // Enum types are implicitly signed on Windows, so check if there are any
11969 // negative enumerators to see if the enum was intended to be signed or
11970 // not.
11971 bool SignedEnum = ED->getNumNegativeBits() > 0;
11972
11973 // Check for surprising sign changes when assigning enum values to a
11974 // bitfield of different signedness. If the bitfield is signed and we
11975 // have exactly the right number of bits to store this unsigned enum,
11976 // suggest changing the enum to an unsigned type. This typically happens
11977 // on Windows where unfixed enums always use an underlying type of 'int'.
11978 unsigned DiagID = 0;
11979 if (SignedEnum && !SignedBitfield) {
11980 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum;
11981 } else if (SignedBitfield && !SignedEnum &&
11982 ED->getNumPositiveBits() == FieldWidth) {
11983 DiagID = diag::warn_signed_bitfield_enum_conversion;
11984 }
11985
11986 if (DiagID) {
11987 S.Diag(InitLoc, DiagID) << Bitfield << ED;
11988 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo();
11989 SourceRange TypeRange =
11990 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange();
11991 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign)
11992 << SignedEnum << TypeRange;
11993 }
11994
11995 // Compute the required bitwidth. If the enum has negative values, we need
11996 // one more bit than the normal number of positive bits to represent the
11997 // sign bit.
11998 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1,
11999 ED->getNumNegativeBits())
12000 : ED->getNumPositiveBits();
12001
12002 // Check the bitwidth.
12003 if (BitsNeeded > FieldWidth) {
12004 Expr *WidthExpr = Bitfield->getBitWidth();
12005 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum)
12006 << Bitfield << ED;
12007 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield)
12008 << BitsNeeded << ED << WidthExpr->getSourceRange();
12009 }
12010 }
12011
12012 return false;
12013 }
12014
12015 llvm::APSInt Value = Result.Val.getInt();
12016
12017 unsigned OriginalWidth = Value.getBitWidth();
12018
12019 if (!Value.isSigned() || Value.isNegative())
12020 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit))
12021 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not)
12022 OriginalWidth = Value.getMinSignedBits();
12023
12024 if (OriginalWidth <= FieldWidth)
12025 return false;
12026
12027 // Compute the value which the bitfield will contain.
12028 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth);
12029 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType());
12030
12031 // Check whether the stored value is equal to the original value.
12032 TruncatedValue = TruncatedValue.extend(OriginalWidth);
12033 if (llvm::APSInt::isSameValue(Value, TruncatedValue))
12034 return false;
12035
12036 // Special-case bitfields of width 1: booleans are naturally 0/1, and
12037 // therefore don't strictly fit into a signed bitfield of width 1.
12038 if (FieldWidth == 1 && Value == 1)
12039 return false;
12040
12041 std::string PrettyValue = toString(Value, 10);
12042 std::string PrettyTrunc = toString(TruncatedValue, 10);
12043
12044 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant)
12045 << PrettyValue << PrettyTrunc << OriginalInit->getType()
12046 << Init->getSourceRange();
12047
12048 return true;
12049}
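
A hedged illustration (not part of this file) of the constant-truncation branch above; the struct and values are invented:

// bitfield.cpp -- hypothetical example
struct Flags { unsigned mode : 3; };  // can represent 0..7
void setMode(Flags &f) {
  f.mode = 9;  // truncated to 3 bits, the stored value is 1, so the
               // bit-field precision warning above fires
}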
12050
12051/// Analyze the given simple or compound assignment for warning-worthy
12052/// operations.
12053static void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
12054 // Just recurse on the LHS.
12055 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
12056
12057 // We want to recurse on the RHS as normal unless we're assigning to
12058 // a bitfield.
12059 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) {
12060 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(),
12061 E->getOperatorLoc())) {
12062 // Recurse, ignoring any implicit conversions on the RHS.
12063 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(),
12064 E->getOperatorLoc());
12065 }
12066 }
12067
12068 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
12069
12070 // Diagnose implicitly sequentially-consistent atomic assignment.
12071 if (E->getLHS()->getType()->isAtomicType())
12072 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
12073}
12074
12075/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
12076static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T,
12077 SourceLocation CContext, unsigned diag,
12078 bool pruneControlFlow = false) {
12079 if (pruneControlFlow) {
12080 S.DiagRuntimeBehavior(E->getExprLoc(), E,
12081 S.PDiag(diag)
12082 << SourceType << T << E->getSourceRange()
12083 << SourceRange(CContext));
12084 return;
12085 }
12086 S.Diag(E->getExprLoc(), diag)
12087 << SourceType << T << E->getSourceRange() << SourceRange(CContext);
12088}
12089
12090/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
12091static void DiagnoseImpCast(Sema &S, Expr *E, QualType T,
12092 SourceLocation CContext,
12093 unsigned diag, bool pruneControlFlow = false) {
12094 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow);
12095}
12096
12097static bool isObjCSignedCharBool(Sema &S, QualType Ty) {
12098 return Ty->isSpecificBuiltinType(BuiltinType::SChar) &&
12099 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty);
12100}
12101
12102static void adornObjCBoolConversionDiagWithTernaryFixit(
12103 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) {
12104 Expr *Ignored = SourceExpr->IgnoreImplicit();
12105 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored))
12106 Ignored = OVE->getSourceExpr();
12107 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) ||
12108 isa<BinaryOperator>(Ignored) ||
12109 isa<CXXOperatorCallExpr>(Ignored);
12110 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc());
12111 if (NeedsParens)
12112 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(")
12113 << FixItHint::CreateInsertion(EndLoc, ")");
12114 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO");
12115}
12116
12117/// Diagnose an implicit cast from a floating point value to an integer value.
12118static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
12119 SourceLocation CContext) {
12120 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool);
12121 const bool PruneWarnings = S.inTemplateInstantiation();
12122
12123 Expr *InnerE = E->IgnoreParenImpCasts();
12124 // We also want to warn on, e.g., "int i = -1.234"
12125 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE))
12126 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus)
12127 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts();
12128
12129 const bool IsLiteral =
12130 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE);
12131
12132 llvm::APFloat Value(0.0);
12133 bool IsConstant =
12134 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects);
12135 if (!IsConstant) {
12136 if (isObjCSignedCharBool(S, T)) {
12137 return adornObjCBoolConversionDiagWithTernaryFixit(
12138 S, E,
12139 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool)
12140 << E->getType());
12141 }
12142
12143 return DiagnoseImpCast(S, E, T, CContext,
12144 diag::warn_impcast_float_integer, PruneWarnings);
12145 }
12146
12147 bool isExact = false;
12148
12149 llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
12150 T->hasUnsignedIntegerRepresentation());
12151 llvm::APFloat::opStatus Result = Value.convertToInteger(
12152 IntegerValue, llvm::APFloat::rmTowardZero, &isExact);
12153
12154 // FIXME: Force the precision of the source value down so we don't print
12155 // digits which are usually useless (we don't really care here if we
12156 // truncate a digit by accident in edge cases). Ideally, APFloat::toString
12157 // would automatically print the shortest representation, but it's a bit
12158 // tricky to implement.
12159 SmallString<16> PrettySourceValue;
12160 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics());
12161 precision = (precision * 59 + 195) / 196;
12162 Value.toString(PrettySourceValue, precision);
12163
12164 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) {
12165 return adornObjCBoolConversionDiagWithTernaryFixit(
12166 S, E,
12167 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool)
12168 << PrettySourceValue);
12169 }
12170
12171 if (Result == llvm::APFloat::opOK && isExact) {
12172 if (IsLiteral) return;
12173 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer,
12174 PruneWarnings);
12175 }
12176
12177 // Conversion of a floating-point value to a non-bool integer where the
12178 // integral part cannot be represented by the integer type is undefined.
12179 if (!IsBool && Result == llvm::APFloat::opInvalidOp)
12180 return DiagnoseImpCast(
12181 S, E, T, CContext,
12182 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range
12183 : diag::warn_impcast_float_to_integer_out_of_range,
12184 PruneWarnings);
12185
12186 unsigned DiagID = 0;
12187 if (IsLiteral) {
12188 // Warn on floating point literal to integer.
12189 DiagID = diag::warn_impcast_literal_float_to_integer;
12190 } else if (IntegerValue == 0) {
12191 if (Value.isZero()) { // Skip -0.0 to 0 conversion.
12192 return DiagnoseImpCast(S, E, T, CContext,
12193 diag::warn_impcast_float_integer, PruneWarnings);
12194 }
12195 // Warn on non-zero to zero conversion.
12196 DiagID = diag::warn_impcast_float_to_integer_zero;
12197 } else {
12198 if (IntegerValue.isUnsigned()) {
12199 if (!IntegerValue.isMaxValue()) {
12200 return DiagnoseImpCast(S, E, T, CContext,
12201 diag::warn_impcast_float_integer, PruneWarnings);
12202 }
12203 } else { // IntegerValue.isSigned()
12204 if (!IntegerValue.isMaxSignedValue() &&
12205 !IntegerValue.isMinSignedValue()) {
12206 return DiagnoseImpCast(S, E, T, CContext,
12207 diag::warn_impcast_float_integer, PruneWarnings);
12208 }
12209 }
12210 // Warn on evaluatable floating point expression to integer conversion.
12211 DiagID = diag::warn_impcast_float_to_integer;
12212 }
12213
12214 SmallString<16> PrettyTargetValue;
12215 if (IsBool)
12216 PrettyTargetValue = Value.isZero() ? "false" : "true";
12217 else
12218 IntegerValue.toString(PrettyTargetValue);
12219
12220 if (PruneWarnings) {
12221 S.DiagRuntimeBehavior(E->getExprLoc(), E,
12222 S.PDiag(DiagID)
12223 << E->getType() << T.getUnqualifiedType()
12224 << PrettySourceValue << PrettyTargetValue
12225 << E->getSourceRange() << SourceRange(CContext));
12226 } else {
12227 S.Diag(E->getExprLoc(), DiagID)
12228 << E->getType() << T.getUnqualifiedType() << PrettySourceValue
12229 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext);
12230 }
12231}
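
For concreteness, a hypothetical snippet (editor's illustration, not from SemaChecking.cpp) hitting two of the branches above, the literal value-change case and the out-of-range case:

// floatconv.cpp -- hypothetical example
void convert() {
  int a = 1.5;     // literal float to int: the value changes from 1.5 to 1
  int b = 1.0e20;  // integral part cannot be represented in 'int': flagged as undefined
  (void)a; (void)b;
}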
12232
12233/// Analyze the given compound assignment for the possible losing of
12234/// floating-point precision.
12235static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) {
12236 assert(isa<CompoundAssignOperator>(E) &&
12237 "Must be compound assignment operation");
12238 // Recurse on the LHS and RHS in here
12239 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
12240 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
12241
12242 if (E->getLHS()->getType()->isAtomicType())
12243 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst);
12244
12245 // Now check the outermost expression
12246 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>();
12247 const auto *RBT = cast<CompoundAssignOperator>(E)
12248 ->getComputationResultType()
12249 ->getAs<BuiltinType>();
12250
12251 // The below checks assume source is floating point.
12252 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return;
12253
12254 // If source is floating point but target is an integer.
12255 if (ResultBT->isInteger())
12256 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(),
12257 E->getExprLoc(), diag::warn_impcast_float_integer);
12258
12259 if (!ResultBT->isFloatingPoint())
12260 return;
12261
12262 // If both source and target are floating points, warn about losing precision.
12263 int Order = S.getASTContext().getFloatingTypeSemanticOrder(
12264 QualType(ResultBT, 0), QualType(RBT, 0));
12265 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
12266 // warn about dropping FP rank.
12267 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(),
12268 diag::warn_impcast_float_result_precision);
12269}
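
A hypothetical snippet (not from this file) for the two compound-assignment outcomes checked above: a float-to-integer store and a store that drops floating-point rank:

// compound.cpp -- hypothetical example
void accumulate() {
  int i = 0;
  float f = 0.0f;
  double d = 0.1;
  i += d;  // source of the computation is floating point, target is integer
  f += d;  // computed in 'double', stored into 'float': precision is lost
}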
12270
12271static std::string PrettyPrintInRange(const llvm::APSInt &Value,
12272 IntRange Range) {
12273 if (!Range.Width) return "0";
12274
12275 llvm::APSInt ValueInRange = Value;
12276 ValueInRange.setIsSigned(!Range.NonNegative);
12277 ValueInRange = ValueInRange.trunc(Range.Width);
12278 return toString(ValueInRange, 10);
12279}
12280
12281static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) {
12282 if (!isa<ImplicitCastExpr>(Ex))
12283 return false;
12284
12285 Expr *InnerE = Ex->IgnoreParenImpCasts();
12286 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr();
12287 const Type *Source =
12288 S.Context.getCanonicalType(InnerE->getType()).getTypePtr();
12289 if (Target->isDependentType())
12290 return false;
12291
12292 const BuiltinType *FloatCandidateBT =
12293 dyn_cast<BuiltinType>(ToBool ? Source : Target);
12294 const Type *BoolCandidateType = ToBool ? Target : Source;
12295
12296 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) &&
12297 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint()));
12298}
12299
12300static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall,
12301 SourceLocation CC) {
12302 unsigned NumArgs = TheCall->getNumArgs();
12303 for (unsigned i = 0; i < NumArgs; ++i) {
12304 Expr *CurrA = TheCall->getArg(i);
12305 if (!IsImplicitBoolFloatConversion(S, CurrA, true))
12306 continue;
12307
12308 bool IsSwapped = ((i > 0) &&
12309 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false));
12310 IsSwapped |= ((i < (NumArgs - 1)) &&
12311 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false));
12312 if (IsSwapped) {
12313 // Warn on this floating-point to bool conversion.
12314 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(),
12315 CurrA->getType(), CC,
12316 diag::warn_impcast_floating_point_to_bool);
12317 }
12318 }
12319}
12320
12321static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
12322 SourceLocation CC) {
12323 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer,
12324 E->getExprLoc()))
12325 return;
12326
12327 // Don't warn on functions which have return type nullptr_t.
12328 if (isa<CallExpr>(E))
12329 return;
12330
12331 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr).
12332 const Expr::NullPointerConstantKind NullKind =
12333 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull);
12334 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr)
12335 return;
12336
12337 // Return if target type is a safe conversion.
12338 if (T->isAnyPointerType() || T->isBlockPointerType() ||
12339 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType())
12340 return;
12341
12342 SourceLocation Loc = E->getSourceRange().getBegin();
12343
12344 // Venture through the macro stacks to get to the source of macro arguments.
12345 // The new location is a better location than the complete location that was
12346 // passed in.
12347 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc);
12348 CC = S.SourceMgr.getTopMacroCallerLoc(CC);
12349
12350 // __null is usually wrapped in a macro. Go up a macro if that is the case.
12351 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) {
12352 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
12353 Loc, S.SourceMgr, S.getLangOpts());
12354 if (MacroName == "NULL")
12355 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin();
12356 }
12357
12358 // Only warn if the null and context location are in the same macro expansion.
12359 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC))
12360 return;
12361
12362 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
12363 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC)
12364 << FixItHint::CreateReplacement(Loc,
12365 S.getFixItZeroLiteralForType(T, Loc));
12366}
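
An illustrative C++ translation unit (editor's example, not part of the file) for the NULL-to-integer check above; NULL expands to the compiler's __null, which is the NPCK_GNUNull case:

// nullconv.cpp -- hypothetical example
#include <cstddef>
int zero() {
  int x = NULL;  // NULL constant converted to 'int'; the fix-it suggests writing 0
  return x;
}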
12367
12368static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
12369 ObjCArrayLiteral *ArrayLiteral);
12370
12371static void
12372checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
12373 ObjCDictionaryLiteral *DictionaryLiteral);
12374
12375/// Check a single element within a collection literal against the
12376/// target element type.
12377static void checkObjCCollectionLiteralElement(Sema &S,
12378 QualType TargetElementType,
12379 Expr *Element,
12380 unsigned ElementKind) {
12381 // Skip a bitcast to 'id' or qualified 'id'.
12382 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) {
12383 if (ICE->getCastKind() == CK_BitCast &&
12384 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>())
12385 Element = ICE->getSubExpr();
12386 }
12387
12388 QualType ElementType = Element->getType();
12389 ExprResult ElementResult(Element);
12390 if (ElementType->getAs<ObjCObjectPointerType>() &&
12391 S.CheckSingleAssignmentConstraints(TargetElementType,
12392 ElementResult,
12393 false, false)
12394 != Sema::Compatible) {
12395 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element)
12396 << ElementType << ElementKind << TargetElementType
12397 << Element->getSourceRange();
12398 }
12399
12400 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element))
12401 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral);
12402 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element))
12403 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral);
12404}
12405
12406/// Check an Objective-C array literal being converted to the given
12407/// target type.
12408static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
12409 ObjCArrayLiteral *ArrayLiteral) {
12410 if (!S.NSArrayDecl)
12411 return;
12412
12413 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
12414 if (!TargetObjCPtr)
12415 return;
12416
12417 if (TargetObjCPtr->isUnspecialized() ||
12418 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
12419 != S.NSArrayDecl->getCanonicalDecl())
12420 return;
12421
12422 auto TypeArgs = TargetObjCPtr->getTypeArgs();
12423 if (TypeArgs.size() != 1)
12424 return;
12425
12426 QualType TargetElementType = TypeArgs[0];
12427 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) {
12428 checkObjCCollectionLiteralElement(S, TargetElementType,
12429 ArrayLiteral->getElement(I),
12430 0);
12431 }
12432}
12433
12434/// Check an Objective-C dictionary literal being converted to the given
12435/// target type.
12436static void
12437checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
12438 ObjCDictionaryLiteral *DictionaryLiteral) {
12439 if (!S.NSDictionaryDecl)
12440 return;
12441
12442 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
12443 if (!TargetObjCPtr)
12444 return;
12445
12446 if (TargetObjCPtr->isUnspecialized() ||
12447 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
12448 != S.NSDictionaryDecl->getCanonicalDecl())
12449 return;
12450
12451 auto TypeArgs = TargetObjCPtr->getTypeArgs();
12452 if (TypeArgs.size() != 2)
12453 return;
12454
12455 QualType TargetKeyType = TypeArgs[0];
12456 QualType TargetObjectType = TypeArgs[1];
12457 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
12458 auto Element = DictionaryLiteral->getKeyValueElement(I);
12459 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1);
12460 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2);
12461 }
12462}
12463
12464// Helper function to filter out cases for constant width constant conversion.
12465// Don't warn on char array initialization or for non-decimal values.
12466static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
12467 SourceLocation CC) {
12468 // If initializing from a constant, and the constant starts with '0',
12469 // then it is a binary, octal, or hexadecimal. Allow these constants
12470 // to fill all the bits, even if there is a sign change.
12471 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
12472 const char FirstLiteralCharacter =
12473 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
12474 if (FirstLiteralCharacter == '0')
12475 return false;
12476 }
12477
12478 // If the CC location points to a '{', and the type is char, then assume
12479 // it is an array initialization.
12480 if (CC.isValid() && T->isCharType()) {
12481 const char FirstContextCharacter =
12482 S.getSourceManager().getCharacterData(CC)[0];
12483 if (FirstContextCharacter == '{')
12484 return false;
12485 }
12486
12487 return true;
12488}
12489
12490static const IntegerLiteral *getIntegerLiteral(Expr *E) {
12491 const auto *IL = dyn_cast<IntegerLiteral>(E);
12492 if (!IL) {
12493 if (auto *UO = dyn_cast<UnaryOperator>(E)) {
12494 if (UO->getOpcode() == UO_Minus)
12495 return dyn_cast<IntegerLiteral>(UO->getSubExpr());
12496 }
12497 }
12498
12499 return IL;
12500}
12501
12502static void DiagnoseIntInBoolContext(Sema &S, Expr *E) {
12503 E = E->IgnoreParenImpCasts();
12504 SourceLocation ExprLoc = E->getExprLoc();
12505
12506 if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
12507 BinaryOperator::Opcode Opc = BO->getOpcode();
12508 Expr::EvalResult Result;
12509 // Do not diagnose unsigned shifts.
12510 if (Opc == BO_Shl) {
12511 const auto *LHS = getIntegerLiteral(BO->getLHS());
12512 const auto *RHS = getIntegerLiteral(BO->getRHS());
12513 if (LHS && LHS->getValue() == 0)
12514 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0;
12515 else if (!E->isValueDependent() && LHS && RHS &&
12516 RHS->getValue().isNonNegative() &&
12517 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects))
12518 S.Diag(ExprLoc, diag::warn_left_shift_always)
12519 << (Result.Val.getInt() != 0);
12520 else if (E->getType()->isSignedIntegerType())
12521 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E;
12522 }
12523 }
12524
12525 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
12526 const auto *LHS = getIntegerLiteral(CO->getTrueExpr());
12527 const auto *RHS = getIntegerLiteral(CO->getFalseExpr());
12528 if (!LHS || !RHS)
12529 return;
12530 if ((LHS->getValue() == 0 || LHS->getValue() == 1) &&
12531 (RHS->getValue() == 0 || RHS->getValue() == 1))
12532 // Do not diagnose common idioms.
12533 return;
12534 if (LHS->getValue() != 0 && RHS->getValue() != 0)
12535 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true);
12536 }
12537}
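
The two patterns flagged above, in a hypothetical snippet (not part of this file):

// boolcontext.cpp -- hypothetical example
bool select(int x, bool cond) {
  bool a = 1 << x;        // result of '<<' used in a boolean context
  bool b = cond ? 4 : 8;  // both arms are non-zero constants: always true as a bool
  return a && b;
}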
12538
12539static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
12540 SourceLocation CC,
12541 bool *ICContext = nullptr,
12542 bool IsListInit = false) {
12543 if (E->isTypeDependent() || E->isValueDependent()) return;
12544
12545 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
12546 const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
12547 if (Source == Target) return;
12548 if (Target->isDependentType()) return;
12549
12550 // If the conversion context location is invalid don't complain. We also
12551 // don't want to emit a warning if the issue occurs from the expansion of
12552 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
12553 // delay this check as long as possible. Once we detect we are in that
12554 // scenario, we just return.
12555 if (CC.isInvalid())
12556 return;
12557
12558 if (Source->isAtomicType())
12559 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);
12560
12561 // Diagnose implicit casts to bool.
12562 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
12563 if (isa<StringLiteral>(E))
12564 // Warn on string literal to bool. Checks for string literals in logical
12565 // and expressions, for instance, assert(0 && "error here"), are
12566 // prevented by a check in AnalyzeImplicitConversions().
12567 return DiagnoseImpCast(S, E, T, CC,
12568 diag::warn_impcast_string_literal_to_bool);
12569 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
12570 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
12571 // This covers the literal expressions that evaluate to Objective-C
12572 // objects.
12573 return DiagnoseImpCast(S, E, T, CC,
12574 diag::warn_impcast_objective_c_literal_to_bool);
12575 }
12576 if (Source->isPointerType() || Source->canDecayToPointerType()) {
12577 // Warn on pointer to bool conversion that is always true.
12578 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
12579 SourceRange(CC));
12580 }
12581 }
12582
12583 // If we're converting a constant to an ObjC BOOL on a platform where BOOL
12584 // is a typedef for signed char (macOS), then that constant value has to be 1
12585 // or 0.
12586 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) {
12587 Expr::EvalResult Result;
12588 if (E->EvaluateAsInt(Result, S.getASTContext(),
12589 Expr::SE_AllowSideEffects)) {
12590 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) {
12591 adornObjCBoolConversionDiagWithTernaryFixit(
12592 S, E,
12593 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool)
12594 << toString(Result.Val.getInt(), 10));
12595 }
12596 return;
12597 }
12598 }
12599
12600 // Check implicit casts from Objective-C collection literals to specialized
12601 // collection types, e.g., NSArray<NSString *> *.
12602 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E))
12603 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral);
12604 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E))
12605 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral);
12606
12607 // Strip vector types.
12608 if (isa<VectorType>(Source)) {
12609 if (Target->isVLSTBuiltinType() &&
12610 (S.Context.areCompatibleSveTypes(QualType(Target, 0),
12611 QualType(Source, 0)) ||
12612 S.Context.areLaxCompatibleSveTypes(QualType(Target, 0),
12613 QualType(Source, 0))))
12614 return;
12615
12616 if (!isa<VectorType>(Target)) {
12617 if (S.SourceMgr.isInSystemMacro(CC))
12618 return;
12619 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar);
12620 }
12621
12622 // If the vector cast is between two vectors of the same size, it is
12623 // a bitcast, not a conversion.
12624 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
12625 return;
12626
12627 Source = cast<VectorType>(Source)->getElementType().getTypePtr();
12628 Target = cast<VectorType>(Target)->getElementType().getTypePtr();
12629 }
12630 if (auto VecTy = dyn_cast<VectorType>(Target))
12631 Target = VecTy->getElementType().getTypePtr();
12632
12633 // Strip complex types.
12634 if (isa<ComplexType>(Source)) {
12635 if (!isa<ComplexType>(Target)) {
12636 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType())
12637 return;
12638
12639 return DiagnoseImpCast(S, E, T, CC,
12640 S.getLangOpts().CPlusPlus
12641 ? diag::err_impcast_complex_scalar
12642 : diag::warn_impcast_complex_scalar);
12643 }
12644
12645 Source = cast<ComplexType>(Source)->getElementType().getTypePtr();
12646 Target = cast<ComplexType>(Target)->getElementType().getTypePtr();
12647 }
12648
12649 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source);
12650 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target);
12651
12652 // If the source is floating point...
12653 if (SourceBT && SourceBT->isFloatingPoint()) {
12654 // ...and the target is floating point...
12655 if (TargetBT && TargetBT->isFloatingPoint()) {
12656 // ...then warn if we're dropping FP rank.
12657
12658 int Order = S.getASTContext().getFloatingTypeSemanticOrder(
12659 QualType(SourceBT, 0), QualType(TargetBT, 0));
12660 if (Order > 0) {
12661 // Don't warn about float constants that are precisely
12662 // representable in the target type.
12663 Expr::EvalResult result;
12664 if (E->EvaluateAsRValue(result, S.Context)) {
12665 // Value might be a float, a float vector, or a float complex.
12666 if (IsSameFloatAfterCast(result.Val,
12667 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)),
12668 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0))))
12669 return;
12670 }
12671
12672 if (S.SourceMgr.isInSystemMacro(CC))
12673 return;
12674
12675 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
12676 }
12677 // ... or possibly if we're increasing rank, too
12678 else if (Order < 0) {
12679 if (S.SourceMgr.isInSystemMacro(CC))
12680 return;
12681
12682 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion);
12683 }
12684 return;
12685 }
12686
12687 // If the target is integral, always warn.
12688 if (TargetBT && TargetBT->isInteger()) {
12689 if (S.SourceMgr.isInSystemMacro(CC))
12690 return;
12691
12692 DiagnoseFloatingImpCast(S, E, T, CC);
12693 }
12694
12695 // Detect the case where a call result is converted from floating-point to
12696 // bool, and the final argument to the call is converted from bool, to
12697 // discover this typo:
12698 //
12699 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;"
12700 //
12701 // FIXME: This is an incredibly special case; is there some more general
12702 // way to detect this class of misplaced-parentheses bug?
12703 if (Target->isBooleanType() && isa<CallExpr>(E)) {
12704 // Check last argument of function call to see if it is an
12705 // implicit cast from a type matching the type the result
12706 // is being cast to.
12707 CallExpr *CEx = cast<CallExpr>(E);
12708 if (unsigned NumArgs = CEx->getNumArgs()) {
12709 Expr *LastA = CEx->getArg(NumArgs - 1);
12710 Expr *InnerE = LastA->IgnoreParenImpCasts();
12711 if (isa<ImplicitCastExpr>(LastA) &&
12712 InnerE->getType()->isBooleanType()) {
12713 // Warn on this floating-point to bool conversion
12714 DiagnoseImpCast(S, E, T, CC,
12715 diag::warn_impcast_floating_point_to_bool);
12716 }
12717 }
12718 }
12719 return;
12720 }
12721
12722 // Valid casts involving fixed point types should be accounted for here.
12723 if (Source->isFixedPointType()) {
12724 if (Target->isUnsaturatedFixedPointType()) {
12725 Expr::EvalResult Result;
12726 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects,
12727 S.isConstantEvaluated())) {
12728 llvm::APFixedPoint Value = Result.Val.getFixedPoint();
12729 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T);
12730 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T);
12731 if (Value > MaxVal || Value < MinVal) {
12732 S.DiagRuntimeBehavior(E->getExprLoc(), E,
12733 S.PDiag(diag::warn_impcast_fixed_point_range)
12734 << Value.toString() << T
12735 << E->getSourceRange()
12736 << clang::SourceRange(CC));
12737 return;
12738 }
12739 }
12740 } else if (Target->isIntegerType()) {
12741 Expr::EvalResult Result;
12742 if (!S.isConstantEvaluated() &&
12743 E->EvaluateAsFixedPoint(Result, S.Context,
12744 Expr::SE_AllowSideEffects)) {
12745 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint();
12746
12747 bool Overflowed;
12748 llvm::APSInt IntResult = FXResult.convertToInt(
12749 S.Context.getIntWidth(T),
12750 Target->isSignedIntegerOrEnumerationType(), &Overflowed);
12751
12752 if (Overflowed) {
12753 S.DiagRuntimeBehavior(E->getExprLoc(), E,
12754 S.PDiag(diag::warn_impcast_fixed_point_range)
12755 << FXResult.toString() << T
12756 << E->getSourceRange()
12757 << clang::SourceRange(CC));
12758 return;
12759 }
12760 }
12761 }
12762 } else if (Target->isUnsaturatedFixedPointType()) {
12763 if (Source->isIntegerType()) {
12764 Expr::EvalResult Result;
12765 if (!S.isConstantEvaluated() &&
12766 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) {
12767 llvm::APSInt Value = Result.Val.getInt();
12768
12769 bool Overflowed;
12770 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue(
12771 Value, S.Context.getFixedPointSemantics(T), &Overflowed);
12772
12773 if (Overflowed) {
12774 S.DiagRuntimeBehavior(E->getExprLoc(), E,
12775 S.PDiag(diag::warn_impcast_fixed_point_range)
12776 << toString(Value, /*Radix=*/10) << T
12777 << E->getSourceRange()
12778 << clang::SourceRange(CC));
12779 return;
12780 }
12781 }
12782 }
12783 }
12784
12785 // If we are casting an integer type to a floating point type without
12786 // initialization-list syntax, we might lose accuracy if the floating
12787 // point type has a narrower significand than the integer type.
12788 if (SourceBT && TargetBT && SourceBT->isIntegerType() &&
12789 TargetBT->isFloatingType() && !IsListInit) {
12790 // Determine the number of precision bits in the source integer type.
12791 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(),
12792 /*Approximate*/ true);
12793 unsigned int SourcePrecision = SourceRange.Width;
12794
12795 // Determine the number of precision bits in the
12796 // target floating point type.
12797 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision(
12798 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)));
12799
12800 if (SourcePrecision > 0 && TargetPrecision > 0 &&
12801 SourcePrecision > TargetPrecision) {
12802
12803 if (Optional<llvm::APSInt> SourceInt =
12804 E->getIntegerConstantExpr(S.Context)) {
12805 // If the source integer is a constant, convert it to the target
12806 // floating point type. Issue a warning if the value changes
12807 // during the whole conversion.
12808 llvm::APFloat TargetFloatValue(
12809 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)));
12810 llvm::APFloat::opStatus ConversionStatus =
12811 TargetFloatValue.convertFromAPInt(
12812 *SourceInt, SourceBT->isSignedInteger(),
12813 llvm::APFloat::rmNearestTiesToEven);
12814
12815 if (ConversionStatus != llvm::APFloat::opOK) {
12816 SmallString<32> PrettySourceValue;
12817 SourceInt->toString(PrettySourceValue, 10);
12818 SmallString<32> PrettyTargetValue;
12819 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision);
12820
12821 S.DiagRuntimeBehavior(
12822 E->getExprLoc(), E,
12823 S.PDiag(diag::warn_impcast_integer_float_precision_constant)
12824 << PrettySourceValue << PrettyTargetValue << E->getType() << T
12825 << E->getSourceRange() << clang::SourceRange(CC));
12826 }
12827 } else {
12828 // Otherwise, the implicit conversion may lose precision.
12829 DiagnoseImpCast(S, E, T, CC,
12830 diag::warn_impcast_integer_float_precision);
12831 }
12832 }
12833 }
12834
12835 DiagnoseNullConversion(S, E, T, CC);
12836
12837 S.DiscardMisalignedMemberAddress(Target, E);
12838
12839 if (Target->isBooleanType())
12840 DiagnoseIntInBoolContext(S, E);
12841
12842 if (!Source->isIntegerType() || !Target->isIntegerType())
12843 return;
12844
12845 // TODO: remove this early return once the false positives for constant->bool
12846 // in templates, macros, etc, are reduced or removed.
12847 if (Target->isSpecificBuiltinType(BuiltinType::Bool))
12848 return;
12849
12850 if (isObjCSignedCharBool(S, T) && !Source->isCharType() &&
12851 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) {
12852 return adornObjCBoolConversionDiagWithTernaryFixit(
12853 S, E,
12854 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool)
12855 << E->getType());
12856 }
12857
12858 IntRange SourceTypeRange =
12859 IntRange::forTargetOfCanonicalType(S.Context, Source);
12860 IntRange LikelySourceRange =
12861 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true);
12862 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target);
12863
12864 if (LikelySourceRange.Width > TargetRange.Width) {
12865 // If the source is a constant, use a default-on diagnostic.
12866 // TODO: this should happen for bitfield stores, too.
12867 Expr::EvalResult Result;
12868 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects,
12869 S.isConstantEvaluated())) {
12870 llvm::APSInt Value(32);
12871 Value = Result.Val.getInt();
12872
12873 if (S.SourceMgr.isInSystemMacro(CC))
12874 return;
12875
12876 std::string PrettySourceValue = toString(Value, 10);
12877 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
12878
12879 S.DiagRuntimeBehavior(
12880 E->getExprLoc(), E,
12881 S.PDiag(diag::warn_impcast_integer_precision_constant)
12882 << PrettySourceValue << PrettyTargetValue << E->getType() << T
12883 << E->getSourceRange() << SourceRange(CC));
12884 return;
12885 }
12886
12887 // People want to build with -Wshorten-64-to-32 and not -Wconversion.
12888 if (S.SourceMgr.isInSystemMacro(CC))
12889 return;
12890
12891 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
12892 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
12893 /* pruneControlFlow */ true);
12894 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
12895 }
12896
12897 if (TargetRange.Width > SourceTypeRange.Width) {
12898 if (auto *UO = dyn_cast<UnaryOperator>(E))
12899 if (UO->getOpcode() == UO_Minus)
12900 if (Source->isUnsignedIntegerType()) {
12901 if (Target->isUnsignedIntegerType())
12902 return DiagnoseImpCast(S, E, T, CC,
12903 diag::warn_impcast_high_order_zero_bits);
12904 if (Target->isSignedIntegerType())
12905 return DiagnoseImpCast(S, E, T, CC,
12906 diag::warn_impcast_nonnegative_result);
12907 }
12908 }
12909
12910 if (TargetRange.Width == LikelySourceRange.Width &&
12911 !TargetRange.NonNegative && LikelySourceRange.NonNegative &&
12912 Source->isSignedIntegerType()) {
12913 // When doing a signed-to-signed conversion, warn if the positive source
12914 // value needs exactly the width of the target type, since that will
12915 // cause a negative value to be stored.
12916
12917 Expr::EvalResult Result;
12918 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) &&
12919 !S.SourceMgr.isInSystemMacro(CC)) {
12920 llvm::APSInt Value = Result.Val.getInt();
12921 if (isSameWidthConstantConversion(S, E, T, CC)) {
12922 std::string PrettySourceValue = toString(Value, 10);
12923 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
12924
12925 S.DiagRuntimeBehavior(
12926 E->getExprLoc(), E,
12927 S.PDiag(diag::warn_impcast_integer_precision_constant)
12928 << PrettySourceValue << PrettyTargetValue << E->getType() << T
12929 << E->getSourceRange() << SourceRange(CC));
12930 return;
12931 }
12932 }
12933
12934 // Fall through for non-constants to give a sign conversion warning.
12935 }
12936
12937 if ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) ||
12938 (!TargetRange.NonNegative && LikelySourceRange.NonNegative &&
12939 LikelySourceRange.Width == TargetRange.Width)) {
12940 if (S.SourceMgr.isInSystemMacro(CC))
12941 return;
12942
12943 unsigned DiagID = diag::warn_impcast_integer_sign;
12944
12945 // Traditionally, gcc has warned about this under -Wsign-compare.
12946 // We also want to warn about it in -Wconversion.
12947 // So if -Wconversion is off, use a completely identical diagnostic
12948 // in the sign-compare group.
12949 // The conditional-checking code will
12950 if (ICContext) {
12951 DiagID = diag::warn_impcast_integer_sign_conditional;
12952 *ICContext = true;
12953 }
12954
12955 return DiagnoseImpCast(S, E, T, CC, DiagID);
12956 }
12957
12958 // Diagnose conversions between different enumeration types.
12959 // In C, we pretend that the type of an EnumConstantDecl is its enumeration
12960 // type, to give us better diagnostics.
12961 QualType SourceType = E->getType();
12962 if (!S.getLangOpts().CPlusPlus) {
12963 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
12964 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
12965 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext());
12966 SourceType = S.Context.getTypeDeclType(Enum);
12967 Source = S.Context.getCanonicalType(SourceType).getTypePtr();
12968 }
12969 }
12970
12971 if (const EnumType *SourceEnum = Source->getAs<EnumType>())
12972 if (const EnumType *TargetEnum = Target->getAs<EnumType>())
12973 if (SourceEnum->getDecl()->hasNameForLinkage() &&
12974 TargetEnum->getDecl()->hasNameForLinkage() &&
12975 SourceEnum != TargetEnum) {
12976 if (S.SourceMgr.isInSystemMacro(CC))
12977 return;
12978
12979 return DiagnoseImpCast(S, E, SourceType, T, CC,
12980 diag::warn_impcast_different_enum_types);
12981 }
12982}
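
Lastly, two of the plain -Wconversion / -Wsign-conversion outcomes of CheckImplicitConversion, as a hypothetical snippet (not from this file):

// conversion.cpp -- hypothetical example
void narrow(long l, int i) {
  char c = l;      // likely source range is wider than 'char': integer precision is lost
  unsigned u = i;  // target is unsigned, source may be negative: signedness changes
  (void)c; (void)u;
}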
12983
12984static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
12985 SourceLocation CC, QualType T);
12986
12987static void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
12988 SourceLocation CC, bool &ICContext) {
12989 E = E->IgnoreParenImpCasts();
12990
12991 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E))
12992 return CheckConditionalOperator(S, CO, CC, T);
12993
12994 AnalyzeImplicitConversions(S, E, CC);
12995 if (E->getType() != T)
12996 return CheckImplicitConversion(S, E, T, CC, &ICContext);
12997}
12998
12999static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
13000 SourceLocation CC, QualType T) {
13001 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc());
13002
13003 Expr *TrueExpr = E->getTrueExpr();
13004 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E))
13005 TrueExpr = BCO->getCommon();
13006
13007 bool Suspicious = false;
13008 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious);
13009 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious);
13010
13011 if (T->isBooleanType())
13012 DiagnoseIntInBoolContext(S, E);
13013
13014 // If -Wconversion would have warned about either of the candidates
13015 // for a signedness conversion to the context type...
13016 if (!Suspicious) return;
13017
13018 // ...but it's currently ignored...
13019 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC))
13020 return;
13021
13022 // ...then check whether it would have warned about either of the
13023 // candidates for a signedness conversion to the condition type.
13024 if (E->getType() == T) return;
13025
13026 Suspicious = false;
13027 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(),
13028 E->getType(), CC, &Suspicious);
13029 if (!Suspicious)
13030 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(),
13031 E->getType(), CC, &Suspicious);
13032}
13033
13034/// Check conversion of given expression to boolean.
13035/// Input argument E is a logical expression.
13036static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
13037 if (S.getLangOpts().Bool)
13038 return;
13039 if (E->IgnoreParenImpCasts()->getType()->isAtomicType())
13040 return;
13041 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
13042}
13043
13044namespace {
13045struct AnalyzeImplicitConversionsWorkItem {
13046 Expr *E;
13047 SourceLocation CC;
13048 bool IsListInit;
13049};
13050}
13051
13052/// Data recursive variant of AnalyzeImplicitConversions. Subexpressions
13053/// that should be visited are added to WorkList.
13054static void AnalyzeImplicitConversions(
13055 Sema &S, AnalyzeImplicitConversionsWorkItem Item,
13056 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) {
13057 Expr *OrigE = Item.E;
13058 SourceLocation CC = Item.CC;
13059
13060 QualType T = OrigE->getType();
13061 Expr *E = OrigE->IgnoreParenImpCasts();
13062
13063 // Propagate whether we are in a C++ list initialization expression.
13064 // If so, we do not issue warnings for implicit int-float conversion
13065 // precision loss, because C++11 narrowing already handles it.
13066 bool IsListInit = Item.IsListInit ||
13067 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus);
13068
13069 if (E->isTypeDependent() || E->isValueDependent())
13070 return;
13071
13072 Expr *SourceExpr = E;
13073 // Examine, but don't traverse into the source expression of an
13074 // OpaqueValueExpr, since it may have multiple parents and we don't want to
13075 // emit duplicate diagnostics. It's fine to examine the form or attempt to
13076 // evaluate it in the context of checking the specific conversion to T though.
13077 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E))
13078 if (auto *Src = OVE->getSourceExpr())
13079 SourceExpr = Src;
13080
13081 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr))
13082 if (UO->getOpcode() == UO_Not &&
13083 UO->getSubExpr()->isKnownToHaveBooleanValue())
13084 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool)
13085 << OrigE->getSourceRange() << T->isBooleanType()
13086 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!");
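// Illustrative user code (assumed) for the diagnostic just above: applying
// '~' to a value known to be boolean is flagged, with a fix-it replacing the
// operator with '!':
//
//   bool b = f();
//   if (~b) { ... }   // warning: bitwise negation of a boolean expression;
//                     // did you mean logical negation? [-Wbool-operation]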
13087
13088 // For conditional operators, we analyze the arguments as if they
13089 // were being fed directly into the output.
13090 if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) {
13091 CheckConditionalOperator(S, CO, CC, T);
13092 return;
13093 }
13094
13095 // Check implicit argument conversions for function calls.
13096 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr))
13097 CheckImplicitArgumentConversions(S, Call, CC);
13098
13099 // Go ahead and check any implicit conversions we might have skipped.
13100 // The non-canonical typecheck is just an optimization;
13101 // CheckImplicitConversion will filter out dead implicit conversions.
13102 if (SourceExpr->getType() != T)
13103 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit);
13104
13105 // Now continue drilling into this expression.
13106
13107 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
13108 // The bound subexpressions in a PseudoObjectExpr are not reachable
13109 // as transitive children.
13110 // FIXME: Use a more uniform representation for this.
13111 for (auto *SE : POE->semantics())
13112 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE))
13113 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit});
13114 }
13115
13116 // Skip past explicit casts.
13117 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) {
13118 E = CE->getSubExpr()->IgnoreParenImpCasts();
13119 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType())
13120 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
13121 WorkList.push_back({E, CC, IsListInit});
13122 return;
13123 }
13124
13125 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
13126 // Do a somewhat different check with comparison operators.
13127 if (BO->isComparisonOp())
13128 return AnalyzeComparison(S, BO);
13129
13130 // And with simple assignments.
13131 if (BO->getOpcode() == BO_Assign)
13132 return AnalyzeAssignment(S, BO);
13133 // And with compound assignments.
13134 if (BO->isAssignmentOp())
13135 return AnalyzeCompoundAssignment(S, BO);
13136 }
13137
13138 // These break the otherwise-useful invariant below. Fortunately,
13139 // we don't really need to recurse into them, because any internal
13140 // expressions should have been analyzed already when they were
13141 // built into statements.
13142 if (isa<StmtExpr>(E)) return;
13143
13144 // Don't descend into unevaluated contexts.
13145 if (isa<UnaryExprOrTypeTraitExpr>(E)) return;
13146
13147 // Now just recurse over the expression's children.
13148 CC = E->getExprLoc();
13149 BinaryOperator *BO = dyn_cast<BinaryOperator>(E);
13150 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd;
13151 for (Stmt *SubStmt : E->children()) {
13152 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt);
13153 if (!ChildExpr)
13154 continue;
13155
13156 if (IsLogicalAndOperator &&
13157 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
13158 // Ignore checking string literals that are in logical and operators.
13159 // This is a common pattern for asserts.
13160 continue;
13161 WorkList.push_back({ChildExpr, CC, IsListInit});
13162 }
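// Illustrative user code (assumed) for the string-literal exception above:
//
//   assert(ptr != nullptr && "ptr must be valid");
//
// The string literal operand of '&&' is intentionally skipped, since this
// assert-style pattern is idiomatic and not worth a bool-conversion warning.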
13163
13164 if (BO && BO->isLogicalOp()) {
13165 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
13166 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
13167 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
13168
13169 SubExpr = BO->getRHS()->IgnoreParenImpCasts();
13170 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
13171 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
13172 }
13173
13174 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) {
13175 if (U->getOpcode() == UO_LNot) {
13176 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
13177 } else if (U->getOpcode() != UO_AddrOf) {
13178 if (U->getSubExpr()->getType()->isAtomicType())
13179 S.Diag(U->getSubExpr()->getBeginLoc(),
13180 diag::warn_atomic_implicit_seq_cst);
13181 }
13182 }
13183}
13184
13185/// AnalyzeImplicitConversions - Find and report any interesting
13186/// implicit conversions in the given expression. There are a couple
13187/// of competing diagnostics here, -Wconversion and -Wsign-compare.
13188static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
13189 bool IsListInit/*= false*/) {
13190 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
13191 WorkList.push_back({OrigE, CC, IsListInit});
13192 while (!WorkList.empty())
13193 AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
13194}
13195
13196/// Diagnose integer type and any valid implicit conversion to it.
13197static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
13198 // Taking into account implicit conversions,
13199 // allow any integer.
13200 if (!E->getType()->isIntegerType()) {
13201 S.Diag(E->getBeginLoc(),
13202 diag::err_opencl_enqueue_kernel_invalid_local_size_type);
13203 return true;
13204 }
13205 // Potentially emit standard warnings for implicit conversions if enabled
13206 // using -Wconversion.
13207 CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
13208 return false;
13209}
13210
13211// Helper function for Sema::DiagnoseAlwaysNonNullPointer.
13212// Returns true when emitting a warning about taking the address of a reference.
13213static bool CheckForReference(Sema &SemaRef, const Expr *E,
13214 const PartialDiagnostic &PD) {
13215 E = E->IgnoreParenImpCasts();
13216
13217 const FunctionDecl *FD = nullptr;
13218
13219 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
13220 if (!DRE->getDecl()->getType()->isReferenceType())
13221 return false;
13222 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
13223 if (!M->getMemberDecl()->getType()->isReferenceType())
13224 return false;
13225 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
13226 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
13227 return false;
13228 FD = Call->getDirectCallee();
13229 } else {
13230 return false;
13231 }
13232
13233 SemaRef.Diag(E->getExprLoc(), PD);
13234
13235 // If possible, point to location of function.
13236 if (FD) {
13237 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
13238 }
13239
13240 return true;
13241}
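// Illustrative user code (assumed) that reaches CheckForReference via
// Sema::DiagnoseAlwaysNonNullPointer below:
//
//   int  x = 0;
//   int &r = x;
//   if (&r) { ... }   // warned: the address of a reference is never null in
//                     // well-defined code, so the test is always true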
13242
13243// Returns true if the SourceLocation is expanded from any macro body.
13244// Returns false if the SourceLocation is invalid, is from not in a macro
13245// expansion, or is from expanded from a top-level macro argument.
13246static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
13247 if (Loc.isInvalid())
13248 return false;
13249
13250 while (Loc.isMacroID()) {
13251 if (SM.isMacroBodyExpansion(Loc))
13252 return true;
13253 Loc = SM.getImmediateMacroCallerLoc(Loc);
13254 }
13255
13256 return false;
13257}
13258
13259/// Diagnose pointers that are always non-null.
13260/// \param E the expression containing the pointer
13261/// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
13262/// compared to a null pointer
13263/// \param IsEqual True when the comparison is equal to a null pointer
13264/// \param Range Extra SourceRange to highlight in the diagnostic
13265void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
13266 Expr::NullPointerConstantKind NullKind,
13267 bool IsEqual, SourceRange Range) {
13268 if (!E)
13269 return;
13270
13271 // Don't warn inside macros.
13272 if (E->getExprLoc().isMacroID()) {
13273 const SourceManager &SM = getSourceManager();
13274 if (IsInAnyMacroBody(SM, E->getExprLoc()) ||
13275 IsInAnyMacroBody(SM, Range.getBegin()))
13276 return;
13277 }
13278 E = E->IgnoreImpCasts();
13279
13280 const bool IsCompare = NullKind != Expr::NPCK_NotNull;
13281
13282 if (isa<CXXThisExpr>(E)) {
13283 unsigned DiagID = IsCompare ? diag::warn_this_null_compare
13284 : diag::warn_this_bool_conversion;
13285 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual;
13286 return;
13287 }
13288
13289 bool IsAddressOf = false;
13290
13291 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
13292 if (UO->getOpcode() != UO_AddrOf)
13293 return;
13294 IsAddressOf = true;
13295 E = UO->getSubExpr();
13296 }
13297
13298 if (IsAddressOf) {
13299 unsigned DiagID = IsCompare
13300 ? diag::warn_address_of_reference_null_compare
13301 : diag::warn_address_of_reference_bool_conversion;
13302 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range
13303 << IsEqual;
13304 if (CheckForReference(*this, E, PD)) {
13305 return;
13306 }
13307 }
13308
13309 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) {
13310 bool IsParam = isa<NonNullAttr>(NonnullAttr);
13311 std::string Str;
13312 llvm::raw_string_ostream S(Str);
13313 E->printPretty(S, nullptr, getPrintingPolicy());
13314 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare
13315 : diag::warn_cast_nonnull_to_bool;
13316 Diag(E->getExprLoc(), DiagID) << IsParam << S.str()
13317 << E->getSourceRange() << Range << IsEqual;
13318 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam;
13319 };
13320
13321 // If we have a CallExpr that is tagged with returns_nonnull, we can complain.
13322 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) {
13323 if (auto *Callee = Call->getDirectCallee()) {
13324 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) {
13325 ComplainAboutNonnullParamOrCall(A);
13326 return;
13327 }
13328 }
13329 }
13330
13331 // Expect to find a single Decl. Skip anything more complicated.
13332 ValueDecl *D = nullptr;
13333 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
13334 D = R->getDecl();
13335 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) {
13336 D = M->getMemberDecl();
13337 }
13338
13339 // Weak Decls can be null.
13340 if (!D || D->isWeak())
13341 return;
13342
13343 // Check for parameter decl with nonnull attribute
13344 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) {
13345 if (getCurFunction() &&
13346 !getCurFunction()->ModifiedNonNullParams.count(PV)) {
13347 if (const Attr *A = PV->getAttr<NonNullAttr>()) {
13348 ComplainAboutNonnullParamOrCall(A);
13349 return;
13350 }
13351
13352 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
13353 // Skip function templates that are not specialized yet.
13354 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
13355 return;
13356 auto ParamIter = llvm::find(FD->parameters(), PV);
13357 assert(ParamIter != FD->param_end())((void)0);
13358 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
13359
13360 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
13361 if (!NonNull->args_size()) {
13362 ComplainAboutNonnullParamOrCall(NonNull);
13363 return;
13364 }
13365
13366 for (const ParamIdx &ArgNo : NonNull->args()) {
13367 if (ArgNo.getASTIndex() == ParamNo) {
13368 ComplainAboutNonnullParamOrCall(NonNull);
13369 return;
13370 }
13371 }
13372 }
13373 }
13374 }
13375 }
13376
13377 QualType T = D->getType();
13378 const bool IsArray = T->isArrayType();
13379 const bool IsFunction = T->isFunctionType();
13380
13381 // Address of function is used to silence the function warning.
13382 if (IsAddressOf && IsFunction) {
13383 return;
13384 }
13385
13386 // Found nothing.
13387 if (!IsAddressOf && !IsFunction && !IsArray)
13388 return;
13389
13390 // Pretty print the expression for the diagnostic.
13391 std::string Str;
13392 llvm::raw_string_ostream S(Str);
13393 E->printPretty(S, nullptr, getPrintingPolicy());
13394
13395 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
13396 : diag::warn_impcast_pointer_to_bool;
13397 enum {
13398 AddressOf,
13399 FunctionPointer,
13400 ArrayPointer
13401 } DiagType;
13402 if (IsAddressOf)
13403 DiagType = AddressOf;
13404 else if (IsFunction)
13405 DiagType = FunctionPointer;
13406 else if (IsArray)
13407 DiagType = ArrayPointer;
13408 else
13409 llvm_unreachable("Could not determine diagnostic.")__builtin_unreachable();
13410 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
13411 << Range << IsEqual;
13412
13413 if (!IsFunction)
13414 return;
13415
13416 // Suggest '&' to silence the function warning.
13417 Diag(E->getExprLoc(), diag::note_function_warning_silence)
13418 << FixItHint::CreateInsertion(E->getBeginLoc(), "&");
13419
13420 // Check to see if '()' fixit should be emitted.
13421 QualType ReturnType;
13422 UnresolvedSet<4> NonTemplateOverloads;
13423 tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
13424 if (ReturnType.isNull())
13425 return;
13426
13427 if (IsCompare) {
13428 // There are two cases here. If there is a null constant, only suggest for
13429 // a pointer return type. If the null is 0, then suggest if the return
13430 // type is a pointer or an integer type.
13431 if (!ReturnType->isPointerType()) {
13432 if (NullKind == Expr::NPCK_ZeroExpression ||
13433 NullKind == Expr::NPCK_ZeroLiteral) {
13434 if (!ReturnType->isIntegerType())
13435 return;
13436 } else {
13437 return;
13438 }
13439 }
13440 } else { // !IsCompare
13441 // For function to bool, only suggest if the function pointer has bool
13442 // return type.
13443 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
13444 return;
13445 }
13446 Diag(E->getExprLoc(), diag::note_function_to_function_call)
13447 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
13448}
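// Illustrative user code (assumed) for the function-pointer case handled
// above, including the two fix-it notes:
//
//   bool ready();
//   if (ready) { ... }   // warning: address of function 'ready' will always
//                        // evaluate to 'true'; notes suggest prefixing with
//                        // '&' to silence, or appending '()' because the
//                        // bool return type suggests a call was intended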
13449
13450/// Diagnoses "dangerous" implicit conversions within the given
13451/// expression (which is a full expression). Implements -Wconversion
13452/// and -Wsign-compare.
13453///
13454/// \param CC the "context" location of the implicit conversion, i.e.
13455/// the most location of the syntactic entity requiring the implicit
13456/// conversion
13457void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
13458 // Don't diagnose in unevaluated contexts.
13459 if (isUnevaluatedContext())
13460 return;
13461
13462 // Don't diagnose for value- or type-dependent expressions.
13463 if (E->isTypeDependent() || E->isValueDependent())
13464 return;
13465
13466 // Check for array bounds violations in cases where the check isn't triggered
13467 // elsewhere for other Expr types (like BinaryOperators), e.g. when an
13468 // ArraySubscriptExpr is on the RHS of a variable initialization.
13469 CheckArrayAccess(E);
13470
13471 // This is not the right CC for (e.g.) a variable initialization.
13472 AnalyzeImplicitConversions(*this, E, CC);
13473}
13474
13475/// CheckBoolLikeConversion - Check conversion of given expression to boolean.
13476/// Input argument E is a logical expression.
13477void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
13478 ::CheckBoolLikeConversion(*this, E, CC);
13479}
13480
13481/// Diagnose when expression is an integer constant expression and its evaluation
13482/// results in integer overflow
13483void Sema::CheckForIntOverflow (Expr *E) {
13484 // Use a work list to deal with nested struct initializers.
13485 SmallVector<Expr *, 2> Exprs(1, E);
13486
13487 do {
13488 Expr *OriginalE = Exprs.pop_back_val();
13489 Expr *E = OriginalE->IgnoreParenCasts();
13490
13491 if (isa<BinaryOperator>(E)) {
13492 E->EvaluateForOverflow(Context);
13493 continue;
13494 }
13495
13496 if (auto InitList = dyn_cast<InitListExpr>(OriginalE))
13497 Exprs.append(InitList->inits().begin(), InitList->inits().end());
13498 else if (isa<ObjCBoxedExpr>(OriginalE))
13499 E->EvaluateForOverflow(Context);
13500 else if (auto Call = dyn_cast<CallExpr>(E))
13501 Exprs.append(Call->arg_begin(), Call->arg_end());
13502 else if (auto Message = dyn_cast<ObjCMessageExpr>(E))
13503 Exprs.append(Message->arg_begin(), Message->arg_end());
13504 } while (!Exprs.empty());
13505}
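// Illustrative user code (assumed) for the overflow evaluation above:
//
//   int x = 2147483647 + 1;   // flagged by EvaluateForOverflow as an
//                             // overflow in the expression
//                             // [-Winteger-overflow]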
13506
13507namespace {
13508
13509/// Visitor for expressions which looks for unsequenced operations on the
13510/// same object.
13511class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
13512 using Base = ConstEvaluatedExprVisitor<SequenceChecker>;
13513
13514 /// A tree of sequenced regions within an expression. Two regions are
13515 /// unsequenced if one is an ancestor or a descendant of the other. When we
13516 /// finish processing an expression with sequencing, such as a comma
13517 /// expression, we fold its tree nodes into its parent, since they are
13518 /// unsequenced with respect to nodes we will visit later.
13519 class SequenceTree {
13520 struct Value {
13521 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {}
13522 unsigned Parent : 31;
13523 unsigned Merged : 1;
13524 };
13525 SmallVector<Value, 8> Values;
13526
13527 public:
13528 /// A region within an expression which may be sequenced with respect
13529 /// to some other region.
13530 class Seq {
13531 friend class SequenceTree;
13532
13533 unsigned Index;
13534
13535 explicit Seq(unsigned N) : Index(N) {}
13536
13537 public:
13538 Seq() : Index(0) {}
13539 };
13540
13541 SequenceTree() { Values.push_back(Value(0)); }
13542 Seq root() const { return Seq(0); }
13543
13544 /// Create a new sequence of operations, which is an unsequenced
13545 /// subset of \p Parent. This sequence of operations is sequenced with
13546 /// respect to other children of \p Parent.
13547 Seq allocate(Seq Parent) {
13548 Values.push_back(Value(Parent.Index));
13549 return Seq(Values.size() - 1);
13550 }
13551
13552 /// Merge a sequence of operations into its parent.
13553 void merge(Seq S) {
13554 Values[S.Index].Merged = true;
13555 }
13556
13557 /// Determine whether two operations are unsequenced. This operation
13558 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old
13559 /// should have been merged into its parent as appropriate.
13560 bool isUnsequenced(Seq Cur, Seq Old) {
13561 unsigned C = representative(Cur.Index);
13562 unsigned Target = representative(Old.Index);
13563 while (C >= Target) {
13564 if (C == Target)
13565 return true;
13566 C = Values[C].Parent;
13567 }
13568 return false;
13569 }
13570
13571 private:
13572 /// Pick a representative for a sequence.
13573 unsigned representative(unsigned K) {
13574 if (Values[K].Merged)
13575 // Perform path compression as we go.
13576 return Values[K].Parent = representative(Values[K].Parent);
13577 return K;
13578 }
13579 };
13580
13581 /// An object for which we can track unsequenced uses.
13582 using Object = const NamedDecl *;
13583
13584 /// Different flavors of object usage which we track. We only track the
13585 /// least-sequenced usage of each kind.
13586 enum UsageKind {
13587 /// A read of an object. Multiple unsequenced reads are OK.
13588 UK_Use,
13589
13590 /// A modification of an object which is sequenced before the value
13591 /// computation of the expression, such as ++n in C++.
13592 UK_ModAsValue,
13593
13594 /// A modification of an object which is not sequenced before the value
13595 /// computation of the expression, such as n++.
13596 UK_ModAsSideEffect,
13597
13598 UK_Count = UK_ModAsSideEffect + 1
13599 };
13600
13601 /// Bundle together a sequencing region and the expression corresponding
13602 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo.
13603 struct Usage {
13604 const Expr *UsageExpr;
13605 SequenceTree::Seq Seq;
13606
13607 Usage() : UsageExpr(nullptr), Seq() {}
13608 };
13609
13610 struct UsageInfo {
13611 Usage Uses[UK_Count];
13612
13613 /// Have we issued a diagnostic for this object already?
13614 bool Diagnosed;
13615
13616 UsageInfo() : Uses(), Diagnosed(false) {}
13617 };
13618 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>;
13619
13620 Sema &SemaRef;
13621
13622 /// Sequenced regions within the expression.
13623 SequenceTree Tree;
13624
13625 /// Declaration modifications and references which we have seen.
13626 UsageInfoMap UsageMap;
13627
13628 /// The region we are currently within.
13629 SequenceTree::Seq Region;
13630
13631 /// Filled in with declarations which were modified as a side-effect
13632 /// (that is, post-increment operations).
13633 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr;
13634
13635 /// Expressions to check later. We defer checking these to reduce
13636 /// stack usage.
13637 SmallVectorImpl<const Expr *> &WorkList;
13638
13639 /// RAII object wrapping the visitation of a sequenced subexpression of an
13640 /// expression. At the end of this process, the side-effects of the evaluation
13641 /// become sequenced with respect to the value computation of the result, so
13642 /// we downgrade any UK_ModAsSideEffect within the evaluation to
13643 /// UK_ModAsValue.
13644 struct SequencedSubexpression {
13645 SequencedSubexpression(SequenceChecker &Self)
13646 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) {
13647 Self.ModAsSideEffect = &ModAsSideEffect;
13648 }
13649
13650 ~SequencedSubexpression() {
13651 for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) {
13652 // Add a new usage with usage kind UK_ModAsValue, and then restore
13653 // the previous usage with UK_ModAsSideEffect (thus clearing it if
13654 // the previous one was empty).
13655 UsageInfo &UI = Self.UsageMap[M.first];
13656 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect];
13657 Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue);
13658 SideEffectUsage = M.second;
13659 }
13660 Self.ModAsSideEffect = OldModAsSideEffect;
13661 }
13662
13663 SequenceChecker &Self;
13664 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect;
13665 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect;
13666 };
13667
13668 /// RAII object wrapping the visitation of a subexpression which we might
13669 /// choose to evaluate as a constant. If any subexpression is evaluated and
13670 /// found to be non-constant, this allows us to suppress the evaluation of
13671 /// the outer expression.
13672 class EvaluationTracker {
13673 public:
13674 EvaluationTracker(SequenceChecker &Self)
13675 : Self(Self), Prev(Self.EvalTracker) {
13676 Self.EvalTracker = this;
13677 }
13678
13679 ~EvaluationTracker() {
13680 Self.EvalTracker = Prev;
13681 if (Prev)
13682 Prev->EvalOK &= EvalOK;
13683 }
13684
13685 bool evaluate(const Expr *E, bool &Result) {
13686 if (!EvalOK || E->isValueDependent())
13687 return false;
13688 EvalOK = E->EvaluateAsBooleanCondition(
13689 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated());
13690 return EvalOK;
13691 }
13692
13693 private:
13694 SequenceChecker &Self;
13695 EvaluationTracker *Prev;
13696 bool EvalOK = true;
13697 } *EvalTracker = nullptr;
13698
13699 /// Find the object which is produced by the specified expression,
13700 /// if any.
13701 Object getObject(const Expr *E, bool Mod) const {
13702 E = E->IgnoreParenCasts();
13703 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
13704 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec))
13705 return getObject(UO->getSubExpr(), Mod);
13706 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
13707 if (BO->getOpcode() == BO_Comma)
13708 return getObject(BO->getRHS(), Mod);
13709 if (Mod && BO->isAssignmentOp())
13710 return getObject(BO->getLHS(), Mod);
13711 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
13712 // FIXME: Check for more interesting cases, like "x.n = ++x.n".
13713 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts()))
13714 return ME->getMemberDecl();
13715 } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
13716 // FIXME: If this is a reference, map through to its value.
13717 return DRE->getDecl();
13718 return nullptr;
13719 }
13720
13721 /// Note that an object \p O was modified or used by an expression
13722 /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for
13723 /// the object \p O as obtained via the \p UsageMap.
13724 void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) {
13725 // Get the old usage for the given object and usage kind.
13726 Usage &U = UI.Uses[UK];
13727 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
13728 // If we have a modification as side effect and are in a sequenced
13729 // subexpression, save the old Usage so that we can restore it later
13730 // in SequencedSubexpression::~SequencedSubexpression.
13731 if (UK == UK_ModAsSideEffect && ModAsSideEffect)
13732 ModAsSideEffect->push_back(std::make_pair(O, U));
13733 // Then record the new usage with the current sequencing region.
13734 U.UsageExpr = UsageExpr;
13735 U.Seq = Region;
13736 }
13737 }
13738
13739 /// Check whether a modification or use of an object \p O in an expression
13740 /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
13741 /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
13742 /// \p IsModMod is true when we are checking for a mod-mod unsequenced
13743 /// usage and false when we are checking for a mod-use unsequenced usage.
13744 void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
13745 UsageKind OtherKind, bool IsModMod) {
13746 if (UI.Diagnosed)
13747 return;
13748
13749 const Usage &U = UI.Uses[OtherKind];
13750 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
13751 return;
13752
13753 const Expr *Mod = U.UsageExpr;
13754 const Expr *ModOrUse = UsageExpr;
13755 if (OtherKind == UK_Use)
13756 std::swap(Mod, ModOrUse);
13757
13758 SemaRef.DiagRuntimeBehavior(
13759 Mod->getExprLoc(), {Mod, ModOrUse},
13760 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
13761 : diag::warn_unsequenced_mod_use)
13762 << O << SourceRange(ModOrUse->getExprLoc()));
13763 UI.Diagnosed = true;
13764 }
13765
13766 // A note on note{Pre, Post}{Use, Mod}:
13767 //
13768 // (It helps to follow the algorithm with an expression such as
13769 // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
13770 // operations before C++17 and both are well-defined in C++17).
13771 //
13772 // When visiting a node which uses/modifies an object we first call notePreUse
13773 // or notePreMod before visiting its sub-expression(s). At this point the
13774 // children of the current node have not yet been visited and so the eventual
13775 // uses/modifications resulting from the children of the current node have not
13776 // been recorded yet.
13777 //
13778 // We then visit the children of the current node. After that notePostUse or
13779 // notePostMod is called. These will 1) detect an unsequenced modification
13780 // as side effect (as in "k++ + k") and 2) add a new usage with the
13781 // appropriate usage kind.
13782 //
13783 // We also have to be careful that some operations sequence modifications made
13784 // as side effects as well (for example: || or ,). To account for this we wrap
13785 // the visitation of such a sub-expression (for example: the LHS of || or ,)
13786 // with SequencedSubexpression. SequencedSubexpression is an RAII object
13787 // which records usages which are modifications as side effects, and then
13788 // downgrades them (or more accurately restores the previous usage which was
13789 // a modification as side effect) when exiting the scope of the sequenced
13790 // subexpression.
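// Worked example (assumed user code), pre-C++17: in "i = i++ + 1;" the
// post-increment is recorded via notePostMod as UK_ModAsSideEffect; the
// assignment's own notePostMod then finds that usage in the same region and
// reports -Wunsequenced ("multiple unsequenced modifications to 'i'").
// Under C++17 the RHS is visited inside a SequencedSubexpression and its
// region is sequenced before the LHS, so no warning is emitted.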
13791
13792 void notePreUse(Object O, const Expr *UseExpr) {
13793 UsageInfo &UI = UsageMap[O];
13794 // Uses conflict with other modifications.
13795 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false);
13796 }
13797
13798 void notePostUse(Object O, const Expr *UseExpr) {
13799 UsageInfo &UI = UsageMap[O];
13800 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect,
13801 /*IsModMod=*/false);
13802 addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use);
13803 }
13804
13805 void notePreMod(Object O, const Expr *ModExpr) {
13806 UsageInfo &UI = UsageMap[O];
13807 // Modifications conflict with other modifications and with uses.
13808 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true);
13809 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false);
13810 }
13811
13812 void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) {
13813 UsageInfo &UI = UsageMap[O];
13814 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect,
13815 /*IsModMod=*/true);
13816 addUsage(O, UI, ModExpr, /*UsageKind=*/UK);
13817 }
13818
13819public:
13820 SequenceChecker(Sema &S, const Expr *E,
13821 SmallVectorImpl<const Expr *> &WorkList)
13822 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) {
13823 Visit(E);
13824 // Silence a -Wunused-private-field since WorkList is now unused.
13825 // TODO: Evaluate if it can be used, and if not remove it.
13826 (void)this->WorkList;
13827 }
13828
13829 void VisitStmt(const Stmt *S) {
13830 // Skip all statements which aren't expressions for now.
13831 }
13832
13833 void VisitExpr(const Expr *E) {
13834 // By default, just recurse to evaluated subexpressions.
13835 Base::VisitStmt(E);
13836 }
13837
13838 void VisitCastExpr(const CastExpr *E) {
13839 Object O = Object();
13840 if (E->getCastKind() == CK_LValueToRValue)
13841 O = getObject(E->getSubExpr(), false);
13842
13843 if (O)
13844 notePreUse(O, E);
13845 VisitExpr(E);
13846 if (O)
13847 notePostUse(O, E);
13848 }
13849
13850 void VisitSequencedExpressions(const Expr *SequencedBefore,
13851 const Expr *SequencedAfter) {
13852 SequenceTree::Seq BeforeRegion = Tree.allocate(Region);
13853 SequenceTree::Seq AfterRegion = Tree.allocate(Region);
13854 SequenceTree::Seq OldRegion = Region;
13855
13856 {
13857 SequencedSubexpression SeqBefore(*this);
13858 Region = BeforeRegion;
13859 Visit(SequencedBefore);
13860 }
13861
13862 Region = AfterRegion;
13863 Visit(SequencedAfter);
13864
13865 Region = OldRegion;
13866
13867 Tree.merge(BeforeRegion);
13868 Tree.merge(AfterRegion);
13869 }
13870
13871 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) {
13872 // C++17 [expr.sub]p1:
13873 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The
13874 // expression E1 is sequenced before the expression E2.
13875 if (SemaRef.getLangOpts().CPlusPlus17)
13876 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS());
13877 else {
13878 Visit(ASE->getLHS());
13879 Visit(ASE->getRHS());
13880 }
13881 }
13882
13883 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
13884 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
13885 void VisitBinPtrMem(const BinaryOperator *BO) {
13886 // C++17 [expr.mptr.oper]p4:
13887 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...]
13888 // the expression E1 is sequenced before the expression E2.
13889 if (SemaRef.getLangOpts().CPlusPlus17)
13890 VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
13891 else {
13892 Visit(BO->getLHS());
13893 Visit(BO->getRHS());
13894 }
13895 }
13896
13897 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); }
13898 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); }
13899 void VisitBinShlShr(const BinaryOperator *BO) {
13900 // C++17 [expr.shift]p4:
13901 // The expression E1 is sequenced before the expression E2.
13902 if (SemaRef.getLangOpts().CPlusPlus17)
13903 VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
13904 else {
13905 Visit(BO->getLHS());
13906 Visit(BO->getRHS());
13907 }
13908 }
13909
13910 void VisitBinComma(const BinaryOperator *BO) {
13911 // C++11 [expr.comma]p1:
13912 // Every value computation and side effect associated with the left
13913 // expression is sequenced before every value computation and side
13914 // effect associated with the right expression.
13915 VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
13916 }
13917
13918 void VisitBinAssign(const BinaryOperator *BO) {
13919 SequenceTree::Seq RHSRegion;
13920 SequenceTree::Seq LHSRegion;
13921 if (SemaRef.getLangOpts().CPlusPlus17) {
13922 RHSRegion = Tree.allocate(Region);
13923 LHSRegion = Tree.allocate(Region);
13924 } else {
13925 RHSRegion = Region;
13926 LHSRegion = Region;
13927 }
13928 SequenceTree::Seq OldRegion = Region;
13929
13930 // C++11 [expr.ass]p1:
13931 // [...] the assignment is sequenced after the value computation
13932 // of the right and left operands, [...]
13933 //
13934 // so check it before inspecting the operands and update the
13935 // map afterwards.
13936 Object O = getObject(BO->getLHS(), /*Mod=*/true);
13937 if (O)
13938 notePreMod(O, BO);
13939
13940 if (SemaRef.getLangOpts().CPlusPlus17) {
13941 // C++17 [expr.ass]p1:
13942 // [...] The right operand is sequenced before the left operand. [...]
13943 {
13944 SequencedSubexpression SeqBefore(*this);
13945 Region = RHSRegion;
13946 Visit(BO->getRHS());
13947 }
13948
13949 Region = LHSRegion;
13950 Visit(BO->getLHS());
13951
13952 if (O && isa<CompoundAssignOperator>(BO))
13953 notePostUse(O, BO);
13954
13955 } else {
13956 // C++11 does not specify any sequencing between the LHS and RHS.
13957 Region = LHSRegion;
13958 Visit(BO->getLHS());
13959
13960 if (O && isa<CompoundAssignOperator>(BO))
13961 notePostUse(O, BO);
13962
13963 Region = RHSRegion;
13964 Visit(BO->getRHS());
13965 }
13966
13967 // C++11 [expr.ass]p1:
13968 // the assignment is sequenced [...] before the value computation of the
13969 // assignment expression.
13970 // C11 6.5.16/3 has no such rule.
13971 Region = OldRegion;
13972 if (O)
13973 notePostMod(O, BO,
13974 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
13975 : UK_ModAsSideEffect);
13976 if (SemaRef.getLangOpts().CPlusPlus17) {
13977 Tree.merge(RHSRegion);
13978 Tree.merge(LHSRegion);
13979 }
13980 }
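// Example (assumed user code): for "i = ++i;" in C the pre-increment is
// recorded as UK_ModAsSideEffect and conflicts with the assignment's own
// modification, so -Wunsequenced fires; in C++11 and later the pre-increment
// is UK_ModAsValue and sequenced before the assignment's value computation,
// so no warning is emitted.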
13981
13982 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) {
13983 VisitBinAssign(CAO);
13984 }
13985
13986 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
13987 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
13988 void VisitUnaryPreIncDec(const UnaryOperator *UO) {
13989 Object O = getObject(UO->getSubExpr(), true);
13990 if (!O)
13991 return VisitExpr(UO);
13992
13993 notePreMod(O, UO);
13994 Visit(UO->getSubExpr());
13995 // C++11 [expr.pre.incr]p1:
13996 // the expression ++x is equivalent to x+=1
13997 notePostMod(O, UO,
13998 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
13999 : UK_ModAsSideEffect);
14000 }
14001
14002 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
14003 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
14004 void VisitUnaryPostIncDec(const UnaryOperator *UO) {
14005 Object O = getObject(UO->getSubExpr(), true);
14006 if (!O)
14007 return VisitExpr(UO);
14008
14009 notePreMod(O, UO);
14010 Visit(UO->getSubExpr());
14011 notePostMod(O, UO, UK_ModAsSideEffect);
14012 }
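// Example (assumed user code): in "i++ + i++" the built-in '+' imposes no
// sequencing, so both post-increments land in the same region and the second
// notePostMod reports -Wunsequenced for the mod/mod conflict.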
14013
14014 void VisitBinLOr(const BinaryOperator *BO) {
14015 // C++11 [expr.log.or]p2:
14016 // If the second expression is evaluated, every value computation and
14017 // side effect associated with the first expression is sequenced before
14018 // every value computation and side effect associated with the
14019 // second expression.
14020 SequenceTree::Seq LHSRegion = Tree.allocate(Region);
14021 SequenceTree::Seq RHSRegion = Tree.allocate(Region);
14022 SequenceTree::Seq OldRegion = Region;
14023
14024 EvaluationTracker Eval(*this);
14025 {
14026 SequencedSubexpression Sequenced(*this);
14027 Region = LHSRegion;
14028 Visit(BO->getLHS());
14029 }
14030
14031 // C++11 [expr.log.or]p1:
14032 // [...] the second operand is not evaluated if the first operand
14033 // evaluates to true.
14034 bool EvalResult = false;
14035 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
14036 bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult);
14037 if (ShouldVisitRHS) {
14038 Region = RHSRegion;
14039 Visit(BO->getRHS());
14040 }
14041
14042 Region = OldRegion;
14043 Tree.merge(LHSRegion);
14044 Tree.merge(RHSRegion);
14045 }
14046
14047 void VisitBinLAnd(const BinaryOperator *BO) {
14048 // C++11 [expr.log.and]p2:
14049 // If the second expression is evaluated, every value computation and
14050 // side effect associated with the first expression is sequenced before
14051 // every value computation and side effect associated with the
14052 // second expression.
14053 SequenceTree::Seq LHSRegion = Tree.allocate(Region);
14054 SequenceTree::Seq RHSRegion = Tree.allocate(Region);
14055 SequenceTree::Seq OldRegion = Region;
14056
14057 EvaluationTracker Eval(*this);
14058 {
14059 SequencedSubexpression Sequenced(*this);
14060 Region = LHSRegion;
14061 Visit(BO->getLHS());
14062 }
14063
14064 // C++11 [expr.log.and]p1:
14065 // [...] the second operand is not evaluated if the first operand is false.
14066 bool EvalResult = false;
14067 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
14068 bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
14069 if (ShouldVisitRHS) {
14070 Region = RHSRegion;
14071 Visit(BO->getRHS());
14072 }
14073
14074 Region = OldRegion;
14075 Tree.merge(LHSRegion);
14076 Tree.merge(RHSRegion);
14077 }
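// Example (assumed user code): in "0 && (i++ + i++)" the LHS evaluates to a
// constant false, so ShouldVisitRHS is false and the never-evaluated RHS
// produces no -Wunsequenced diagnostic.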
14078
14079 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
14080 // C++11 [expr.cond]p1:
14081 // [...] Every value computation and side effect associated with the first
14082 // expression is sequenced before every value computation and side effect
14083 // associated with the second or third expression.
14084 SequenceTree::Seq ConditionRegion = Tree.allocate(Region);
14085
14086 // No sequencing is specified between the true and false expression.
14087 // However, since exactly one of the two is going to be evaluated, we can
14088 // consider them to be sequenced. This is needed to avoid warning on
14089 // something like "x ? y += 1 : y += 2;" in the case where we will visit
14090 // both the true and false expressions because we can't evaluate x.
14091 // This will still allow us to detect an expression like (pre C++17)
14092 // "(x ? y += 1 : y += 2) = y".
14093 //
14094 // We don't wrap the visitation of the true and false expression with
14095 // SequencedSubexpression because we don't want to downgrade modifications
14096 // as side effect in the true and false expressions after the visitation
14097 // is done (for example, in the expression "(x ? y++ : y++) + y" we should
14098 // not warn between the two "y++", but we should warn between the "y++"
14099 // and the "y").
14100 SequenceTree::Seq TrueRegion = Tree.allocate(Region);
14101 SequenceTree::Seq FalseRegion = Tree.allocate(Region);
14102 SequenceTree::Seq OldRegion = Region;
14103
14104 EvaluationTracker Eval(*this);
14105 {
14106 SequencedSubexpression Sequenced(*this);
14107 Region = ConditionRegion;
14108 Visit(CO->getCond());
14109 }
14110
14111 // C++11 [expr.cond]p1:
14112 // [...] The first expression is contextually converted to bool (Clause 4).
14113 // It is evaluated and if it is true, the result of the conditional
14114 // expression is the value of the second expression, otherwise that of the
14115 // third expression. Only one of the second and third expressions is
14116 // evaluated. [...]
14117 bool EvalResult = false;
14118 bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
14119 bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
14120 bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
14121 if (ShouldVisitTrueExpr) {
14122 Region = TrueRegion;
14123 Visit(CO->getTrueExpr());
14124 }
14125 if (ShouldVisitFalseExpr) {
14126 Region = FalseRegion;
14127 Visit(CO->getFalseExpr());
14128 }
14129
14130 Region = OldRegion;
14131 Tree.merge(ConditionRegion);
14132 Tree.merge(TrueRegion);
14133 Tree.merge(FalseRegion);
14134 }
14135
14136 void VisitCallExpr(const CallExpr *CE) {
14137 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
14138
14139 if (CE->isUnevaluatedBuiltinCall(Context))
14140 return;
14141
14142 // C++11 [intro.execution]p15:
14143 // When calling a function [...], every value computation and side effect
14144 // associated with any argument expression, or with the postfix expression
14145 // designating the called function, is sequenced before execution of every
14146 // expression or statement in the body of the function [and thus before
14147 // the value computation of its result].
14148 SequencedSubexpression Sequenced(*this);
14149 SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] {
14150 // C++17 [expr.call]p5
14151 // The postfix-expression is sequenced before each expression in the
14152 // expression-list and any default argument. [...]
14153 SequenceTree::Seq CalleeRegion;
14154 SequenceTree::Seq OtherRegion;
14155 if (SemaRef.getLangOpts().CPlusPlus17) {
14156 CalleeRegion = Tree.allocate(Region);
14157 OtherRegion = Tree.allocate(Region);
14158 } else {
14159 CalleeRegion = Region;
14160 OtherRegion = Region;
14161 }
14162 SequenceTree::Seq OldRegion = Region;
14163
14164 // Visit the callee expression first.
14165 Region = CalleeRegion;
14166 if (SemaRef.getLangOpts().CPlusPlus17) {
14167 SequencedSubexpression Sequenced(*this);
14168 Visit(CE->getCallee());
14169 } else {
14170 Visit(CE->getCallee());
14171 }
14172
14173 // Then visit the argument expressions.
14174 Region = OtherRegion;
14175 for (const Expr *Argument : CE->arguments())
14176 Visit(Argument);
14177
14178 Region = OldRegion;
14179 if (SemaRef.getLangOpts().CPlusPlus17) {
14180 Tree.merge(CalleeRegion);
14181 Tree.merge(OtherRegion);
14182 }
14183 });
14184 }
14185
14186 void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) {
14187 // C++17 [over.match.oper]p2:
14188 // [...] the operator notation is first transformed to the equivalent
14189 // function-call notation as summarized in Table 12 (where @ denotes one
14190 // of the operators covered in the specified subclause). However, the
14191 // operands are sequenced in the order prescribed for the built-in
14192 // operator (Clause 8).
14193 //
14194 // From the above only overloaded binary operators and overloaded call
14195 // operators have sequencing rules in C++17 that we need to handle
14196 // separately.
14197 if (!SemaRef.getLangOpts().CPlusPlus17 ||
14198 (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call))
14199 return VisitCallExpr(CXXOCE);
14200
14201 enum {
14202 NoSequencing,
14203 LHSBeforeRHS,
14204 RHSBeforeLHS,
14205 LHSBeforeRest
14206 } SequencingKind;
14207 switch (CXXOCE->getOperator()) {
14208 case OO_Equal:
14209 case OO_PlusEqual:
14210 case OO_MinusEqual:
14211 case OO_StarEqual:
14212 case OO_SlashEqual:
14213 case OO_PercentEqual:
14214 case OO_CaretEqual:
14215 case OO_AmpEqual:
14216 case OO_PipeEqual:
14217 case OO_LessLessEqual:
14218 case OO_GreaterGreaterEqual:
14219 SequencingKind = RHSBeforeLHS;
14220 break;
14221
14222 case OO_LessLess:
14223 case OO_GreaterGreater:
14224 case OO_AmpAmp:
14225 case OO_PipePipe:
14226 case OO_Comma:
14227 case OO_ArrowStar:
14228 case OO_Subscript:
14229 SequencingKind = LHSBeforeRHS;
14230 break;
14231
14232 case OO_Call:
14233 SequencingKind = LHSBeforeRest;
14234 break;
14235
14236 default:
14237 SequencingKind = NoSequencing;
14238 break;
14239 }
14240
14241 if (SequencingKind == NoSequencing)
14242 return VisitCallExpr(CXXOCE);
14243
14244 // This is a call, so all subexpressions are sequenced before the result.
14245 SequencedSubexpression Sequenced(*this);
14246
14247 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] {
14248 assert(SemaRef.getLangOpts().CPlusPlus17 &&((void)0)
14249 "Should only get there with C++17 and above!")((void)0);
14250 assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) &&((void)0)
14251 "Should only get there with an overloaded binary operator"((void)0)
14252 " or an overloaded call operator!")((void)0);
14253
14254 if (SequencingKind == LHSBeforeRest) {
14255 assert(CXXOCE->getOperator() == OO_Call &&((void)0)
14256 "We should only have an overloaded call operator here!")((void)0);
14257
14258 // This is very similar to VisitCallExpr, except that we only have the
14259 // C++17 case. The postfix-expression is the first argument of the
14260 // CXXOperatorCallExpr. The expressions in the expression-list, if any,
14261 // are in the following arguments.
14262 //
14263 // Note that we intentionally do not visit the callee expression since
14264 // it is just a decayed reference to a function.
14265 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region);
14266 SequenceTree::Seq ArgsRegion = Tree.allocate(Region);
14267 SequenceTree::Seq OldRegion = Region;
14268
14269 assert(CXXOCE->getNumArgs() >= 1 &&((void)0)
14270 "An overloaded call operator must have at least one argument"((void)0)
14271 " for the postfix-expression!")((void)0);
14272 const Expr *PostfixExpr = CXXOCE->getArgs()[0];
14273 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1,
14274 CXXOCE->getNumArgs() - 1);
14275
14276 // Visit the postfix-expression first.
14277 {
14278 Region = PostfixExprRegion;
14279 SequencedSubexpression Sequenced(*this);
14280 Visit(PostfixExpr);
14281 }
14282
14283 // Then visit the argument expressions.
14284 Region = ArgsRegion;
14285 for (const Expr *Arg : Args)
14286 Visit(Arg);
14287
14288 Region = OldRegion;
14289 Tree.merge(PostfixExprRegion);
14290 Tree.merge(ArgsRegion);
14291 } else {
14292 assert(CXXOCE->getNumArgs() == 2 &&((void)0)
14293 "Should only have two arguments here!")((void)0);
14294 assert((SequencingKind == LHSBeforeRHS ||((void)0)
14295 SequencingKind == RHSBeforeLHS) &&((void)0)
14296 "Unexpected sequencing kind!")((void)0);
14297
14298 // We do not visit the callee expression since it is just a decayed
14299 // reference to a function.
14300 const Expr *E1 = CXXOCE->getArg(0);
14301 const Expr *E2 = CXXOCE->getArg(1);
14302 if (SequencingKind == RHSBeforeLHS)
14303 std::swap(E1, E2);
14304
14305 return VisitSequencedExpressions(E1, E2);
14306 }
14307 });
14308 }
14309
14310 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) {
14311 // This is a call, so all subexpressions are sequenced before the result.
14312 SequencedSubexpression Sequenced(*this);
14313
14314 if (!CCE->isListInitialization())
14315 return VisitExpr(CCE);
14316
14317 // In C++11, list initializations are sequenced.
14318 SmallVector<SequenceTree::Seq, 32> Elts;
14319 SequenceTree::Seq Parent = Region;
14320 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(),
14321 E = CCE->arg_end();
14322 I != E; ++I) {
14323 Region = Tree.allocate(Parent);
14324 Elts.push_back(Region);
14325 Visit(*I);
14326 }
14327
14328 // Forget that the initializers are sequenced.
14329 Region = Parent;
14330 for (unsigned I = 0; I < Elts.size(); ++I)
14331 Tree.merge(Elts[I]);
14332 }
14333
14334 void VisitInitListExpr(const InitListExpr *ILE) {
14335 if (!SemaRef.getLangOpts().CPlusPlus11)
14336 return VisitExpr(ILE);
14337
14338 // In C++11, list initializations are sequenced.
14339 SmallVector<SequenceTree::Seq, 32> Elts;
14340 SequenceTree::Seq Parent = Region;
14341 for (unsigned I = 0; I < ILE->getNumInits(); ++I) {
14342 const Expr *E = ILE->getInit(I);
14343 if (!E)
14344 continue;
14345 Region = Tree.allocate(Parent);
14346 Elts.push_back(Region);
14347 Visit(E);
14348 }
14349
14350 // Forget that the initializers are sequenced.
14351 Region = Parent;
14352 for (unsigned I = 0; I < Elts.size(); ++I)
14353 Tree.merge(Elts[I]);
14354 }
14355};
14356
14357} // namespace
14358
14359void Sema::CheckUnsequencedOperations(const Expr *E) {
14360 SmallVector<const Expr *, 8> WorkList;
14361 WorkList.push_back(E);
14362 while (!WorkList.empty()) {
14363 const Expr *Item = WorkList.pop_back_val();
14364 SequenceChecker(*this, Item, WorkList);
14365 }
14366}
14367
14368void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc,
14369 bool IsConstexpr) {
14370 llvm::SaveAndRestore<bool> ConstantContext(
14371 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E));
14372 CheckImplicitConversions(E, CheckLoc);
14373 if (!E->isInstantiationDependent())
14374 CheckUnsequencedOperations(E);
14375 if (!IsConstexpr && !E->isValueDependent())
14376 CheckForIntOverflow(E);
14377 DiagnoseMisalignedMembers();
14378}
14379
14380void Sema::CheckBitFieldInitialization(SourceLocation InitLoc,
14381 FieldDecl *BitField,
14382 Expr *Init) {
14383 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc);
14384}
14385
14386static void diagnoseArrayStarInParamType(Sema &S, QualType PType,
14387 SourceLocation Loc) {
14388 if (!PType->isVariablyModifiedType())
14389 return;
14390 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) {
14391 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc);
14392 return;
14393 }
14394 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) {
14395 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc);
14396 return;
14397 }
14398 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) {
14399 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc);
14400 return;
14401 }
14402
14403 const ArrayType *AT = S.Context.getAsArrayType(PType);
14404 if (!AT)
14405 return;
14406
14407 if (AT->getSizeModifier() != ArrayType::Star) {
14408 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc);
14409 return;
14410 }
14411
14412 S.Diag(Loc, diag::err_array_star_in_function_definition);
14413}
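// Illustrative user code (assumed) for the diagnostic above: the [*] size
// notation is only valid in declarations, not in definitions:
//
//   void f(int a[*]);      // OK: declaration
//   void f(int a[*]) { }   // error: err_array_star_in_function_definition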
14414
14415/// CheckParmsForFunctionDef - Check that the parameters of the given
14416/// function are appropriate for the definition of a function. This
14417/// takes care of any checks that cannot be performed on the
14418/// declaration itself, e.g., that the types of each of the function
14419/// parameters are complete.
14420bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
14421 bool CheckParameterNames) {
14422 bool HasInvalidParm = false;
14423 for (ParmVarDecl *Param : Parameters) {
14424 // C99 6.7.5.3p4: the parameters in a parameter type list in a
14425 // function declarator that is part of a function definition of
14426 // that function shall not have incomplete type.
14427 //
14428 // This is also C++ [dcl.fct]p6.
14429 if (!Param->isInvalidDecl() &&
14430 RequireCompleteType(Param->getLocation(), Param->getType(),
14431 diag::err_typecheck_decl_incomplete_type)) {
14432 Param->setInvalidDecl();
14433 HasInvalidParm = true;
14434 }
14435
14436 // C99 6.9.1p5: If the declarator includes a parameter type list, the
14437 // declaration of each parameter shall include an identifier.
14438 if (CheckParameterNames && Param->getIdentifier() == nullptr &&
14439 !Param->isImplicit() && !getLangOpts().CPlusPlus) {
14440 // Diagnose this as an extension in C17 and earlier.
14441 if (!getLangOpts().C2x)
14442 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x);
14443 }
14444
14445 // C99 6.7.5.3p12:
14446 // If the function declarator is not part of a definition of that
14447 // function, parameters may have incomplete type and may use the [*]
14448 // notation in their sequences of declarator specifiers to specify
14449 // variable length array types.
14450 QualType PType = Param->getOriginalType();
14451 // FIXME: This diagnostic should point the '[*]' if source-location
14452 // information is added for it.
14453 diagnoseArrayStarInParamType(*this, PType, Param->getLocation());
14454
14455 // If the parameter is a c++ class type and it has to be destructed in the
14456 // callee function, declare the destructor so that it can be called by the
14457 // callee function. Do not perform any direct access check on the dtor here.
14458 if (!Param->isInvalidDecl()) {
14459 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) {
14460 if (!ClassDecl->isInvalidDecl() &&
14461 !ClassDecl->hasIrrelevantDestructor() &&
14462 !ClassDecl->isDependentContext() &&
14463 ClassDecl->isParamDestroyedInCallee()) {
14464 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
14465 MarkFunctionReferenced(Param->getLocation(), Destructor);
14466 DiagnoseUseOfDecl(Destructor, Param->getLocation());
14467 }
14468 }
14469 }
14470
14471 // Parameters with the pass_object_size attribute only need to be marked
14472 // constant at function definitions. Because we lack information about
14473 // whether we're on a declaration or definition when we're instantiating the
14474 // attribute, we need to check for constness here.
14475 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>())
14476 if (!Param->getType().isConstQualified())
14477 Diag(Param->getLocation(), diag::err_attribute_pointers_only)
14478 << Attr->getSpelling() << 1;
14479
14480 // Check for parameter names shadowing fields from the class.
14481 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) {
14482 // The owning context for the parameter should be the function, but we
14483 // want to see if this function's declaration context is a record.
14484 DeclContext *DC = Param->getDeclContext();
14485 if (DC && DC->isFunctionOrMethod()) {
14486 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent()))
14487 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(),
14488 RD, /*DeclIsField*/ false);
14489 }
14490 }
14491 }
14492
14493 return HasInvalidParm;
14494}
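// Illustrative user code (assumed) for the incomplete-parameter-type check:
//
//   struct S;               // forward declaration only
//   void g(struct S s) { }  // error: parameter has incomplete type
//                           // (err_typecheck_decl_incomplete_type)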
14495
14496Optional<std::pair<CharUnits, CharUnits>>
14497static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx);
14498
14499/// Compute the alignment and offset of the base class object given the
14500/// derived-to-base cast expression and the alignment and offset of the derived
14501/// class object.
14502static std::pair<CharUnits, CharUnits>
14503getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType,
14504 CharUnits BaseAlignment, CharUnits Offset,
14505 ASTContext &Ctx) {
14506 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE;
14507 ++PathI) {
14508 const CXXBaseSpecifier *Base = *PathI;
14509 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();
14510 if (Base->isVirtual()) {
14511 // The complete object may have a lower alignment than the non-virtual
14512 // alignment of the base, in which case the base may be misaligned. Choose
14513 // the smaller of the non-virtual alignment and BaseAlignment, which is a
14514 // conservative lower bound of the complete object alignment.
14515 CharUnits NonVirtualAlignment =
14516 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment();
14517 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment);
14518 Offset = CharUnits::Zero();
14519 } else {
14520 const ASTRecordLayout &RL =
14521 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl());
14522 Offset += RL.getBaseClassOffset(BaseDecl);
14523 }
14524 DerivedType = Base->getType();
14525 }
14526
14527 return std::make_pair(BaseAlignment, Offset);
14528}
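// Worked example (hypothetical layout): if the derived object is known to be
// 8-byte aligned at offset 0 and a non-virtual base lives at offset 4 within
// it, the result is {alignment 8, offset 4}. For a virtual base the alignment
// is instead clamped to min(8, non-virtual alignment of the base) and the
// offset is reset to 0, since the placement of a virtual base is not known
// statically.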
14529
14530/// Compute the alignment and offset of a binary additive operator.
14531static Optional<std::pair<CharUnits, CharUnits>>
14532getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE,
14533 bool IsSub, ASTContext &Ctx) {
14534 QualType PointeeType = PtrE->getType()->getPointeeType();
14535
14536 if (!PointeeType->isConstantSizeType())
14537 return llvm::None;
14538
14539 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx);
14540
14541 if (!P)
14542 return llvm::None;
14543
14544 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType);
14545 if (Optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) {
14546 CharUnits Offset = EltSize * IdxRes->getExtValue();
14547 if (IsSub)
14548 Offset = -Offset;
14549 return std::make_pair(P->first, P->second + Offset);
14550 }
14551
14552 // If the integer expression isn't a constant expression, compute the lower
14553 // bound of the alignment using the alignment and offset of the pointer
14554 // expression and the element size.
14555 return std::make_pair(
14556 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize),
14557 CharUnits::Zero());
14558}
14559
14560/// This helper function takes an lvalue expression and returns the alignment of
14561/// a VarDecl and a constant offset from the VarDecl.
14562Optional<std::pair<CharUnits, CharUnits>>
14563static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) {
14564 E = E->IgnoreParens();
14565 switch (E->getStmtClass()) {
14566 default:
14567 break;
14568 case Stmt::CStyleCastExprClass:
14569 case Stmt::CXXStaticCastExprClass:
14570 case Stmt::ImplicitCastExprClass: {
14571 auto *CE = cast<CastExpr>(E);
14572 const Expr *From = CE->getSubExpr();
14573 switch (CE->getCastKind()) {
14574 default:
14575 break;
14576 case CK_NoOp:
14577 return getBaseAlignmentAndOffsetFromLValue(From, Ctx);
14578 case CK_UncheckedDerivedToBase:
14579 case CK_DerivedToBase: {
14580 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx);
14581 if (!P)
14582 break;
14583 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first,
14584 P->second, Ctx);
14585 }
14586 }
14587 break;
14588 }
14589 case Stmt::ArraySubscriptExprClass: {
14590 auto *ASE = cast<ArraySubscriptExpr>(E);
14591 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(),
14592 false, Ctx);
14593 }
14594 case Stmt::DeclRefExprClass: {
14595 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) {
14596 // FIXME: If VD is captured by copy or is an escaping __block variable,
14597 // use the alignment of VD's type.
14598 if (!VD->getType()->isReferenceType())
14599 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero());
14600 if (VD->hasInit())
14601 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx);
14602 }
14603 break;
14604 }
14605 case Stmt::MemberExprClass: {
14606 auto *ME = cast<MemberExpr>(E);
14607 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
14608 if (!FD || FD->getType()->isReferenceType() ||
14609 FD->getParent()->isInvalidDecl())
14610 break;
14611 Optional<std::pair<CharUnits, CharUnits>> P;
14612 if (ME->isArrow())
14613 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx);
14614 else
14615 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx);
14616 if (!P)
14617 break;
14618 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent());
14619 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex());
14620 return std::make_pair(P->first,
14621 P->second + CharUnits::fromQuantity(Offset));
14622 }
14623 case Stmt::UnaryOperatorClass: {
14624 auto *UO = cast<UnaryOperator>(E);
14625 switch (UO->getOpcode()) {
14626 default:
14627 break;
14628 case UO_Deref:
14629 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx);
14630 }
14631 break;
14632 }
14633 case Stmt::BinaryOperatorClass: {
14634 auto *BO = cast<BinaryOperator>(E);
14635 auto Opcode = BO->getOpcode();
14636 switch (Opcode) {
14637 default:
14638 break;
14639 case BO_Comma:
14640 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx);
14641 }
14642 break;
14643 }
14644 }
14645 return llvm::None;
14646}
14647
14648/// This helper function takes a pointer expression and returns the alignment of
14649/// a VarDecl and a constant offset from the VarDecl.
14650Optional<std::pair<CharUnits, CharUnits>>
14651static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) {
14652 E = E->IgnoreParens();
14653 switch (E->getStmtClass()) {
14654 default:
14655 break;
14656 case Stmt::CStyleCastExprClass:
14657 case Stmt::CXXStaticCastExprClass:
14658 case Stmt::ImplicitCastExprClass: {
14659 auto *CE = cast<CastExpr>(E);
14660 const Expr *From = CE->getSubExpr();
14661 switch (CE->getCastKind()) {
14662 default:
14663 break;
14664 case CK_NoOp:
14665 return getBaseAlignmentAndOffsetFromPtr(From, Ctx);
14666 case CK_ArrayToPointerDecay:
14667 return getBaseAlignmentAndOffsetFromLValue(From, Ctx);
14668 case CK_UncheckedDerivedToBase:
14669 case CK_DerivedToBase: {
14670 auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx);
14671 if (!P)
14672 break;
14673 return getDerivedToBaseAlignmentAndOffset(
14674 CE, From->getType()->getPointeeType(), P->first, P->second, Ctx);
14675 }
14676 }
14677 break;
14678 }
14679 case Stmt::CXXThisExprClass: {
14680 auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl();
14681 CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment();
14682 return std::make_pair(Alignment, CharUnits::Zero());
14683 }
14684 case Stmt::UnaryOperatorClass: {
14685 auto *UO = cast<UnaryOperator>(E);
14686 if (UO->getOpcode() == UO_AddrOf)
14687 return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx);
14688 break;
14689 }
14690 case Stmt::BinaryOperatorClass: {
14691 auto *BO = cast<BinaryOperator>(E);
14692 auto Opcode = BO->getOpcode();
14693 switch (Opcode) {
14694 default:
14695 break;
14696 case BO_Add:
14697 case BO_Sub: {
14698 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS();
14699 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType())
14700 std::swap(LHS, RHS);
14701 return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub,
14702 Ctx);
14703 }
14704 case BO_Comma:
14705 return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx);
14706 }
14707 break;
14708 }
14709 }
14710 return llvm::None;
14711}
14712
14713static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) {
14714 // See if we can compute the alignment of a VarDecl and an offset from it.
14715 Optional<std::pair<CharUnits, CharUnits>> P =
14716 getBaseAlignmentAndOffsetFromPtr(E, S.Context);
14717
14718 if (P)
14719 return P->first.alignmentAtOffset(P->second);
14720
14721 // If that failed, return the type's alignment.
14722 return S.Context.getTypeAlignInChars(E->getType()->getPointeeType());
14723}
14724
14725/// CheckCastAlign - Implements -Wcast-align, which warns when a
14726/// pointer cast increases the alignment requirements.
14727void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
14728 // This is actually a lot of work to potentially be doing on every
14729 // cast; don't do it if we're ignoring -Wcast-align (as is the default).
14730 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin()))
14731 return;
14732
14733 // Ignore dependent types.
14734 if (T->isDependentType() || Op->getType()->isDependentType())
14735 return;
14736
14737 // Require that the destination be a pointer type.
14738 const PointerType *DestPtr = T->getAs<PointerType>();
14739 if (!DestPtr) return;
14740
14741 // If the destination has alignment 1, we're done.
14742 QualType DestPointee = DestPtr->getPointeeType();
14743 if (DestPointee->isIncompleteType()) return;
14744 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee);
14745 if (DestAlign.isOne()) return;
14746
14747 // Require that the source be a pointer type.
14748 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>();
14749 if (!SrcPtr) return;
14750 QualType SrcPointee = SrcPtr->getPointeeType();
14751
14752 // Explicitly allow casts from cv void*. We already implicitly
14753 // allowed casts to cv void*, since they have alignment 1.
14754 // Also allow casts involving incomplete types, which implicitly
14755 // includes 'void'.
14756 if (SrcPointee->isIncompleteType()) return;
14757
14758 CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this);
14759
14760 if (SrcAlign >= DestAlign) return;
14761
14762 Diag(TRange.getBegin(), diag::warn_cast_align)
14763 << Op->getType() << T
14764 << static_cast<unsigned>(SrcAlign.getQuantity())
14765 << static_cast<unsigned>(DestAlign.getQuantity())
14766 << TRange << Op->getSourceRange();
14767}
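For context, a hedged sketch of what this check accepts and rejects when -Wcast-align is enabled (it is ignored by default, as the comment above notes). The presumed-alignment helpers earlier in this section let a cast through when the source pointer can be traced back to a declaration whose alignment already satisfies the destination type.

    char raw[16] __attribute__((aligned(8)));
    int *ok = (int *)raw;     // not flagged: the array's declared alignment (8) covers int's requirement

    int *bump(char *p) {
      return (int *)p;        // flagged: only char's alignment (1) is known here, below int's requirement
    }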
14768
14769/// Check whether this array fits the idiom of a size-one tail padded
14770/// array member of a struct.
14771///
14772/// We avoid emitting out-of-bounds access warnings for such arrays as they are
14773/// commonly used to emulate flexible arrays in C89 code.
14774static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size,
14775 const NamedDecl *ND) {
14776 if (Size != 1 || !ND) return false;
14777
14778 const FieldDecl *FD = dyn_cast<FieldDecl>(ND);
14779 if (!FD) return false;
14780
14781 // Don't consider sizes resulting from macro expansions or template argument
14782 // substitution to form C89 tail-padded arrays.
14783
14784 TypeSourceInfo *TInfo = FD->getTypeSourceInfo();
14785 while (TInfo) {
14786 TypeLoc TL = TInfo->getTypeLoc();
14787 // Look through typedefs.
14788 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) {
14789 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl();
14790 TInfo = TDL->getTypeSourceInfo();
14791 continue;
14792 }
14793 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) {
14794 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr());
14795 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID())
14796 return false;
14797 }
14798 break;
14799 }
14800
14801 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext());
14802 if (!RD) return false;
14803 if (RD->isUnion()) return false;
14804 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
14805 if (!CRD->isStandardLayout()) return false;
14806 }
14807
14808 // See if this is the last field decl in the record.
14809 const Decl *D = FD;
14810 while ((D = D->getNextDeclInContext()))
14811 if (isa<FieldDecl>(D))
14812 return false;
14813 return true;
14814}
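The idiom recognized above, sketched for illustration: a trailing one-element array in a standard-layout struct is treated as a C89-style flexible array, so out-of-range accesses into it are deliberately not reported by the bounds checking that follows.

    struct Packet {
      unsigned length;
      char data[1];           // last field, literal size 1: indexing past data[0] is not flagged
    };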
14815
14816void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
14817 const ArraySubscriptExpr *ASE,
14818 bool AllowOnePastEnd, bool IndexNegated) {
14819 // Already diagnosed by the constant evaluator.
14820 if (isConstantEvaluated())
14821 return;
14822
14823 IndexExpr = IndexExpr->IgnoreParenImpCasts();
14824 if (IndexExpr->isValueDependent())
14825 return;
14826
14827 const Type *EffectiveType =
14828 BaseExpr->getType()->getPointeeOrArrayElementType();
14829 BaseExpr = BaseExpr->IgnoreParenCasts();
14830 const ConstantArrayType *ArrayTy =
14831 Context.getAsConstantArrayType(BaseExpr->getType());
14832
14833 const Type *BaseType =
14834 ArrayTy == nullptr ? nullptr : ArrayTy->getElementType().getTypePtr();
14835 bool IsUnboundedArray = (BaseType == nullptr);
14836 if (EffectiveType->isDependentType() ||
14837 (!IsUnboundedArray && BaseType->isDependentType()))
14838 return;
14839
14840 Expr::EvalResult Result;
14841 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects))
14842 return;
14843
14844 llvm::APSInt index = Result.Val.getInt();
14845 if (IndexNegated) {
14846 index.setIsUnsigned(false);
14847 index = -index;
14848 }
14849
14850 const NamedDecl *ND = nullptr;
14851 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
14852 ND = DRE->getDecl();
14853 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
14854 ND = ME->getMemberDecl();
14855
14856 if (IsUnboundedArray) {
14857 if (index.isUnsigned() || !index.isNegative()) {
14858 const auto &ASTC = getASTContext();
14859 unsigned AddrBits =
14860 ASTC.getTargetInfo().getPointerWidth(ASTC.getTargetAddressSpace(
14861 EffectiveType->getCanonicalTypeInternal()));
14862 if (index.getBitWidth() < AddrBits)
14863 index = index.zext(AddrBits);
14864 Optional<CharUnits> ElemCharUnits =
14865 ASTC.getTypeSizeInCharsIfKnown(EffectiveType);
14866 // PR50741 - If EffectiveType has unknown size (e.g., if it's a void
14867 // pointer) bounds-checking isn't meaningful.
14868 if (!ElemCharUnits)
14869 return;
14870 llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity());
14871 // If index has more active bits than address space, we already know
14872 // we have a bounds violation to warn about. Otherwise, compute
14873 // address of (index + 1)th element, and warn about bounds violation
14874 // only if that address exceeds address space.
14875 if (index.getActiveBits() <= AddrBits) {
14876 bool Overflow;
14877 llvm::APInt Product(index);
14878 Product += 1;
14879 Product = Product.umul_ov(ElemBytes, Overflow);
14880 if (!Overflow && Product.getActiveBits() <= AddrBits)
14881 return;
14882 }
14883
14884 // Need to compute max possible elements in address space, since that
14885 // is included in diag message.
14886 llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits);
14887 MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth()));
14888 MaxElems += 1;
14889 ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth());
14890 MaxElems = MaxElems.udiv(ElemBytes);
14891
14892 unsigned DiagID =
14893 ASE ? diag::warn_array_index_exceeds_max_addressable_bounds
14894 : diag::warn_ptr_arith_exceeds_max_addressable_bounds;
14895
14896 // Diag message shows element size in bits and in "bytes" (platform-
14897 // dependent CharUnits)
14898 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
14899 PDiag(DiagID)
14900 << toString(index, 10, true) << AddrBits
14901 << (unsigned)ASTC.toBits(*ElemCharUnits)
14902 << toString(ElemBytes, 10, false)
14903 << toString(MaxElems, 10, false)
14904 << (unsigned)MaxElems.getLimitedValue(~0U)
14905 << IndexExpr->getSourceRange());
14906
14907 if (!ND) {
14908 // Try harder to find a NamedDecl to point at in the note.
14909 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
14910 BaseExpr = ASE->getBase()->IgnoreParenCasts();
14911 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
14912 ND = DRE->getDecl();
14913 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
14914 ND = ME->getMemberDecl();
14915 }
14916
14917 if (ND)
14918 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
14919 PDiag(diag::note_array_declared_here) << ND);
14920 }
14921 return;
14922 }
14923
14924 if (index.isUnsigned() || !index.isNegative()) {
14925 // It is possible that the type of the base expression after
14926 // IgnoreParenCasts is incomplete, even though the type of the base
14927 // expression before IgnoreParenCasts is complete (see PR39746 for an
14928 // example). In this case we have no information about whether the array
14929 // access exceeds the array bounds. However we can still diagnose an array
14930 // access which precedes the array bounds.
14931 if (BaseType->isIncompleteType())
14932 return;
14933
14934 llvm::APInt size = ArrayTy->getSize();
14935 if (!size.isStrictlyPositive())
14936 return;
14937
14938 if (BaseType != EffectiveType) {
14939 // Make sure we're comparing apples to apples when comparing index to size
14940 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType);
14941 uint64_t array_typesize = Context.getTypeSize(BaseType);
14942 // Handle ptrarith_typesize being zero, such as when casting to void*
14943 if (!ptrarith_typesize) ptrarith_typesize = 1;
14944 if (ptrarith_typesize != array_typesize) {
14945 // There's a cast to a different size type involved
14946 uint64_t ratio = array_typesize / ptrarith_typesize;
14947 // TODO: Be smarter about handling cases where array_typesize is not a
14948 // multiple of ptrarith_typesize
14949 if (ptrarith_typesize * ratio == array_typesize)
14950 size *= llvm::APInt(size.getBitWidth(), ratio);
14951 }
14952 }
14953
14954 if (size.getBitWidth() > index.getBitWidth())
14955 index = index.zext(size.getBitWidth());
14956 else if (size.getBitWidth() < index.getBitWidth())
14957 size = size.zext(index.getBitWidth());
14958
14959 // For array subscripting the index must be less than size, but for pointer
14960 // arithmetic also allow the index (offset) to be equal to size since
14961 // computing the next address after the end of the array is legal and
14962 // commonly done e.g. in C++ iterators and range-based for loops.
14963 if (AllowOnePastEnd ? index.ule(size) : index.ult(size))
14964 return;
14965
14966 // Also don't warn for arrays of size 1 which are members of some
14967 // structure. These are often used to approximate flexible arrays in C89
14968 // code.
14969 if (IsTailPaddedMemberArray(*this, size, ND))
14970 return;
14971
14972 // Suppress the warning if the subscript expression (as identified by the
14973 // ']' location) and the index expression are both from macro expansions
14974 // within a system header.
14975 if (ASE) {
14976 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc(
14977 ASE->getRBracketLoc());
14978 if (SourceMgr.isInSystemHeader(RBracketLoc)) {
14979 SourceLocation IndexLoc =
14980 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc());
14981 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc))
14982 return;
14983 }
14984 }
14985
14986 unsigned DiagID = ASE ? diag::warn_array_index_exceeds_bounds
14987 : diag::warn_ptr_arith_exceeds_bounds;
14988
14989 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
14990 PDiag(DiagID) << toString(index, 10, true)
14991 << toString(size, 10, true)
14992 << (unsigned)size.getLimitedValue(~0U)
14993 << IndexExpr->getSourceRange());
14994 } else {
14995 unsigned DiagID = diag::warn_array_index_precedes_bounds;
14996 if (!ASE) {
14997 DiagID = diag::warn_ptr_arith_precedes_bounds;
14998 if (index.isNegative()) index = -index;
14999 }
15000
15001 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
15002 PDiag(DiagID) << toString(index, 10, true)
15003 << IndexExpr->getSourceRange());
15004 }
15005
15006 if (!ND) {
15007 // Try harder to find a NamedDecl to point at in the note.
15008 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
15009 BaseExpr = ASE->getBase()->IgnoreParenCasts();
15010 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
15011 ND = DRE->getDecl();
15012 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
15013 ND = ME->getMemberDecl();
15014 }
15015
15016 if (ND)
15017 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
15018 PDiag(diag::note_array_declared_here) << ND);
15019}
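Roughly, the kinds of accesses the function above diagnoses for arrays of known constant size (a sketch, not taken from the analyzed file):

    int buf[4];
    int past_end() { return buf[4]; }     // flagged: index 4 is past the end of a 4-element array
    int before()   { return buf[-1]; }    // flagged: index -1 precedes the array bounds
    int *arith()   { return buf + 4; }    // not flagged: pointer arithmetic may form the one-past-the-end address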
15020
15021void Sema::CheckArrayAccess(const Expr *expr) {
15022 int AllowOnePastEnd = 0;
15023 while (expr) {
15024 expr = expr->IgnoreParenImpCasts();
15025 switch (expr->getStmtClass()) {
15026 case Stmt::ArraySubscriptExprClass: {
15027 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr);
15028 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE,
15029 AllowOnePastEnd > 0);
15030 expr = ASE->getBase();
15031 break;
15032 }
15033 case Stmt::MemberExprClass: {
15034 expr = cast<MemberExpr>(expr)->getBase();
15035 break;
15036 }
15037 case Stmt::OMPArraySectionExprClass: {
15038 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr);
15039 if (ASE->getLowerBound())
15040 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(),
15041 /*ASE=*/nullptr, AllowOnePastEnd > 0);
15042 return;
15043 }
15044 case Stmt::UnaryOperatorClass: {
15045 // Only unwrap the * and & unary operators
15046 const UnaryOperator *UO = cast<UnaryOperator>(expr);
15047 expr = UO->getSubExpr();
15048 switch (UO->getOpcode()) {
15049 case UO_AddrOf:
15050 AllowOnePastEnd++;
15051 break;
15052 case UO_Deref:
15053 AllowOnePastEnd--;
15054 break;
15055 default:
15056 return;
15057 }
15058 break;
15059 }
15060 case Stmt::ConditionalOperatorClass: {
15061 const ConditionalOperator *cond = cast<ConditionalOperator>(expr);
15062 if (const Expr *lhs = cond->getLHS())
15063 CheckArrayAccess(lhs);
15064 if (const Expr *rhs = cond->getRHS())
15065 CheckArrayAccess(rhs);
15066 return;
15067 }
15068 case Stmt::CXXOperatorCallExprClass: {
15069 const auto *OCE = cast<CXXOperatorCallExpr>(expr);
15070 for (const auto *Arg : OCE->arguments())
15071 CheckArrayAccess(Arg);
15072 return;
15073 }
15074 default:
15075 return;
15076 }
15077 }
15078}
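The AddrOf/Deref counting in the walker above is what allows forming, but not using, the one-past-the-end element; a rough sketch of the effect:

    int a[8];
    int *end = &a[8];        // not flagged: '&' raises AllowOnePastEnd, so an index equal to the size is accepted
    int oops = *(&a[8]);     // the dereference cancels the '&', so the walker treats this like a plain
                             // out-of-bounds read of element 8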
15079
15080//===--- CHECK: Objective-C retain cycles ----------------------------------//
15081
15082namespace {
15083
15084struct RetainCycleOwner {
15085 VarDecl *Variable = nullptr;
15086 SourceRange Range;
15087 SourceLocation Loc;
15088 bool Indirect = false;
15089
15090 RetainCycleOwner() = default;
15091
15092 void setLocsFrom(Expr *e) {
15093 Loc = e->getExprLoc();
15094 Range = e->getSourceRange();
15095 }
15096};
15097
15098} // namespace
15099
15100/// Consider whether capturing the given variable can possibly lead to
15101/// a retain cycle.
15102static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
15103 // In ARC, it's captured strongly iff the variable has __strong
15104 // lifetime. In MRR, it's captured strongly if the variable is
15105 // __block and has an appropriate type.
15106 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
15107 return false;
15108
15109 owner.Variable = var;
15110 if (ref)
15111 owner.setLocsFrom(ref);
15112 return true;
15113}
15114
15115static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
15116 while (true) {
15117 e = e->IgnoreParens();
15118 if (CastExpr *cast = dyn_cast<CastExpr>(e)) {
15119 switch (cast->getCastKind()) {
15120 case CK_BitCast:
15121 case CK_LValueBitCast:
15122 case CK_LValueToRValue:
15123 case CK_ARCReclaimReturnedObject:
15124 e = cast->getSubExpr();
15125 continue;
15126
15127 default:
15128 return false;
15129 }
15130 }
15131
15132 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) {
15133 ObjCIvarDecl *ivar = ref->getDecl();
15134 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
15135 return false;
15136
15137 // Try to find a retain cycle in the base.
15138 if (!findRetainCycleOwner(S, ref->getBase(), owner))
15139 return false;
15140
15141 if (ref->isFreeIvar()) owner.setLocsFrom(ref);
15142 owner.Indirect = true;
15143 return true;
15144 }
15145
15146 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) {
15147 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl());
15148 if (!var) return false;
15149 return considerVariable(var, ref, owner);
15150 }
15151
15152 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) {
15153 if (member->isArrow()) return false;
15154
15155 // Don't count this as an indirect ownership.
15156 e = member->getBase();
15157 continue;
15158 }
15159
15160 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
15161 // Only pay attention to pseudo-objects on property references.
15162 ObjCPropertyRefExpr *pre
15163 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm()
15164 ->IgnoreParens());
15165 if (!pre) return false;
15166 if (pre->isImplicitProperty()) return false;
15167 ObjCPropertyDecl *property = pre->getExplicitProperty();
15168 if (!property->isRetaining() &&
15169 !(property->getPropertyIvarDecl() &&
15170 property->getPropertyIvarDecl()->getType()
15171 .getObjCLifetime() == Qualifiers::OCL_Strong))
15172 return false;
15173
15174 owner.Indirect = true;
15175 if (pre->isSuperReceiver()) {
15176 owner.Variable = S.getCurMethodDecl()->getSelfDecl();
15177 if (!owner.Variable)
15178 return false;
15179 owner.Loc = pre->getLocation();
15180 owner.Range = pre->getSourceRange();
15181 return true;
15182 }
15183 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase())
15184 ->getSourceExpr());
15185 continue;
15186 }
15187
15188 // Array ivars?
15189
15190 return false;
15191 }
15192}
15193
15194namespace {
15195
15196 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
15197 ASTContext &Context;
15198 VarDecl *Variable;
15199 Expr *Capturer = nullptr;
15200 bool VarWillBeReased = false;
15201
15202 FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
15203 : EvaluatedExprVisitor<FindCaptureVisitor>(Context),
15204 Context(Context), Variable(variable) {}
15205
15206 void VisitDeclRefExpr(DeclRefExpr *ref) {
15207 if (ref->getDecl() == Variable && !Capturer)
15208 Capturer = ref;
15209 }
15210
15211 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
15212 if (Capturer) return;
15213 Visit(ref->getBase());
15214 if (Capturer && ref->isFreeIvar())
15215 Capturer = ref;
15216 }
15217
15218 void VisitBlockExpr(BlockExpr *block) {
15219 // Look inside nested blocks
15220 if (block->getBlockDecl()->capturesVariable(Variable))
15221 Visit(block->getBlockDecl()->getBody());
15222 }
15223
15224 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) {
15225 if (Capturer) return;
15226 if (OVE->getSourceExpr())
15227 Visit(OVE->getSourceExpr());
15228 }
15229
15230 void VisitBinaryOperator(BinaryOperator *BinOp) {
15231 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign)
15232 return;
15233 Expr *LHS = BinOp->getLHS();
15234 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) {
15235 if (DRE->getDecl() != Variable)
15236 return;
15237 if (Expr *RHS = BinOp->getRHS()) {
15238 RHS = RHS->IgnoreParenCasts();
15239 Optional<llvm::APSInt> Value;
15240 VarWillBeReased =
15241 (RHS && (Value = RHS->getIntegerConstantExpr(Context)) &&
15242 *Value == 0);
15243 }
15244 }
15245 }
15246 };
15247
15248} // namespace
15249
15250/// Check whether the given argument is a block which captures a
15251/// variable.
15252static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) {
15253 assert(owner.Variable && owner.Loc.isValid());
15254
15255 e = e->IgnoreParenCasts();
15256
15257 // Look through [^{...} copy] and Block_copy(^{...}).
15258 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) {
15259 Selector Cmd = ME->getSelector();
15260 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") {
15261 e = ME->getInstanceReceiver();
15262 if (!e)
15263 return nullptr;
15264 e = e->IgnoreParenCasts();
15265 }
15266 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) {
15267 if (CE->getNumArgs() == 1) {
15268 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl());
15269 if (Fn) {
15270 const IdentifierInfo *FnI = Fn->getIdentifier();
15271 if (FnI && FnI->isStr("_Block_copy")) {
15272 e = CE->getArg(0)->IgnoreParenCasts();
15273 }
15274 }
15275 }
15276 }
15277
15278 BlockExpr *block = dyn_cast<BlockExpr>(e);
15279 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable))
15280 return nullptr;
15281
15282 FindCaptureVisitor visitor(S.Context, owner.Variable);
15283 visitor.Visit(block->getBlockDecl()->getBody());
15284 return visitor.VarWillBeReased ? nullptr : visitor.Capturer;
15285}
15286
15287static void diagnoseRetainCycle(Sema &S, Expr *capturer,
15288 RetainCycleOwner &owner) {
15289 assert(capturer);
15290 assert(owner.Variable && owner.Loc.isValid());
15291
15292 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle)
15293 << owner.Variable << capturer->getSourceRange();
15294 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner)
15295 << owner.Indirect << owner.Range;
15296}
15297
15298/// Check for a keyword selector that starts with the word 'add' or
15299/// 'set'.
15300static bool isSetterLikeSelector(Selector sel) {
15301 if (sel.isUnarySelector()) return false;
15302
15303 StringRef str = sel.getNameForSlot(0);
15304 while (!str.empty() && str.front() == '_') str = str.substr(1);
15305 if (str.startswith("set"))
15306 str = str.substr(3);
15307 else if (str.startswith("add")) {
15308 // Specially allow 'addOperationWithBlock:'.
15309 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock"))
15310 return false;
15311 str = str.substr(3);
15312 }
15313 else
15314 return false;
15315
15316 if (str.empty()) return true;
15317 return !isLowercase(str.front());
15318}
15319
15320static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S,
15321 ObjCMessageExpr *Message) {
15322 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass(
15323 Message->getReceiverInterface(),
15324 NSAPI::ClassId_NSMutableArray);
15325 if (!IsMutableArray) {
15326 return None;
15327 }
15328
15329 Selector Sel = Message->getSelector();
15330
15331 Optional<NSAPI::NSArrayMethodKind> MKOpt =
15332 S.NSAPIObj->getNSArrayMethodKind(Sel);
15333 if (!MKOpt) {
15334 return None;
15335 }
15336
15337 NSAPI::NSArrayMethodKind MK = *MKOpt;
15338
15339 switch (MK) {
15340 case NSAPI::NSMutableArr_addObject:
15341 case NSAPI::NSMutableArr_insertObjectAtIndex:
15342 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript:
15343 return 0;
15344 case NSAPI::NSMutableArr_replaceObjectAtIndex:
15345 return 1;
15346
15347 default:
15348 return None;
15349 }
15350
15351 return None;
15352}
15353
15354static
15355Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S,
15356 ObjCMessageExpr *Message) {
15357 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass(
15358 Message->getReceiverInterface(),
15359 NSAPI::ClassId_NSMutableDictionary);
15360 if (!IsMutableDictionary) {
15361 return None;
15362 }
15363
15364 Selector Sel = Message->getSelector();
15365
15366 Optional<NSAPI::NSDictionaryMethodKind> MKOpt =
15367 S.NSAPIObj->getNSDictionaryMethodKind(Sel);
15368 if (!MKOpt) {
15369 return None;
15370 }
15371
15372 NSAPI::NSDictionaryMethodKind MK = *MKOpt;
15373
15374 switch (MK) {
15375 case NSAPI::NSMutableDict_setObjectForKey:
15376 case NSAPI::NSMutableDict_setValueForKey:
15377 case NSAPI::NSMutableDict_setObjectForKeyedSubscript:
15378 return 0;
15379
15380 default:
15381 return None;
15382 }
15383
15384 return None;
15385}
15386
15387static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
15388 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass(
15389 Message->getReceiverInterface(),
15390 NSAPI::ClassId_NSMutableSet);
15391
15392 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass(
15393 Message->getReceiverInterface(),
15394 NSAPI::ClassId_NSMutableOrderedSet);
15395 if (!IsMutableSet && !IsMutableOrderedSet) {
15396 return None;
15397 }
15398
15399 Selector Sel = Message->getSelector();
15400
15401 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel);
15402 if (!MKOpt) {
15403 return None;
15404 }
15405
15406 NSAPI::NSSetMethodKind MK = *MKOpt;
15407
15408 switch (MK) {
15409 case NSAPI::NSMutableSet_addObject:
15410 case NSAPI::NSOrderedSet_setObjectAtIndex:
15411 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript:
15412 case NSAPI::NSOrderedSet_insertObjectAtIndex:
15413 return 0;
15414 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject:
15415 return 1;
15416 }
15417
15418 return None;
15419}
15420
15421void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
15422 if (!Message->isInstanceMessage()) {
15423 return;
15424 }
15425
15426 Optional<int> ArgOpt;
15427
15428 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) &&
15429 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) &&
15430 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) {
15431 return;
15432 }
15433
15434 int ArgIndex = *ArgOpt;
15435
15436 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts();
15437 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) {
15438 Arg = OE->getSourceExpr()->IgnoreImpCasts();
15439 }
15440
15441 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
15442 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
15443 if (ArgRE->isObjCSelfExpr()) {
15444 Diag(Message->getSourceRange().getBegin(),
15445 diag::warn_objc_circular_container)
15446 << ArgRE->getDecl() << StringRef("'super'");
15447 }
15448 }
15449 } else {
15450 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts();
15451
15452 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) {
15453 Receiver = OE->getSourceExpr()->IgnoreImpCasts();
15454 }
15455
15456 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) {
15457 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
15458 if (ReceiverRE->getDecl() == ArgRE->getDecl()) {
15459 ValueDecl *Decl = ReceiverRE->getDecl();
15460 Diag(Message->getSourceRange().getBegin(),
15461 diag::warn_objc_circular_container)
15462 << Decl << Decl;
15463 if (!ArgRE->isObjCSelfExpr()) {
15464 Diag(Decl->getLocation(),
15465 diag::note_objc_circular_container_declared_here)
15466 << Decl;
15467 }
15468 }
15469 }
15470 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) {
15471 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) {
15472 if (IvarRE->getDecl() == IvarArgRE->getDecl()) {
15473 ObjCIvarDecl *Decl = IvarRE->getDecl();
15474 Diag(Message->getSourceRange().getBegin(),
15475 diag::warn_objc_circular_container)
15476 << Decl << Decl;
15477 Diag(Decl->getLocation(),
15478 diag::note_objc_circular_container_declared_here)
15479 << Decl;
15480 }
15481 }
15482 }
15483 }
15484}
15485
15486/// Check a message send to see if it's likely to cause a retain cycle.
15487void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
15488 // Only check instance methods whose selector looks like a setter.
15489 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector()))
15490 return;
15491
15492 // Try to find a variable that the receiver is strongly owned by.
15493 RetainCycleOwner owner;
15494 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
15495 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner))
15496 return;
15497 } else {
15498 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
15499 owner.Variable = getCurMethodDecl()->getSelfDecl();
15500 owner.Loc = msg->getSuperLoc();
15501 owner.Range = msg->getSuperLoc();
15502 }
15503
15504 // Check whether the receiver is captured by any of the arguments.
15505 const ObjCMethodDecl *MD = msg->getMethodDecl();
15506 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) {
15507 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) {
15508 // noescape blocks should not be retained by the method.
15509 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>())
15510 continue;
15511 return diagnoseRetainCycle(*this, capturer, owner);
15512 }
15513 }
15514}
15515
15516/// Check a property assign to see if it's likely to cause a retain cycle.
15517void Sema::checkRetainCycles(Expr *receiver, Expr *argument) {
15518 RetainCycleOwner owner;
15519 if (!findRetainCycleOwner(*this, receiver, owner))
15520 return;
15521
15522 if (Expr *capturer = findCapturingExpr(*this, argument, owner))
15523 diagnoseRetainCycle(*this, capturer, owner);
15524}
15525
15526void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) {
15527 RetainCycleOwner Owner;
15528 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner))
15529 return;
15530
15531 // Because we don't have an expression for the variable, we have to set the
15532 // location explicitly here.
15533 Owner.Loc = Var->getLocation();
15534 Owner.Range = Var->getSourceRange();
15535
15536 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner))
15537 diagnoseRetainCycle(*this, Capturer, Owner);
15538}
15539
15540static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc,
15541 Expr *RHS, bool isProperty) {
15542 // Check if RHS is an Objective-C object literal, which also can get
15543 // immediately zapped in a weak reference. Note that we explicitly
15544 // allow ObjCStringLiterals, since those are designed to never really die.
15545 RHS = RHS->IgnoreParenImpCasts();
15546
15547 // This enum needs to match with the 'select' in
15548 // warn_objc_arc_literal_assign (off-by-1).
15549 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS);
15550 if (Kind == Sema::LK_String || Kind == Sema::LK_None)
15551 return false;
15552
15553 S.Diag(Loc, diag::warn_arc_literal_assign)
15554 << (unsigned) Kind
15555 << (isProperty ? 0 : 1)
15556 << RHS->getSourceRange();
15557
15558 return true;
15559}
15560
15561static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc,
15562 Qualifiers::ObjCLifetime LT,
15563 Expr *RHS, bool isProperty) {
15564 // Strip off any implicit cast added to get to the one ARC-specific.
15565 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
15566 if (cast->getCastKind() == CK_ARCConsumeObject) {
15567 S.Diag(Loc, diag::warn_arc_retained_assign)
15568 << (LT == Qualifiers::OCL_ExplicitNone)
15569 << (isProperty ? 0 : 1)
15570 << RHS->getSourceRange();
15571 return true;
15572 }
15573 RHS = cast->getSubExpr();
15574 }
15575
15576 if (LT == Qualifiers::OCL_Weak &&
15577 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty))
15578 return true;
15579
15580 return false;
15581}
15582
15583bool Sema::checkUnsafeAssigns(SourceLocation Loc,
15584 QualType LHS, Expr *RHS) {
15585 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime();
15586
15587 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone)
15588 return false;
15589
15590 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false))
15591 return true;
15592
15593 return false;
15594}
15595
15596void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
15597 Expr *LHS, Expr *RHS) {
15598 QualType LHSType;
15599 // The type of a PropertyRef appearing on the LHS must be obtained directly
15600 // from its declaration, since the expression itself carries a pseudo-object type.
15601 ObjCPropertyRefExpr *PRE
15602 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens());
15603 if (PRE && !PRE->isImplicitProperty()) {
15604 const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
15605 if (PD)
15606 LHSType = PD->getType();
15607 }
15608
15609 if (LHSType.isNull())
15610 LHSType = LHS->getType();
15611
15612 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime();
15613
15614 if (LT == Qualifiers::OCL_Weak) {
15615 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc))
15616 getCurFunction()->markSafeWeakUse(LHS);
15617 }
15618
15619 if (checkUnsafeAssigns(Loc, LHSType, RHS))
15620 return;
15621
15622 // FIXME: Check for other lifetimes.
15623 if (LT != Qualifiers::OCL_None)
15624 return;
15625
15626 if (PRE) {
15627 if (PRE->isImplicitProperty())
15628 return;
15629 const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
15630 if (!PD)
15631 return;
15632
15633 unsigned Attributes = PD->getPropertyAttributes();
15634 if (Attributes & ObjCPropertyAttribute::kind_assign) {
15635 // When the 'assign' attribute was not explicitly specified
15636 // by the user, ignore it and rely on the property type itself
15637 // for lifetime info.
15638 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten();
15639 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) &&
15640 LHSType->isObjCRetainableType())
15641 return;
15642
15643 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
15644 if (cast->getCastKind() == CK_ARCConsumeObject) {
15645 Diag(Loc, diag::warn_arc_retained_property_assign)
15646 << RHS->getSourceRange();
15647 return;
15648 }
15649 RHS = cast->getSubExpr();
15650 }
15651 } else if (Attributes & ObjCPropertyAttribute::kind_weak) {
15652 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true))
15653 return;
15654 }
15655 }
15656}
15657
15658//===--- CHECK: Empty statement body (-Wempty-body) ---------------------===//
15659
15660static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr,
15661 SourceLocation StmtLoc,
15662 const NullStmt *Body) {
15663 // Do not warn if the body is a macro that expands to nothing, e.g:
15664 //
15665 // #define CALL(x)
15666 // if (condition)
15667 // CALL(0);
15668 if (Body->hasLeadingEmptyMacro())
15669 return false;
15670
15671 // Get line numbers of statement and body.
15672 bool StmtLineInvalid;
15673 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc,
15674 &StmtLineInvalid);
15675 if (StmtLineInvalid)
15676 return false;
15677
15678 bool BodyLineInvalid;
15679 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(),
15680 &BodyLineInvalid);
15681 if (BodyLineInvalid)
15682 return false;
15683
15684 // Warn if null statement and body are on the same line.
15685 if (StmtLine != BodyLine)
15686 return false;
15687
15688 return true;
15689}
15690
15691void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
15692 const Stmt *Body,
15693 unsigned DiagID) {
15694 // Since this is a syntactic check, don't emit diagnostic for template
15695 // instantiations, this just adds noise.
15696 if (CurrentInstantiationScope)
15697 return;
15698
15699 // The body should be a null statement.
15700 const NullStmt *NBody = dyn_cast<NullStmt>(Body);
15701 if (!NBody)
15702 return;
15703
15704 // Do the usual checks.
15705 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
15706 return;
15707
15708 Diag(NBody->getSemiLoc(), DiagID);
15709 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
15710}
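A sketch of the case the same-line heuristic above is after, assuming -Wempty-body is enabled: the stray semicolon becomes the statement's whole body, and the diagnostic plus the "put the semicolon on a separate line" note point it out.

    void f(int x) {
      if (x == 0);   // flagged: the null statement on the same line is the entire 'if' body
        ++x;         // presumably this was meant to be the body
    }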
15711
15712void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
15713 const Stmt *PossibleBody) {
15714 assert(!CurrentInstantiationScope); // Ensured by caller
15715
15716 SourceLocation StmtLoc;
15717 const Stmt *Body;
15718 unsigned DiagID;
15719 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) {
15720 StmtLoc = FS->getRParenLoc();
15721 Body = FS->getBody();
15722 DiagID = diag::warn_empty_for_body;
15723 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) {
15724 StmtLoc = WS->getCond()->getSourceRange().getEnd();
15725 Body = WS->getBody();
15726 DiagID = diag::warn_empty_while_body;
15727 } else
15728 return; // Neither `for' nor `while'.
15729
15730 // The body should be a null statement.
15731 const NullStmt *NBody = dyn_cast<NullStmt>(Body);
15732 if (!NBody)
15733 return;
15734
15735 // Skip expensive checks if diagnostic is disabled.
15736 if (Diags.isIgnored(DiagID, NBody->getSemiLoc()))
15737 return;
15738
15739 // Do the usual checks.
15740 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
15741 return;
15742
15743 // `for(...);' and `while(...);' are popular idioms, so in order to keep
15744 // noise level low, emit diagnostics only if for/while is followed by a
15745 // CompoundStmt, e.g.:
15746 // for (int i = 0; i < n; i++);
15747 // {
15748 // a(i);
15749 // }
15750 // or if for/while is followed by a statement with more indentation
15751 // than for/while itself:
15752 // for (int i = 0; i < n; i++);
15753 // a(i);
15754 bool ProbableTypo = isa<CompoundStmt>(PossibleBody);
15755 if (!ProbableTypo) {
15756 bool BodyColInvalid;
15757 unsigned BodyCol = SourceMgr.getPresumedColumnNumber(
15758 PossibleBody->getBeginLoc(), &BodyColInvalid);
15759 if (BodyColInvalid)
15760 return;
15761
15762 bool StmtColInvalid;
15763 unsigned StmtCol =
15764 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid);
15765 if (StmtColInvalid)
15766 return;
15767
15768 if (BodyCol > StmtCol)
15769 ProbableTypo = true;
15770 }
15771
15772 if (ProbableTypo) {
15773 Diag(NBody->getSemiLoc(), DiagID);
15774 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
15775 }
15776}
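For the loop case, the compound-statement/indentation heuristic above only fires when the next statement looks like it was meant to be the body; a sketch:

    void count(int n) {
      int total = 0;
      for (int i = 0; i < n; ++i);   // flagged: the ';' is the body, and the next statement is indented further
        total += 1;
      (void)total;
    }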
15777
15778//===--- CHECK: Warn on self move with std::move. -------------------------===//
15779
15780/// DiagnoseSelfMove - Emits a warning if a value is moved to itself.
15781void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
15782 SourceLocation OpLoc) {
15783 if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc))
15784 return;
15785
15786 if (inTemplateInstantiation())
15787 return;
15788
15789 // Strip parens and casts away.
15790 LHSExpr = LHSExpr->IgnoreParenImpCasts();
15791 RHSExpr = RHSExpr->IgnoreParenImpCasts();
15792
15793 // Check for a call expression
15794 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
15795 if (!CE || CE->getNumArgs() != 1)
15796 return;
15797
15798 // Check for a call to std::move
15799 if (!CE->isCallToStdMove())
15800 return;
15801
15802 // Get argument from std::move
15803 RHSExpr = CE->getArg(0);
15804
15805 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
15806 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);
15807
15808 // Two DeclRefExpr's, check that the decls are the same.
15809 if (LHSDeclRef && RHSDeclRef) {
15810 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
15811 return;
15812 if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
15813 RHSDeclRef->getDecl()->getCanonicalDecl())
15814 return;
15815
15816 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
15817 << LHSExpr->getSourceRange()
15818 << RHSExpr->getSourceRange();
15819 return;
15820 }
15821
15822 // Member variables require a different approach to check for self moves.
15823 // Two MemberExprs are the same if every nested MemberExpr refers to the same
15824 // Decl and the base Exprs are either DeclRefExprs referring to the same Decl
15825 // or CXXThisExprs.
15826 const Expr *LHSBase = LHSExpr;
15827 const Expr *RHSBase = RHSExpr;
15828 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr);
15829 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr);
15830 if (!LHSME || !RHSME)
15831 return;
15832
15833 while (LHSME && RHSME) {
15834 if (LHSME->getMemberDecl()->getCanonicalDecl() !=
15835 RHSME->getMemberDecl()->getCanonicalDecl())
15836 return;
15837
15838 LHSBase = LHSME->getBase();
15839 RHSBase = RHSME->getBase();
15840 LHSME = dyn_cast<MemberExpr>(LHSBase);
15841 RHSME = dyn_cast<MemberExpr>(RHSBase);
15842 }
15843
15844 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase);
15845 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase);
15846 if (LHSDeclRef && RHSDeclRef) {
15847 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
15848 return;
15849 if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
15850 RHSDeclRef->getDecl()->getCanonicalDecl())
15851 return;
15852
15853 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
15854 << LHSExpr->getSourceRange()
15855 << RHSExpr->getSourceRange();
15856 return;
15857 }
15858
15859 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
15860 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
15861 << LHSExpr->getSourceRange()
15862 << RHSExpr->getSourceRange();
15863}
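A brief sketch of what DiagnoseSelfMove reports (illustrative only; the member-expression walk above handles the second case):

    #include <utility>

    struct Widget { int id; };

    void reset(Widget &w) {
      w = std::move(w);                    // flagged: explicitly moving a variable to itself
    }

    struct Holder {
      Widget w;
      void churn() { w = std::move(w); }   // flagged as well: same member through the same implicit 'this'
    };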
15864
15865//===--- Layout compatibility ----------------------------------------------//
15866
15867static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);
15868
15869/// Check if two enumeration types are layout-compatible.
15870static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
15871 // C++11 [dcl.enum] p8:
15872 // Two enumeration types are layout-compatible if they have the same
15873 // underlying type.
15874 return ED1->isComplete() && ED2->isComplete() &&
15875 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType());
15876}
15877
15878/// Check if two fields are layout-compatible.
15879static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1,
15880 FieldDecl *Field2) {
15881 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType()))
15882 return false;
15883
15884 if (Field1->isBitField() != Field2->isBitField())
15885 return false;
15886
15887 if (Field1->isBitField()) {
15888 // Make sure that the bit-fields are the same length.
15889 unsigned Bits1 = Field1->getBitWidthValue(C);
15890 unsigned Bits2 = Field2->getBitWidthValue(C);
15891
15892 if (Bits1 != Bits2)
15893 return false;
15894 }
15895
15896 return true;
15897}
15898
15899/// Check if two standard-layout structs are layout-compatible.
15900/// (C++11 [class.mem] p17)
15901static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1,
15902 RecordDecl *RD2) {
15903 // If both records are C++ classes, check that base classes match.
15904 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) {
15905 // If one of the records is a CXXRecordDecl, we are in C++ mode,
15906 // and thus the other one is a CXXRecordDecl, too.
15907 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2);
15908 // Check number of base classes.
15909 if (D1CXX->getNumBases() != D2CXX->getNumBases())
15910 return false;
15911
15912 // Check the base classes.
15913 for (CXXRecordDecl::base_class_const_iterator
15914 Base1 = D1CXX->bases_begin(),
15915 BaseEnd1 = D1CXX->bases_end(),
15916 Base2 = D2CXX->bases_begin();
15917 Base1 != BaseEnd1;
15918 ++Base1, ++Base2) {
15919 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType()))
15920 return false;
15921 }
15922 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) {
15923 // If only RD2 is a C++ class, it should have zero base classes.
15924 if (D2CXX->getNumBases() > 0)
15925 return false;
15926 }
15927
15928 // Check the fields.
15929 RecordDecl::field_iterator Field2 = RD2->field_begin(),
15930 Field2End = RD2->field_end(),
15931 Field1 = RD1->field_begin(),
15932 Field1End = RD1->field_end();
15933 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) {
15934 if (!isLayoutCompatible(C, *Field1, *Field2))
15935 return false;
15936 }
15937 if (Field1 != Field1End || Field2 != Field2End)
15938 return false;
15939
15940 return true;
15941}
15942
15943/// Check if two standard-layout unions are layout-compatible.
15944/// (C++11 [class.mem] p18)
15945static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1,
15946 RecordDecl *RD2) {
15947 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields;
15948 for (auto *Field2 : RD2->fields())
15949 UnmatchedFields.insert(Field2);
15950
15951 for (auto *Field1 : RD1->fields()) {
15952 llvm::SmallPtrSet<FieldDecl *, 8>::iterator
15953 I = UnmatchedFields.begin(),
15954 E = UnmatchedFields.end();
15955
15956 for ( ; I != E; ++I) {
15957 if (isLayoutCompatible(C, Field1, *I)) {
15958 bool Result = UnmatchedFields.erase(*I);
15959 (void) Result;
15960 assert(Result);
15961 break;
15962 }
15963 }
15964 if (I == E)
15965 return false;
15966 }
15967
15968 return UnmatchedFields.empty();
15969}
15970
15971static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1,
15972 RecordDecl *RD2) {
15973 if (RD1->isUnion() != RD2->isUnion())
15974 return false;
15975
15976 if (RD1->isUnion())
15977 return isLayoutCompatibleUnion(C, RD1, RD2);
15978 else
15979 return isLayoutCompatibleStruct(C, RD1, RD2);
15980}
15981
15982/// Check if two types are layout-compatible in C++11 sense.
15983static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
15984 if (T1.isNull() || T2.isNull())
15985 return false;
15986
15987 // C++11 [basic.types] p11:
15988 // If two types T1 and T2 are the same type, then T1 and T2 are
15989 // layout-compatible types.
15990 if (C.hasSameType(T1, T2))
15991 return true;
15992
15993 T1 = T1.getCanonicalType().getUnqualifiedType();
15994 T2 = T2.getCanonicalType().getUnqualifiedType();
15995
15996 const Type::TypeClass TC1 = T1->getTypeClass();
15997 const Type::TypeClass TC2 = T2->getTypeClass();
15998
15999 if (TC1 != TC2)
16000 return false;
16001
16002 if (TC1 == Type::Enum) {
16003 return isLayoutCompatible(C,
16004 cast<EnumType>(T1)->getDecl(),
16005 cast<EnumType>(T2)->getDecl());
16006 } else if (TC1 == Type::Record) {
16007 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
16008 return false;
16009
16010 return isLayoutCompatible(C,
16011 cast<RecordType>(T1)->getDecl(),
16012 cast<RecordType>(T2)->getDecl());
16013 }
16014
16015 return false;
16016}
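To make the rules above concrete, a small sketch: two standard-layout structs are layout-compatible when their fields pair up with layout-compatible types and matching bit-field widths; field names play no role. This predicate backs the layout_compatible mode of the type-tag check further below.

    struct A { int id;  unsigned flags : 4; };
    struct B { int key; unsigned mode  : 4; };   // layout-compatible with A: same field types and bit-widths
    struct C { int id;  unsigned flags : 8; };   // not layout-compatible with A: the bit-field widths differ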
16017
16018//===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//
16019
16020/// Given a type tag expression find the type tag itself.
16021///
16022/// \param TypeExpr Type tag expression, as it appears in user's code.
16023///
16024/// \param VD Declaration of an identifier that appears in a type tag.
16025///
16026/// \param MagicValue Type tag magic value.
16027///
16028 /// \param isConstantEvaluated whether the evaluation should be performed in
16029
16030/// constant context.
16031static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
16032 const ValueDecl **VD, uint64_t *MagicValue,
16033 bool isConstantEvaluated) {
16034 while(true) {
16035 if (!TypeExpr)
16036 return false;
16037
16038 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();
16039
16040 switch (TypeExpr->getStmtClass()) {
16041 case Stmt::UnaryOperatorClass: {
16042 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
16043 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
16044 TypeExpr = UO->getSubExpr();
16045 continue;
16046 }
16047 return false;
16048 }
16049
16050 case Stmt::DeclRefExprClass: {
16051 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
16052 *VD = DRE->getDecl();
16053 return true;
16054 }
16055
16056 case Stmt::IntegerLiteralClass: {
16057 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
16058 llvm::APInt MagicValueAPInt = IL->getValue();
16059 if (MagicValueAPInt.getActiveBits() <= 64) {
16060 *MagicValue = MagicValueAPInt.getZExtValue();
16061 return true;
16062 } else
16063 return false;
16064 }
16065
16066 case Stmt::BinaryConditionalOperatorClass:
16067 case Stmt::ConditionalOperatorClass: {
16068 const AbstractConditionalOperator *ACO =
16069 cast<AbstractConditionalOperator>(TypeExpr);
16070 bool Result;
16071 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx,
16072 isConstantEvaluated)) {
16073 if (Result)
16074 TypeExpr = ACO->getTrueExpr();
16075 else
16076 TypeExpr = ACO->getFalseExpr();
16077 continue;
16078 }
16079 return false;
16080 }
16081
16082 case Stmt::BinaryOperatorClass: {
16083 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
16084 if (BO->getOpcode() == BO_Comma) {
16085 TypeExpr = BO->getRHS();
16086 continue;
16087 }
16088 return false;
16089 }
16090
16091 default:
16092 return false;
16093 }
16094 }
16095}
16096
16097/// Retrieve the C type corresponding to type tag TypeExpr.
16098///
16099/// \param TypeExpr Expression that specifies a type tag.
16100///
16101/// \param MagicValues Registered magic values.
16102///
16103/// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
16104/// kind.
16105///
16106/// \param TypeInfo Information about the corresponding C type.
16107///
16108 /// \param isConstantEvaluated whether the evaluation should be performed in
16109/// constant context.
16110///
16111/// \returns true if the corresponding C type was found.
16112static bool GetMatchingCType(
16113 const IdentifierInfo *ArgumentKind, const Expr *TypeExpr,
16114 const ASTContext &Ctx,
16115 const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>
16116 *MagicValues,
16117 bool &FoundWrongKind, Sema::TypeTagData &TypeInfo,
16118 bool isConstantEvaluated) {
16119 FoundWrongKind = false;
16120
16121 // Variable declaration that has type_tag_for_datatype attribute.
16122 const ValueDecl *VD = nullptr;
16123
16124 uint64_t MagicValue;
16125
16126 if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated))
16127 return false;
16128
16129 if (VD) {
16130 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
16131 if (I->getArgumentKind() != ArgumentKind) {
16132 FoundWrongKind = true;
16133 return false;
16134 }
16135 TypeInfo.Type = I->getMatchingCType();
16136 TypeInfo.LayoutCompatible = I->getLayoutCompatible();
16137 TypeInfo.MustBeNull = I->getMustBeNull();
16138 return true;
16139 }
16140 return false;
16141 }
16142
16143 if (!MagicValues)
16144 return false;
16145
16146 llvm::DenseMap<Sema::TypeTagMagicValue,
16147 Sema::TypeTagData>::const_iterator I =
16148 MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
16149 if (I == MagicValues->end())
16150 return false;
16151
16152 TypeInfo = I->second;
16153 return true;
16154}
16155
16156void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
16157 uint64_t MagicValue, QualType Type,
16158 bool LayoutCompatible,
16159 bool MustBeNull) {
16160 if (!TypeTagForDatatypeMagicValues)
16161 TypeTagForDatatypeMagicValues.reset(
16162 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);
16163
16164 TypeTagMagicValue Magic(ArgumentKind, MagicValue);
16165 (*TypeTagForDatatypeMagicValues)[Magic] =
16166 TypeTagData(Type, LayoutCompatible, MustBeNull);
16167}
16168
16169static bool IsSameCharType(QualType T1, QualType T2) {
16170 const BuiltinType *BT1 = T1->getAs<BuiltinType>();
16171 if (!BT1)
16172 return false;
16173
16174 const BuiltinType *BT2 = T2->getAs<BuiltinType>();
16175 if (!BT2)
16176 return false;
16177
16178 BuiltinType::Kind T1Kind = BT1->getKind();
16179 BuiltinType::Kind T2Kind = BT2->getKind();
16180
16181 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
16182 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
16183 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
16184 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
16185}
16186
16187void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
16188 const ArrayRef<const Expr *> ExprArgs,
16189 SourceLocation CallSiteLoc) {
16190 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
16191 bool IsPointerAttr = Attr->getIsPointer();
16192
16193 // Retrieve the argument representing the 'type_tag'.
16194 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex();
16195 if (TypeTagIdxAST >= ExprArgs.size()) {
16196 Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
16197 << 0 << Attr->getTypeTagIdx().getSourceIndex();
16198 return;
16199 }
16200 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST];
16201 bool FoundWrongKind;
16202 TypeTagData TypeInfo;
16203 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
16204 TypeTagForDatatypeMagicValues.get(), FoundWrongKind,
16205 TypeInfo, isConstantEvaluated())) {
16206 if (FoundWrongKind)
16207 Diag(TypeTagExpr->getExprLoc(),
16208 diag::warn_type_tag_for_datatype_wrong_kind)
16209 << TypeTagExpr->getSourceRange();
16210 return;
16211 }
16212
16213 // Retrieve the argument representing the 'arg_idx'.
16214 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex();
16215 if (ArgumentIdxAST >= ExprArgs.size()) {
16216 Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
16217 << 1 << Attr->getArgumentIdx().getSourceIndex();
16218 return;
16219 }
16220 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST];
16221 if (IsPointerAttr) {
16222 // Skip implicit cast of pointer to `void *' (as a function argument).
16223 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr))
16224 if (ICE->getType()->isVoidPointerType() &&
16225 ICE->getCastKind() == CK_BitCast)
16226 ArgumentExpr = ICE->getSubExpr();
16227 }
16228 QualType ArgumentType = ArgumentExpr->getType();
16229
16230 // Passing a `void*' pointer shouldn't trigger a warning.
16231 if (IsPointerAttr && ArgumentType->isVoidPointerType())
16232 return;
16233
16234 if (TypeInfo.MustBeNull) {
16235 // Type tag with matching void type requires a null pointer.
16236 if (!ArgumentExpr->isNullPointerConstant(Context,
16237 Expr::NPC_ValueDependentIsNotNull)) {
16238 Diag(ArgumentExpr->getExprLoc(),
16239 diag::warn_type_safety_null_pointer_required)
16240 << ArgumentKind->getName()
16241 << ArgumentExpr->getSourceRange()
16242 << TypeTagExpr->getSourceRange();
16243 }
16244 return;
16245 }
16246
16247 QualType RequiredType = TypeInfo.Type;
16248 if (IsPointerAttr)
16249 RequiredType = Context.getPointerType(RequiredType);
16250
16251 bool mismatch = false;
16252 if (!TypeInfo.LayoutCompatible) {
16253 mismatch = !Context.hasSameType(ArgumentType, RequiredType);
16254
16255 // C++11 [basic.fundamental] p1:
16256 // Plain char, signed char, and unsigned char are three distinct types.
16257 //
16258 // But we treat plain `char' as equivalent to `signed char' or `unsigned
16259 // char' depending on the current char signedness mode.
16260 if (mismatch)
16261 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(),
16262 RequiredType->getPointeeType())) ||
16263 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType)))
16264 mismatch = false;
16265 } else
16266 if (IsPointerAttr)
16267 mismatch = !isLayoutCompatible(Context,
16268 ArgumentType->getPointeeType(),
16269 RequiredType->getPointeeType());
16270 else
16271 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType);
16272
16273 if (mismatch)
16274 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
16275 << ArgumentType << ArgumentKind
16276 << TypeInfo.LayoutCompatible << RequiredType
16277 << ArgumentExpr->getSourceRange()
16278 << TypeTagExpr->getSourceRange();
16279}
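// Illustrative sketch (hypothetical user code, not part of this file): the
// call-site checking above is what fires for the argument_with_type_tag and
// pointer_with_type_tag spellings, using the MPI_INT tag sketched earlier:
//
//   void mpi_send(void *buf, int count, MPI_Datatype datatype)
//       __attribute__((pointer_with_type_tag(mpi, 1, 3)));
//
//   int i; float f;
//   mpi_send(&i, 1, MPI_INT);   // no warning: argument type matches the tag
//   mpi_send(&f, 1, MPI_INT);   // warn_type_safety_type_mismatch expected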
16280
16281void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
16282 CharUnits Alignment) {
16283 MisalignedMembers.emplace_back(E, RD, MD, Alignment);
16284}
16285
16286void Sema::DiagnoseMisalignedMembers() {
16287 for (MisalignedMember &m : MisalignedMembers) {
16288 const NamedDecl *ND = m.RD;
16289 if (ND->getName().empty()) {
16290 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
16291 ND = TD;
16292 }
16293 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
16294 << m.MD << ND << m.E->getSourceRange();
16295 }
16296 MisalignedMembers.clear();
16297}
16298
16299void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
16300 E = E->IgnoreParens();
16301 if (!T->isPointerType() && !T->isIntegerType())
16302 return;
16303 if (isa<UnaryOperator>(E) &&
16304 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
16305 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
16306 if (isa<MemberExpr>(Op)) {
16307 auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op));
16308 if (MA != MisalignedMembers.end() &&
16309 (T->isIntegerType() ||
16310 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
16311 Context.getTypeAlignInChars(
16312 T->getPointeeType()) <= MA->Alignment))))
16313 MisalignedMembers.erase(MA);
16314 }
16315 }
16316}
16317
16318void Sema::RefersToMemberWithReducedAlignment(
16319 Expr *E,
16320 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
16321 Action) {
16322 const auto *ME = dyn_cast<MemberExpr>(E);
16323 if (!ME)
16324 return;
16325
16326 // No need to check expressions with an __unaligned-qualified type.
16327 if (E->getType().getQualifiers().hasUnaligned())
16328 return;
16329
16330 // For a chain of MemberExpr like "a.b.c.d" this list
16331 // will keep FieldDecl's like [d, c, b].
16332 SmallVector<FieldDecl *, 4> ReverseMemberChain;
16333 const MemberExpr *TopME = nullptr;
16334 bool AnyIsPacked = false;
16335 do {
16336 QualType BaseType = ME->getBase()->getType();
16337 if (BaseType->isDependentType())
16338 return;
16339 if (ME->isArrow())
16340 BaseType = BaseType->getPointeeType();
16341 RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl();
16342 if (RD->isInvalidDecl())
16343 return;
16344
16345 ValueDecl *MD = ME->getMemberDecl();
16346 auto *FD = dyn_cast<FieldDecl>(MD);
16347 // We do not care about non-data members.
16348 if (!FD || FD->isInvalidDecl())
16349 return;
16350
16351 AnyIsPacked =
16352 AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
16353 ReverseMemberChain.push_back(FD);
16354
16355 TopME = ME;
16356 ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
16357 } while (ME);
16358 assert(TopME && "We did not compute a topmost MemberExpr!")((void)0);
16359
16360 // Not the scope of this diagnostic.
16361 if (!AnyIsPacked)
16362 return;
16363
16364 const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
16365 const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
16366 // TODO: The innermost base of the member expression may be too complicated.
16367 // For now, just disregard these cases. This is left for future
16368 // improvement.
16369 if (!DRE && !isa<CXXThisExpr>(TopBase))
16370 return;
16371
16372 // Alignment expected by the whole expression.
16373 CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());
16374
16375 // No need to do anything else with this case.
16376 if (ExpectedAlignment.isOne())
16377 return;
16378
16379 // Synthesize offset of the whole access.
16380 CharUnits Offset;
16381 for (auto I = ReverseMemberChain.rbegin(); I != ReverseMemberChain.rend();
16382 I++) {
16383 Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(*I));
16384 }
16385
16386 // Compute the CompleteObjectAlignment as the alignment of the whole chain.
16387 CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
16388 ReverseMemberChain.back()->getParent()->getTypeForDecl());
16389
16390 // The base expression of the innermost MemberExpr may give
16391 // stronger guarantees than the class containing the member.
16392 if (DRE && !TopME->isArrow()) {
16393 const ValueDecl *VD = DRE->getDecl();
16394 if (!VD->getType()->isReferenceType())
16395 CompleteObjectAlignment =
16396 std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
16397 }
16398
16399 // Check if the synthesized offset fulfills the alignment.
16400 if (Offset % ExpectedAlignment != 0 ||
16401 // It may fulfill the offset, but the effective alignment may still be
16402 // lower than the expected expression alignment.
16403 CompleteObjectAlignment < ExpectedAlignment) {
16404 // If this happens, we want to determine a sensible culprit of this.
16405 // Intuitively, watching the chain of member expressions from right to
16406 // left, we start with the required alignment (as required by the field
16407 // type) but some packed attribute in that chain has reduced the alignment.
16408 // It may happen that another packed structure increases it again. But if
16409 // we are here, such an increase has not been enough. So pointing at the first
16410 // FieldDecl that either is packed itself or whose enclosing RecordDecl is
16411 // packed seems reasonable.
16412 FieldDecl *FD = nullptr;
16413 CharUnits Alignment;
16414 for (FieldDecl *FDI : ReverseMemberChain) {
16415 if (FDI->hasAttr<PackedAttr>() ||
16416 FDI->getParent()->hasAttr<PackedAttr>()) {
16417 FD = FDI;
16418 Alignment = std::min(
16419 Context.getTypeAlignInChars(FD->getType()),
16420 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
16421 break;
16422 }
16423 }
16424 assert(FD && "We did not find a packed FieldDecl!")((void)0);
16425 Action(E, FD->getParent(), FD, Alignment);
16426 }
16427}
16428
16429void Sema::CheckAddressOfPackedMember(Expr *rhs) {
16430 using namespace std::placeholders;
16431
16432 RefersToMemberWithReducedAlignment(
16433 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,
16434 _2, _3, _4));
16435}
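// Illustrative sketch (hypothetical user code, not part of this file): the
// machinery above drives -Waddress-of-packed-member. Roughly:
//
//   struct __attribute__((packed)) S { char c; int i; };
//   int *addr(struct S *s) { return &s->i; }  // warning: taking address of a
//                                             // packed member may result in an
//                                             // unaligned pointer value
//
// DiscardMisalignedMemberAddress drops a candidate again when the address is
// only used as an integer, or as a pointer whose pointee alignment requirement
// is no stricter than the member's effective alignment.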
16436
16437ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall,
16438 ExprResult CallResult) {
16439 if (checkArgCount(*this, TheCall, 1))
16440 return ExprError();
16441
16442 ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0));
16443 if (MatrixArg.isInvalid())
16444 return MatrixArg;
16445 Expr *Matrix = MatrixArg.get();
16446
16447 auto *MType = Matrix->getType()->getAs<ConstantMatrixType>();
16448 if (!MType) {
16449 Diag(Matrix->getBeginLoc(), diag::err_builtin_matrix_arg);
16450 return ExprError();
16451 }
16452
16453 // Create returned matrix type by swapping rows and columns of the argument
16454 // matrix type.
16455 QualType ResultType = Context.getConstantMatrixType(
16456 MType->getElementType(), MType->getNumColumns(), MType->getNumRows());
16457
16458 // Change the return type to the type of the returned matrix.
16459 TheCall->setType(ResultType);
16460
16461 // Update call argument to use the possibly converted matrix argument.
16462 TheCall->setArg(0, Matrix);
16463 return CallResult;
16464}
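// Illustrative sketch (hypothetical user code, not part of this file): with
// -fenable-matrix, the builtin checked above swaps the dimensions of its
// argument's matrix type:
//
//   typedef float m4x2_t __attribute__((matrix_type(4, 2)));
//   typedef float m2x4_t __attribute__((matrix_type(2, 4)));
//   m2x4_t transpose(m4x2_t m) { return __builtin_matrix_transpose(m); }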
16465
16466// Get and verify the matrix dimensions.
16467static llvm::Optional<unsigned>
16468getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) {
16469 SourceLocation ErrorPos;
16470 Optional<llvm::APSInt> Value =
16471 Expr->getIntegerConstantExpr(S.Context, &ErrorPos);
16472 if (!Value) {
16473 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg)
16474 << Name;
16475 return {};
16476 }
16477 uint64_t Dim = Value->getZExtValue();
16478 if (!ConstantMatrixType::isDimensionValid(Dim)) {
16479 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension)
16480 << Name << ConstantMatrixType::getMaxElementsPerDimension();
16481 return {};
16482 }
16483 return Dim;
16484}
16485
16486ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
16487 ExprResult CallResult) {
16488 if (!getLangOpts().MatrixTypes) {
16489 Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled);
16490 return ExprError();
16491 }
16492
16493 if (checkArgCount(*this, TheCall, 4))
16494 return ExprError();
16495
16496 unsigned PtrArgIdx = 0;
16497 Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
16498 Expr *RowsExpr = TheCall->getArg(1);
16499 Expr *ColumnsExpr = TheCall->getArg(2);
16500 Expr *StrideExpr = TheCall->getArg(3);
16501
16502 bool ArgError = false;
16503
16504 // Check pointer argument.
16505 {
16506 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
16507 if (PtrConv.isInvalid())
16508 return PtrConv;
16509 PtrExpr = PtrConv.get();
16510 TheCall->setArg(0, PtrExpr);
16511 if (PtrExpr->isTypeDependent()) {
16512 TheCall->setType(Context.DependentTy);
16513 return TheCall;
16514 }
16515 }
16516
16517 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
16518 QualType ElementTy;
16519 if (!PtrTy) {
16520 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
16521 << PtrArgIdx + 1;
16522 ArgError = true;
16523 } else {
16524 ElementTy = PtrTy->getPointeeType().getUnqualifiedType();
16525
16526 if (!ConstantMatrixType::isValidElementType(ElementTy)) {
16527 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
16528 << PtrArgIdx + 1;
16529 ArgError = true;
16530 }
16531 }
16532
16533 // Apply default Lvalue conversions and convert the expression to size_t.
16534 auto ApplyArgumentConversions = [this](Expr *E) {
16535 ExprResult Conv = DefaultLvalueConversion(E);
16536 if (Conv.isInvalid())
16537 return Conv;
16538
16539 return tryConvertExprToType(Conv.get(), Context.getSizeType());
16540 };
16541
16542 // Apply conversion to row and column expressions.
16543 ExprResult RowsConv = ApplyArgumentConversions(RowsExpr);
16544 if (!RowsConv.isInvalid()) {
16545 RowsExpr = RowsConv.get();
16546 TheCall->setArg(1, RowsExpr);
16547 } else
16548 RowsExpr = nullptr;
16549
16550 ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr);
16551 if (!ColumnsConv.isInvalid()) {
16552 ColumnsExpr = ColumnsConv.get();
16553 TheCall->setArg(2, ColumnsExpr);
16554 } else
16555 ColumnsExpr = nullptr;
16556
16557 // If any part of the result matrix type is still pending, just use
16558 // Context.DependentTy, until all parts are resolved.
16559 if ((RowsExpr && RowsExpr->isTypeDependent()) ||
16560 (ColumnsExpr && ColumnsExpr->isTypeDependent())) {
16561 TheCall->setType(Context.DependentTy);
16562 return CallResult;
16563 }
16564
16565 // Check row and column dimensions.
16566 llvm::Optional<unsigned> MaybeRows;
16567 if (RowsExpr)
16568 MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);
16569
16570 llvm::Optional<unsigned> MaybeColumns;
16571 if (ColumnsExpr)
16572 MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);
16573
16574 // Check stride argument.
16575 ExprResult StrideConv = ApplyArgumentConversions(StrideExpr);
16576 if (StrideConv.isInvalid())
16577 return ExprError();
16578 StrideExpr = StrideConv.get();
16579 TheCall->setArg(3, StrideExpr);
16580
16581 if (MaybeRows) {
16582 if (Optional<llvm::APSInt> Value =
16583 StrideExpr->getIntegerConstantExpr(Context)) {
16584 uint64_t Stride = Value->getZExtValue();
16585 if (Stride < *MaybeRows) {
16586 Diag(StrideExpr->getBeginLoc(),
16587 diag::err_builtin_matrix_stride_too_small);
16588 ArgError = true;
16589 }
16590 }
16591 }
16592
16593 if (ArgError || !MaybeRows || !MaybeColumns)
16594 return ExprError();
16595
16596 TheCall->setType(
16597 Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns));
16598 return CallResult;
16599}
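// Illustrative sketch (hypothetical user code, not part of this file): the
// load builtin checked above takes a pointer, constant row and column counts,
// and a stride that must be at least the number of rows:
//
//   typedef double m3x3_t __attribute__((matrix_type(3, 3)));
//   m3x3_t load3x3(const double *p, unsigned long stride) {
//     return __builtin_matrix_column_major_load(p, 3, 3, stride);
//   }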
16600
16601ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
16602 ExprResult CallResult) {
16603 if (checkArgCount(*this, TheCall, 3))
16604 return ExprError();
16605
16606 unsigned PtrArgIdx = 1;
16607 Expr *MatrixExpr = TheCall->getArg(0);
16608 Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
16609 Expr *StrideExpr = TheCall->getArg(2);
16610
16611 bool ArgError = false;
16612
16613 {
16614 ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr);
16615 if (MatrixConv.isInvalid())
16616 return MatrixConv;
16617 MatrixExpr = MatrixConv.get();
16618 TheCall->setArg(0, MatrixExpr);
16619 }
16620 if (MatrixExpr->isTypeDependent()) {
16621 TheCall->setType(Context.DependentTy);
16622 return TheCall;
16623 }
16624
16625 auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>();
16626 if (!MatrixTy) {
16627 Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_matrix_arg) << 0;
16628 ArgError = true;
16629 }
16630
16631 {
16632 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
16633 if (PtrConv.isInvalid())
16634 return PtrConv;
16635 PtrExpr = PtrConv.get();
16636 TheCall->setArg(1, PtrExpr);
16637 if (PtrExpr->isTypeDependent()) {
16638 TheCall->setType(Context.DependentTy);
16639 return TheCall;
16640 }
16641 }
16642
16643 // Check pointer argument.
16644 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
16645 if (!PtrTy) {
16646 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
16647 << PtrArgIdx + 1;
16648 ArgError = true;
16649 } else {
16650 QualType ElementTy = PtrTy->getPointeeType();
16651 if (ElementTy.isConstQualified()) {
16652 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const);
16653 ArgError = true;
16654 }
16655 ElementTy = ElementTy.getUnqualifiedType().getCanonicalType();
16656 if (MatrixTy &&
16657 !Context.hasSameType(ElementTy, MatrixTy->getElementType())) {
16658 Diag(PtrExpr->getBeginLoc(),
16659 diag::err_builtin_matrix_pointer_arg_mismatch)
16660 << ElementTy << MatrixTy->getElementType();
16661 ArgError = true;
16662 }
16663 }
16664
16665 // Apply default Lvalue conversions and convert the stride expression to
16666 // size_t.
16667 {
16668 ExprResult StrideConv = DefaultLvalueConversion(StrideExpr);
16669 if (StrideConv.isInvalid())
16670 return StrideConv;
16671
16672 StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType());
16673 if (StrideConv.isInvalid())
16674 return StrideConv;
16675 StrideExpr = StrideConv.get();
16676 TheCall->setArg(2, StrideExpr);
16677 }
16678
16679 // Check stride argument.
16680 if (MatrixTy) {
16681 if (Optional<llvm::APSInt> Value =
16682 StrideExpr->getIntegerConstantExpr(Context)) {
16683 uint64_t Stride = Value->getZExtValue();
16684 if (Stride < MatrixTy->getNumRows()) {
16685 Diag(StrideExpr->getBeginLoc(),
16686 diag::err_builtin_matrix_stride_too_small);
16687 ArgError = true;
16688 }
16689 }
16690 }
16691
16692 if (ArgError)
16693 return ExprError();
16694
16695 return CallResult;
16696}
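// Illustrative sketch (hypothetical user code, not part of this file): the
// store builtin checked above rejects stores through a pointer to const and
// requires the pointee type to match the matrix element type:
//
//   typedef double m3x3_t __attribute__((matrix_type(3, 3)));
//   void store3x3(m3x3_t m, double *p) {
//     __builtin_matrix_column_major_store(m, p, 3);  // stride >= number of rows
//   }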
16697
16698/// \brief Enforce the bounds of a TCB
16699/// CheckTCBEnforcement - Enforces that every function in a named TCB only
16700/// directly calls other functions in the same TCB as marked by the enforce_tcb
16701/// and enforce_tcb_leaf attributes.
16702void Sema::CheckTCBEnforcement(const CallExpr *TheCall,
16703 const FunctionDecl *Callee) {
16704 const FunctionDecl *Caller = getCurFunctionDecl();
16705
16706 // Calls to builtins are not enforced.
16707 if (!Caller || !Caller->hasAttr<EnforceTCBAttr>() ||
16708 Callee->getBuiltinID() != 0)
16709 return;
16710
16711 // Search through the enforce_tcb and enforce_tcb_leaf attributes to find
16712 // all TCBs the callee is a part of.
16713 llvm::StringSet<> CalleeTCBs;
16714 for_each(Callee->specific_attrs<EnforceTCBAttr>(),
16715 [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });
16716 for_each(Callee->specific_attrs<EnforceTCBLeafAttr>(),
16717 [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });
16718
16719 // Go through the TCBs the caller is a part of and emit warnings if Caller
16720 // is in a TCB that the Callee is not.
16721 for_each(
16722 Caller->specific_attrs<EnforceTCBAttr>(),
16723 [&](const auto *A) {
16724 StringRef CallerTCB = A->getTCBName();
16725 if (CalleeTCBs.count(CallerTCB) == 0) {
16726 this->Diag(TheCall->getExprLoc(),
16727 diag::warn_tcb_enforcement_violation) << Callee
16728 << CallerTCB;
16729 }
16730 });
16731}
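// Illustrative sketch (hypothetical user code, not part of this file): the TCB
// check above warns when a function in a named TCB directly calls a function
// outside that TCB:
//
//   void helper(void);                                       // not in any TCB
//   __attribute__((enforce_tcb_leaf("auth"))) void leaf(void);
//   __attribute__((enforce_tcb("auth"))) void gate(void) {
//     leaf();    // OK: 'leaf' is part of the "auth" TCB (as a leaf)
//     helper();  // warn_tcb_enforcement_violation expected
//   }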