1//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements extra semantic analysis beyond what is enforced
10// by the C type system.
11//
12//===----------------------------------------------------------------------===//
13
14#include "clang/AST/APValue.h"
15#include "clang/AST/ASTContext.h"
16#include "clang/AST/Attr.h"
17#include "clang/AST/AttrIterator.h"
18#include "clang/AST/CharUnits.h"
19#include "clang/AST/Decl.h"
20#include "clang/AST/DeclBase.h"
21#include "clang/AST/DeclCXX.h"
22#include "clang/AST/DeclObjC.h"
23#include "clang/AST/DeclarationName.h"
24#include "clang/AST/EvaluatedExprVisitor.h"
25#include "clang/AST/Expr.h"
26#include "clang/AST/ExprCXX.h"
27#include "clang/AST/ExprObjC.h"
28#include "clang/AST/ExprOpenMP.h"
29#include "clang/AST/FormatString.h"
30#include "clang/AST/IgnoreExpr.h"
31#include "clang/AST/NSAPI.h"
32#include "clang/AST/NonTrivialTypeVisitor.h"
33#include "clang/AST/OperationKinds.h"
34#include "clang/AST/RecordLayout.h"
35#include "clang/AST/Stmt.h"
36#include "clang/AST/TemplateBase.h"
37#include "clang/AST/Type.h"
38#include "clang/AST/TypeLoc.h"
39#include "clang/AST/UnresolvedSet.h"
40#include "clang/Basic/AddressSpaces.h"
41#include "clang/Basic/CharInfo.h"
42#include "clang/Basic/Diagnostic.h"
43#include "clang/Basic/IdentifierTable.h"
44#include "clang/Basic/LLVM.h"
45#include "clang/Basic/LangOptions.h"
46#include "clang/Basic/OpenCLOptions.h"
47#include "clang/Basic/OperatorKinds.h"
48#include "clang/Basic/PartialDiagnostic.h"
49#include "clang/Basic/SourceLocation.h"
50#include "clang/Basic/SourceManager.h"
51#include "clang/Basic/Specifiers.h"
52#include "clang/Basic/SyncScope.h"
53#include "clang/Basic/TargetBuiltins.h"
54#include "clang/Basic/TargetCXXABI.h"
55#include "clang/Basic/TargetInfo.h"
56#include "clang/Basic/TypeTraits.h"
57#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
58#include "clang/Sema/Initialization.h"
59#include "clang/Sema/Lookup.h"
60#include "clang/Sema/Ownership.h"
61#include "clang/Sema/Scope.h"
62#include "clang/Sema/ScopeInfo.h"
63#include "clang/Sema/Sema.h"
64#include "clang/Sema/SemaInternal.h"
65#include "llvm/ADT/APFloat.h"
66#include "llvm/ADT/APInt.h"
67#include "llvm/ADT/APSInt.h"
68#include "llvm/ADT/ArrayRef.h"
69#include "llvm/ADT/DenseMap.h"
70#include "llvm/ADT/FoldingSet.h"
71#include "llvm/ADT/STLExtras.h"
72#include "llvm/ADT/SmallBitVector.h"
73#include "llvm/ADT/SmallPtrSet.h"
74#include "llvm/ADT/SmallString.h"
75#include "llvm/ADT/SmallVector.h"
76#include "llvm/ADT/StringExtras.h"
77#include "llvm/ADT/StringRef.h"
78#include "llvm/ADT/StringSet.h"
79#include "llvm/ADT/StringSwitch.h"
80#include "llvm/Support/AtomicOrdering.h"
81#include "llvm/Support/Casting.h"
82#include "llvm/Support/Compiler.h"
83#include "llvm/Support/ConvertUTF.h"
84#include "llvm/Support/ErrorHandling.h"
85#include "llvm/Support/Format.h"
86#include "llvm/Support/Locale.h"
87#include "llvm/Support/MathExtras.h"
88#include "llvm/Support/SaveAndRestore.h"
89#include "llvm/Support/raw_ostream.h"
90#include "llvm/TargetParser/RISCVTargetParser.h"
91#include "llvm/TargetParser/Triple.h"
92#include <algorithm>
93#include <bitset>
94#include <cassert>
95#include <cctype>
96#include <cstddef>
97#include <cstdint>
98#include <functional>
99#include <limits>
100#include <optional>
101#include <string>
102#include <tuple>
103#include <utility>
104
105using namespace clang;
106using namespace sema;
107
108SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
109 unsigned ByteNo) const {
110 return SL->getLocationOfByte(ByteNo, SM: getSourceManager(), Features: LangOpts,
111 Target: Context.getTargetInfo());
112}
113
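/// Pack a pair of FormatArgumentPassingKind values into a single constant
/// (first kind in the high byte, second in the low byte) so callers can
/// compare or switch over both kinds at once.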
114static constexpr unsigned short combineFAPK(Sema::FormatArgumentPassingKind A,
115 Sema::FormatArgumentPassingKind B) {
116 return (A << 8) | B;
117}
118
119/// Checks that a call expression's argument count is at least the desired
120/// number. This is useful when doing custom type-checking on a variadic
121/// function. Returns true on error.
122static bool checkArgCountAtLeast(Sema &S, CallExpr *Call,
123 unsigned MinArgCount) {
124 unsigned ArgCount = Call->getNumArgs();
125 if (ArgCount >= MinArgCount)
126 return false;
127
128 return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
129 << 0 /*function call*/ << MinArgCount << ArgCount
130 << /*is non object*/ 0 << Call->getSourceRange();
131}
132
133/// Checks that a call expression's argument count is at most the desired
134/// number. This is useful when doing custom type-checking on a variadic
135/// function. Returns true on error.
136static bool checkArgCountAtMost(Sema &S, CallExpr *Call, unsigned MaxArgCount) {
137 unsigned ArgCount = Call->getNumArgs();
138 if (ArgCount <= MaxArgCount)
139 return false;
140 return S.Diag(Call->getEndLoc(),
141 diag::err_typecheck_call_too_many_args_at_most)
142 << 0 /*function call*/ << MaxArgCount << ArgCount
143 << /*is non object*/ 0 << Call->getSourceRange();
144}
145
146/// Checks that a call expression's argument count is in the desired range. This
147/// is useful when doing custom type-checking on a variadic function. Returns
148/// true on error.
149static bool checkArgCountRange(Sema &S, CallExpr *Call, unsigned MinArgCount,
150 unsigned MaxArgCount) {
151 return checkArgCountAtLeast(S, Call, MinArgCount) ||
152 checkArgCountAtMost(S, Call, MaxArgCount);
153}
154
155/// Checks that a call expression's argument count is the desired number.
156/// This is useful when doing custom type-checking. Returns true on error.
157static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) {
158 unsigned ArgCount = Call->getNumArgs();
159 if (ArgCount == DesiredArgCount)
160 return false;
161
162 if (checkArgCountAtLeast(S, Call, MinArgCount: DesiredArgCount))
163 return true;
164 assert(ArgCount > DesiredArgCount && "should have diagnosed this");
165
166 // Highlight all the excess arguments.
167 SourceRange Range(Call->getArg(Arg: DesiredArgCount)->getBeginLoc(),
168 Call->getArg(Arg: ArgCount - 1)->getEndLoc());
169
170 return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args)
171 << 0 /*function call*/ << DesiredArgCount << ArgCount
172 << /*is non object*/ 0 << Call->getArg(1)->getSourceRange();
173}
174
175static bool convertArgumentToType(Sema &S, Expr *&Value, QualType Ty) {
176 if (Value->isTypeDependent())
177 return false;
178
179 InitializedEntity Entity =
180 InitializedEntity::InitializeParameter(Context&: S.Context, Type: Ty, Consumed: false);
181 ExprResult Result =
182 S.PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: Value);
183 if (Result.isInvalid())
184 return true;
185 Value = Result.get();
186 return false;
187}
188
189/// Check that the first argument to __builtin_annotation is an integer
190/// and the second argument is a non-wide string literal.
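/// For example, __builtin_annotation(v, "note") has the same type as 'v'.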
191static bool BuiltinAnnotation(Sema &S, CallExpr *TheCall) {
192 if (checkArgCount(S, Call: TheCall, DesiredArgCount: 2))
193 return true;
194
195 // First argument should be an integer.
196 Expr *ValArg = TheCall->getArg(Arg: 0);
197 QualType Ty = ValArg->getType();
198 if (!Ty->isIntegerType()) {
199 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
200 << ValArg->getSourceRange();
201 return true;
202 }
203
204 // Second argument should be a constant string.
205 Expr *StrArg = TheCall->getArg(Arg: 1)->IgnoreParenCasts();
206 StringLiteral *Literal = dyn_cast<StringLiteral>(Val: StrArg);
207 if (!Literal || !Literal->isOrdinary()) {
208 S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
209 << StrArg->getSourceRange();
210 return true;
211 }
212
213 TheCall->setType(Ty);
214 return false;
215}
216
217static bool BuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
218 // We need at least one argument.
219 if (TheCall->getNumArgs() < 1) {
220 S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
221 << 0 << 1 << TheCall->getNumArgs() << /*is non object*/ 0
222 << TheCall->getCallee()->getSourceRange();
223 return true;
224 }
225
226 // All arguments should be wide string literals.
227 for (Expr *Arg : TheCall->arguments()) {
228 auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
229 if (!Literal || !Literal->isWide()) {
230 S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
231 << Arg->getSourceRange();
232 return true;
233 }
234 }
235
236 return false;
237}
238
239/// Check that the argument to __builtin_addressof is a glvalue, and set the
240/// result type to the corresponding pointer type.
241static bool BuiltinAddressof(Sema &S, CallExpr *TheCall) {
242 if (checkArgCount(S, Call: TheCall, DesiredArgCount: 1))
243 return true;
244
245 ExprResult Arg(TheCall->getArg(Arg: 0));
246 QualType ResultType = S.CheckAddressOfOperand(Operand&: Arg, OpLoc: TheCall->getBeginLoc());
247 if (ResultType.isNull())
248 return true;
249
250 TheCall->setArg(Arg: 0, ArgExpr: Arg.get());
251 TheCall->setType(ResultType);
252 return false;
253}
254
255/// Check that the argument to __builtin_function_start is a function.
256static bool BuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
257 if (checkArgCount(S, Call: TheCall, DesiredArgCount: 1))
258 return true;
259
260 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(E: TheCall->getArg(Arg: 0));
261 if (Arg.isInvalid())
262 return true;
263
264 TheCall->setArg(Arg: 0, ArgExpr: Arg.get());
265 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
266 Val: Arg.get()->getAsBuiltinConstantDeclRef(Context: S.getASTContext()));
267
268 if (!FD) {
269 S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
270 << TheCall->getSourceRange();
271 return true;
272 }
273
274 return !S.checkAddressOfFunctionIsAvailable(Function: FD, /*Complain=*/true,
275 Loc: TheCall->getBeginLoc());
276}
277
278/// Check the number of arguments and set the result type to
279/// the argument type.
280static bool BuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
281 if (checkArgCount(S, Call: TheCall, DesiredArgCount: 1))
282 return true;
283
284 TheCall->setType(TheCall->getArg(Arg: 0)->getType());
285 return false;
286}
287
288/// Check that the value argument for __builtin_is_aligned(value, alignment) and
289/// __builtin_align_{up,down}(value, alignment) is an integer or a pointer
290/// type (but not a function pointer) and that the alignment is a power-of-two.
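/// For example: __builtin_is_aligned(p, 16), __builtin_align_up(n, 8).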
291static bool BuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
292 if (checkArgCount(S, Call: TheCall, DesiredArgCount: 2))
293 return true;
294
295 clang::Expr *Source = TheCall->getArg(Arg: 0);
296 bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;
297
298 auto IsValidIntegerType = [](QualType Ty) {
299 return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
300 };
301 QualType SrcTy = Source->getType();
302 // We should also be able to use it with arrays (but not functions!).
303 if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
304 SrcTy = S.Context.getDecayedType(T: SrcTy);
305 }
306 if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
307 SrcTy->isFunctionPointerType()) {
308 // FIXME: this is not quite the right error message since we don't allow
309 // floating point types, or member pointers.
310 S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
311 << SrcTy;
312 return true;
313 }
314
315 clang::Expr *AlignOp = TheCall->getArg(Arg: 1);
316 if (!IsValidIntegerType(AlignOp->getType())) {
317 S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
318 << AlignOp->getType();
319 return true;
320 }
321 Expr::EvalResult AlignResult;
322 unsigned MaxAlignmentBits = S.Context.getIntWidth(T: SrcTy) - 1;
323 // We can't check validity of alignment if it is value dependent.
324 if (!AlignOp->isValueDependent() &&
325 AlignOp->EvaluateAsInt(Result&: AlignResult, Ctx: S.Context,
326 AllowSideEffects: Expr::SE_AllowSideEffects)) {
327 llvm::APSInt AlignValue = AlignResult.Val.getInt();
328 llvm::APSInt MaxValue(
329 llvm::APInt::getOneBitSet(numBits: MaxAlignmentBits + 1, BitNo: MaxAlignmentBits));
330 if (AlignValue < 1) {
331 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
332 return true;
333 }
334 if (llvm::APSInt::compareValues(I1: AlignValue, I2: MaxValue) > 0) {
335 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
336 << toString(MaxValue, 10);
337 return true;
338 }
339 if (!AlignValue.isPowerOf2()) {
340 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
341 return true;
342 }
343 if (AlignValue == 1) {
344 S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
345 << IsBooleanAlignBuiltin;
346 }
347 }
348
349 ExprResult SrcArg = S.PerformCopyInitialization(
350 Entity: InitializedEntity::InitializeParameter(Context&: S.Context, Type: SrcTy, Consumed: false),
351 EqualLoc: SourceLocation(), Init: Source);
352 if (SrcArg.isInvalid())
353 return true;
354 TheCall->setArg(Arg: 0, ArgExpr: SrcArg.get());
355 ExprResult AlignArg =
356 S.PerformCopyInitialization(Entity: InitializedEntity::InitializeParameter(
357 Context&: S.Context, Type: AlignOp->getType(), Consumed: false),
358 EqualLoc: SourceLocation(), Init: AlignOp);
359 if (AlignArg.isInvalid())
360 return true;
361 TheCall->setArg(Arg: 1, ArgExpr: AlignArg.get());
362 // For align_up/align_down, the return type is the same as the (potentially
363 // decayed) argument type including qualifiers. For is_aligned(), the result
364 // is always bool.
365 TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
366 return false;
367}
368
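/// Check __builtin_{add,sub,mul}_overflow (also reachable through the C23
/// ckd_{add,sub,mul} macros): the first two arguments must be integers and the
/// third a pointer to a non-const integer, as in
/// __builtin_add_overflow(a, b, &result).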
369static bool BuiltinOverflow(Sema &S, CallExpr *TheCall, unsigned BuiltinID) {
370 if (checkArgCount(S, Call: TheCall, DesiredArgCount: 3))
371 return true;
372
373 std::pair<unsigned, const char *> Builtins[] = {
374 { Builtin::BI__builtin_add_overflow, "ckd_add" },
375 { Builtin::BI__builtin_sub_overflow, "ckd_sub" },
376 { Builtin::BI__builtin_mul_overflow, "ckd_mul" },
377 };
378
379 bool CkdOperation = llvm::any_of(Range&: Builtins, P: [&](const std::pair<unsigned,
380 const char *> &P) {
381 return BuiltinID == P.first && TheCall->getExprLoc().isMacroID() &&
382 Lexer::getImmediateMacroName(TheCall->getExprLoc(),
383 S.getSourceManager(), S.getLangOpts()) == P.second;
384 });
385
386 auto ValidCkdIntType = [](QualType QT) {
387 // A valid checked integer type is an integer type other than a plain char,
388 // bool, a bit-precise type, or an enumeration type.
389 if (const auto *BT = QT.getCanonicalType()->getAs<BuiltinType>())
390 return (BT->getKind() >= BuiltinType::Short &&
391 BT->getKind() <= BuiltinType::Int128) || (
392 BT->getKind() >= BuiltinType::UShort &&
393 BT->getKind() <= BuiltinType::UInt128) ||
394 BT->getKind() == BuiltinType::UChar ||
395 BT->getKind() == BuiltinType::SChar;
396 return false;
397 };
398
399 // First two arguments should be integers.
400 for (unsigned I = 0; I < 2; ++I) {
401 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(E: TheCall->getArg(Arg: I));
402 if (Arg.isInvalid()) return true;
403 TheCall->setArg(Arg: I, ArgExpr: Arg.get());
404
405 QualType Ty = Arg.get()->getType();
406 bool IsValid = CkdOperation ? ValidCkdIntType(Ty) : Ty->isIntegerType();
407 if (!IsValid) {
408 S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
409 << CkdOperation << Ty << Arg.get()->getSourceRange();
410 return true;
411 }
412 }
413
414 // Third argument should be a pointer to a non-const integer.
415 // IRGen correctly handles volatile, restrict, and address spaces, and
416 // the other qualifiers aren't possible.
417 {
418 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(E: TheCall->getArg(Arg: 2));
419 if (Arg.isInvalid()) return true;
420 TheCall->setArg(Arg: 2, ArgExpr: Arg.get());
421
422 QualType Ty = Arg.get()->getType();
423 const auto *PtrTy = Ty->getAs<PointerType>();
424 if (!PtrTy ||
425 !PtrTy->getPointeeType()->isIntegerType() ||
426 (!ValidCkdIntType(PtrTy->getPointeeType()) && CkdOperation) ||
427 PtrTy->getPointeeType().isConstQualified()) {
428 S.Diag(Arg.get()->getBeginLoc(),
429 diag::err_overflow_builtin_must_be_ptr_int)
430 << CkdOperation << Ty << Arg.get()->getSourceRange();
431 return true;
432 }
433 }
434
435 // Disallow signed bit-precise integer args larger than 128 bits to mul
436 // function until we improve backend support.
437 if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
438 for (unsigned I = 0; I < 3; ++I) {
439 const auto Arg = TheCall->getArg(Arg: I);
440 // Third argument will be a pointer.
441 auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
442 if (Ty->isBitIntType() && Ty->isSignedIntegerType() &&
443 S.getASTContext().getIntWidth(Ty) > 128)
444 return S.Diag(Arg->getBeginLoc(),
445 diag::err_overflow_builtin_bit_int_max_size)
446 << 128;
447 }
448 }
449
450 return false;
451}
452
453namespace {
454struct BuiltinDumpStructGenerator {
455 Sema &S;
456 CallExpr *TheCall;
457 SourceLocation Loc = TheCall->getBeginLoc();
458 SmallVector<Expr *, 32> Actions;
459 DiagnosticErrorTrap ErrorTracker;
460 PrintingPolicy Policy;
461
462 BuiltinDumpStructGenerator(Sema &S, CallExpr *TheCall)
463 : S(S), TheCall(TheCall), ErrorTracker(S.getDiagnostics()),
464 Policy(S.Context.getPrintingPolicy()) {
465 Policy.AnonymousTagLocations = false;
466 }
467
468 Expr *makeOpaqueValueExpr(Expr *Inner) {
469 auto *OVE = new (S.Context)
470 OpaqueValueExpr(Loc, Inner->getType(), Inner->getValueKind(),
471 Inner->getObjectKind(), Inner);
472 Actions.push_back(OVE);
473 return OVE;
474 }
475
476 Expr *getStringLiteral(llvm::StringRef Str) {
477 Expr *Lit = S.Context.getPredefinedStringLiteralFromCache(Key: Str);
478 // Wrap the literal in parentheses to attach a source location.
479 return new (S.Context) ParenExpr(Loc, Loc, Lit);
480 }
481
482 bool callPrintFunction(llvm::StringRef Format,
483 llvm::ArrayRef<Expr *> Exprs = {}) {
484 SmallVector<Expr *, 8> Args;
485 assert(TheCall->getNumArgs() >= 2);
486 Args.reserve(N: (TheCall->getNumArgs() - 2) + /*Format*/ 1 + Exprs.size());
487 Args.assign(TheCall->arg_begin() + 2, TheCall->arg_end());
488 Args.push_back(Elt: getStringLiteral(Str: Format));
489 Args.insert(I: Args.end(), From: Exprs.begin(), To: Exprs.end());
490
491 // Register a note to explain why we're performing the call.
492 Sema::CodeSynthesisContext Ctx;
493 Ctx.Kind = Sema::CodeSynthesisContext::BuildingBuiltinDumpStructCall;
494 Ctx.PointOfInstantiation = Loc;
495 Ctx.CallArgs = Args.data();
496 Ctx.NumCallArgs = Args.size();
497 S.pushCodeSynthesisContext(Ctx);
498
499 ExprResult RealCall =
500 S.BuildCallExpr(/*Scope=*/S: nullptr, Fn: TheCall->getArg(Arg: 1),
501 LParenLoc: TheCall->getBeginLoc(), ArgExprs: Args, RParenLoc: TheCall->getRParenLoc());
502
503 S.popCodeSynthesisContext();
504 if (!RealCall.isInvalid())
505 Actions.push_back(Elt: RealCall.get());
506 // Bail out if we've hit any errors, even if we managed to build the
507 // call. We don't want to produce more than one error.
508 return RealCall.isInvalid() || ErrorTracker.hasErrorOccurred();
509 }
510
511 Expr *getIndentString(unsigned Depth) {
512 if (!Depth)
513 return nullptr;
514
515 llvm::SmallString<32> Indent;
516 Indent.resize(N: Depth * Policy.Indentation, NV: ' ');
517 return getStringLiteral(Str: Indent);
518 }
519
520 Expr *getTypeString(QualType T) {
521 return getStringLiteral(Str: T.getAsString(Policy));
522 }
523
524 bool appendFormatSpecifier(QualType T, llvm::SmallVectorImpl<char> &Str) {
525 llvm::raw_svector_ostream OS(Str);
526
527 // Format 'bool', 'char', 'signed char', 'unsigned char' as numbers, rather
528 // than trying to print a single character.
529 if (auto *BT = T->getAs<BuiltinType>()) {
530 switch (BT->getKind()) {
531 case BuiltinType::Bool:
532 OS << "%d";
533 return true;
534 case BuiltinType::Char_U:
535 case BuiltinType::UChar:
536 OS << "%hhu";
537 return true;
538 case BuiltinType::Char_S:
539 case BuiltinType::SChar:
540 OS << "%hhd";
541 return true;
542 default:
543 break;
544 }
545 }
546
547 analyze_printf::PrintfSpecifier Specifier;
548 if (Specifier.fixType(QT: T, LangOpt: S.getLangOpts(), Ctx&: S.Context, /*IsObjCLiteral=*/false)) {
549 // We were able to guess how to format this.
550 if (Specifier.getConversionSpecifier().getKind() ==
551 analyze_printf::PrintfConversionSpecifier::sArg) {
552 // Wrap double-quotes around a '%s' specifier and limit its maximum
553 // length. Ideally we'd also somehow escape special characters in the
554 // contents but printf doesn't support that.
555 // FIXME: '%s' formatting is not safe in general.
556 OS << '"';
557 Specifier.setPrecision(analyze_printf::OptionalAmount(32u));
558 Specifier.toString(os&: OS);
559 OS << '"';
560 // FIXME: It would be nice to include a '...' if the string doesn't fit
561 // in the length limit.
562 } else {
563 Specifier.toString(os&: OS);
564 }
565 return true;
566 }
567
568 if (T->isPointerType()) {
569 // Format all pointers with '%p'.
570 OS << "%p";
571 return true;
572 }
573
574 return false;
575 }
576
577 bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) {
578 Expr *IndentLit = getIndentString(Depth);
579 Expr *TypeLit = getTypeString(T: S.Context.getRecordType(Decl: RD));
580 if (IndentLit ? callPrintFunction(Format: "%s%s", Exprs: {IndentLit, TypeLit})
581 : callPrintFunction(Format: "%s", Exprs: {TypeLit}))
582 return true;
583
584 return dumpRecordValue(RD, E, RecordIndent: IndentLit, Depth);
585 }
586
587 // Dump a record value. E should be a pointer or lvalue referring to an instance of RD.
588 bool dumpRecordValue(const RecordDecl *RD, Expr *E, Expr *RecordIndent,
589 unsigned Depth) {
590 // FIXME: Decide what to do if RD is a union. At least we should probably
591 // turn off printing `const char*` members with `%s`, because that is very
592 // likely to crash if that's not the active member. Whatever we decide, we
593 // should document it.
594
595 // Build an OpaqueValueExpr so we can refer to E more than once without
596 // triggering re-evaluation.
597 Expr *RecordArg = makeOpaqueValueExpr(Inner: E);
598 bool RecordArgIsPtr = RecordArg->getType()->isPointerType();
599
600 if (callPrintFunction(Format: " {\n"))
601 return true;
602
603 // Dump each base class, regardless of whether they're aggregates.
604 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) {
605 for (const auto &Base : CXXRD->bases()) {
606 QualType BaseType =
607 RecordArgIsPtr ? S.Context.getPointerType(T: Base.getType())
608 : S.Context.getLValueReferenceType(T: Base.getType());
609 ExprResult BasePtr = S.BuildCStyleCastExpr(
610 LParenLoc: Loc, Ty: S.Context.getTrivialTypeSourceInfo(T: BaseType, Loc), RParenLoc: Loc,
611 Op: RecordArg);
612 if (BasePtr.isInvalid() ||
613 dumpUnnamedRecord(RD: Base.getType()->getAsRecordDecl(), E: BasePtr.get(),
614 Depth: Depth + 1))
615 return true;
616 }
617 }
618
619 Expr *FieldIndentArg = getIndentString(Depth: Depth + 1);
620
621 // Dump each field.
622 for (auto *D : RD->decls()) {
623 auto *IFD = dyn_cast<IndirectFieldDecl>(D);
624 auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D);
625 if (!FD || FD->isUnnamedBitField() || FD->isAnonymousStructOrUnion())
626 continue;
627
628 llvm::SmallString<20> Format = llvm::StringRef("%s%s %s ");
629 llvm::SmallVector<Expr *, 5> Args = {FieldIndentArg,
630 getTypeString(FD->getType()),
631 getStringLiteral(FD->getName())};
632
633 if (FD->isBitField()) {
634 Format += ": %zu ";
635 QualType SizeT = S.Context.getSizeType();
636 llvm::APInt BitWidth(S.Context.getIntWidth(SizeT),
637 FD->getBitWidthValue(S.Context));
638 Args.push_back(IntegerLiteral::Create(S.Context, BitWidth, SizeT, Loc));
639 }
640
641 Format += "=";
642
643 ExprResult Field =
644 IFD ? S.BuildAnonymousStructUnionMemberReference(
645 CXXScopeSpec(), Loc, IFD,
646 DeclAccessPair::make(IFD, AS_public), RecordArg, Loc)
647 : S.BuildFieldReferenceExpr(
648 RecordArg, RecordArgIsPtr, Loc, CXXScopeSpec(), FD,
649 DeclAccessPair::make(FD, AS_public),
650 DeclarationNameInfo(FD->getDeclName(), Loc));
651 if (Field.isInvalid())
652 return true;
653
654 auto *InnerRD = FD->getType()->getAsRecordDecl();
655 auto *InnerCXXRD = dyn_cast_or_null<CXXRecordDecl>(InnerRD);
656 if (InnerRD && (!InnerCXXRD || InnerCXXRD->isAggregate())) {
657 // Recursively print the values of members of aggregate record type.
658 if (callPrintFunction(Format, Args) ||
659 dumpRecordValue(InnerRD, Field.get(), FieldIndentArg, Depth + 1))
660 return true;
661 } else {
662 Format += " ";
663 if (appendFormatSpecifier(FD->getType(), Format)) {
664 // We know how to print this field.
665 Args.push_back(Field.get());
666 } else {
667 // We don't know how to print this field. Print out its address
668 // with a format specifier that a smart tool will be able to
669 // recognize and treat specially.
670 Format += "*%p";
671 ExprResult FieldAddr =
672 S.BuildUnaryOp(nullptr, Loc, UO_AddrOf, Field.get());
673 if (FieldAddr.isInvalid())
674 return true;
675 Args.push_back(FieldAddr.get());
676 }
677 Format += "\n";
678 if (callPrintFunction(Format, Args))
679 return true;
680 }
681 }
682
683 return RecordIndent ? callPrintFunction(Format: "%s}\n", Exprs: RecordIndent)
684 : callPrintFunction(Format: "}\n");
685 }
686
687 Expr *buildWrapper() {
688 auto *Wrapper = PseudoObjectExpr::Create(S.Context, TheCall, Actions,
689 PseudoObjectExpr::NoResult);
690 TheCall->setType(Wrapper->getType());
691 TheCall->setValueKind(Wrapper->getValueKind());
692 return Wrapper;
693 }
694};
695} // namespace
696
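/// Check __builtin_dump_struct(&obj, printf-like-callable, ...) and expand it
/// into a sequence of calls to the callable that print the type, name, and
/// value of each field of *obj.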
697static ExprResult BuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
698 if (checkArgCountAtLeast(S, Call: TheCall, MinArgCount: 2))
699 return ExprError();
700
701 ExprResult PtrArgResult = S.DefaultLvalueConversion(E: TheCall->getArg(Arg: 0));
702 if (PtrArgResult.isInvalid())
703 return ExprError();
704 TheCall->setArg(Arg: 0, ArgExpr: PtrArgResult.get());
705
706 // First argument should be a pointer to a struct.
707 QualType PtrArgType = PtrArgResult.get()->getType();
708 if (!PtrArgType->isPointerType() ||
709 !PtrArgType->getPointeeType()->isRecordType()) {
710 S.Diag(PtrArgResult.get()->getBeginLoc(),
711 diag::err_expected_struct_pointer_argument)
712 << 1 << TheCall->getDirectCallee() << PtrArgType;
713 return ExprError();
714 }
715 QualType Pointee = PtrArgType->getPointeeType();
716 const RecordDecl *RD = Pointee->getAsRecordDecl();
717 // Require the pointee type to be complete (instantiating a class template if
718 // necessary); otherwise iterating over its fields below may crash.
719 if (S.RequireCompleteType(PtrArgResult.get()->getBeginLoc(), Pointee,
720 diag::err_incomplete_type))
721 return ExprError();
722 // Second argument is a callable, but we can't fully validate it until we try
723 // calling it.
724 QualType FnArgType = TheCall->getArg(Arg: 1)->getType();
725 if (!FnArgType->isFunctionType() && !FnArgType->isFunctionPointerType() &&
726 !FnArgType->isBlockPointerType() &&
727 !(S.getLangOpts().CPlusPlus && FnArgType->isRecordType())) {
728 auto *BT = FnArgType->getAs<BuiltinType>();
729 switch (BT ? BT->getKind() : BuiltinType::Void) {
730 case BuiltinType::Dependent:
731 case BuiltinType::Overload:
732 case BuiltinType::BoundMember:
733 case BuiltinType::PseudoObject:
734 case BuiltinType::UnknownAny:
735 case BuiltinType::BuiltinFn:
736 // This might be a callable.
737 break;
738
739 default:
740 S.Diag(TheCall->getArg(1)->getBeginLoc(),
741 diag::err_expected_callable_argument)
742 << 2 << TheCall->getDirectCallee() << FnArgType;
743 return ExprError();
744 }
745 }
746
747 BuiltinDumpStructGenerator Generator(S, TheCall);
748
749 // Wrap parentheses around the given pointer. This is not necessary for
750 // correct code generation, but it means that when we pretty-print the call
751 // arguments in our diagnostics we will produce '(&s)->n' instead of the
752 // incorrect '&s->n'.
753 Expr *PtrArg = PtrArgResult.get();
754 PtrArg = new (S.Context)
755 ParenExpr(PtrArg->getBeginLoc(),
756 S.getLocForEndOfToken(Loc: PtrArg->getEndLoc()), PtrArg);
757 if (Generator.dumpUnnamedRecord(RD, E: PtrArg, Depth: 0))
758 return ExprError();
759
760 return Generator.buildWrapper();
761}
762
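/// Check __builtin_call_with_static_chain(call, chain): the first argument
/// must be a plain (non-block, non-builtin) call and the second a pointer to
/// pass as the static chain; the builtin call takes on the inner call's result
/// type and value kind.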
763static bool BuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
764 if (checkArgCount(S, Call: BuiltinCall, DesiredArgCount: 2))
765 return true;
766
767 SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
768 Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
769 Expr *Call = BuiltinCall->getArg(Arg: 0);
770 Expr *Chain = BuiltinCall->getArg(Arg: 1);
771
772 if (Call->getStmtClass() != Stmt::CallExprClass) {
773 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
774 << Call->getSourceRange();
775 return true;
776 }
777
778 auto CE = cast<CallExpr>(Val: Call);
779 if (CE->getCallee()->getType()->isBlockPointerType()) {
780 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
781 << Call->getSourceRange();
782 return true;
783 }
784
785 const Decl *TargetDecl = CE->getCalleeDecl();
786 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Val: TargetDecl))
787 if (FD->getBuiltinID()) {
788 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
789 << Call->getSourceRange();
790 return true;
791 }
792
793 if (isa<CXXPseudoDestructorExpr>(Val: CE->getCallee()->IgnoreParens())) {
794 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
795 << Call->getSourceRange();
796 return true;
797 }
798
799 ExprResult ChainResult = S.UsualUnaryConversions(E: Chain);
800 if (ChainResult.isInvalid())
801 return true;
802 if (!ChainResult.get()->getType()->isPointerType()) {
803 S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
804 << Chain->getSourceRange();
805 return true;
806 }
807
808 QualType ReturnTy = CE->getCallReturnType(Ctx: S.Context);
809 QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
810 QualType BuiltinTy = S.Context.getFunctionType(
811 ResultTy: ReturnTy, Args: ArgTys, EPI: FunctionProtoType::ExtProtoInfo());
812 QualType BuiltinPtrTy = S.Context.getPointerType(T: BuiltinTy);
813
814 Builtin =
815 S.ImpCastExprToType(E: Builtin, Type: BuiltinPtrTy, CK: CK_BuiltinFnToFnPtr).get();
816
817 BuiltinCall->setType(CE->getType());
818 BuiltinCall->setValueKind(CE->getValueKind());
819 BuiltinCall->setObjectKind(CE->getObjectKind());
820 BuiltinCall->setCallee(Builtin);
821 BuiltinCall->setArg(Arg: 1, ArgExpr: ChainResult.get());
822
823 return false;
824}
825
826namespace {
827
828class ScanfDiagnosticFormatHandler
829 : public analyze_format_string::FormatStringHandler {
830 // Accepts the argument index (relative to the first destination index) of the
831 // argument whose size we want.
832 using ComputeSizeFunction =
833 llvm::function_ref<std::optional<llvm::APSInt>(unsigned)>;
834
835 // Accepts the argument index (relative to the first destination index), the
836 // destination size, and the source size.
837 using DiagnoseFunction =
838 llvm::function_ref<void(unsigned, unsigned, unsigned)>;
839
840 ComputeSizeFunction ComputeSizeArgument;
841 DiagnoseFunction Diagnose;
842
843public:
844 ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
845 DiagnoseFunction Diagnose)
846 : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}
847
848 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
849 const char *StartSpecifier,
850 unsigned specifierLen) override {
851 if (!FS.consumesDataArgument())
852 return true;
853
854 unsigned NulByte = 0;
855 switch ((FS.getConversionSpecifier().getKind())) {
856 default:
857 return true;
858 case analyze_format_string::ConversionSpecifier::sArg:
859 case analyze_format_string::ConversionSpecifier::ScanListArg:
860 NulByte = 1;
861 break;
862 case analyze_format_string::ConversionSpecifier::cArg:
863 break;
864 }
865
866 analyze_format_string::OptionalAmount FW = FS.getFieldWidth();
867 if (FW.getHowSpecified() !=
868 analyze_format_string::OptionalAmount::HowSpecified::Constant)
869 return true;
870
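    // For example, "%10s" reads up to 10 characters plus a terminating nul,
    // giving a SourceSize of 11.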
871 unsigned SourceSize = FW.getConstantAmount() + NulByte;
872
873 std::optional<llvm::APSInt> DestSizeAPS =
874 ComputeSizeArgument(FS.getArgIndex());
875 if (!DestSizeAPS)
876 return true;
877
878 unsigned DestSize = DestSizeAPS->getZExtValue();
879
880 if (DestSize < SourceSize)
881 Diagnose(FS.getArgIndex(), DestSize, SourceSize);
882
883 return true;
884 }
885};
886
887class EstimateSizeFormatHandler
888 : public analyze_format_string::FormatStringHandler {
889 size_t Size;
890 /// Whether the format string contains the Linux kernel's format specifier
891 /// extension.
892 bool IsKernelCompatible = true;
893
894public:
895 EstimateSizeFormatHandler(StringRef Format)
896 : Size(std::min(a: Format.find(C: 0), b: Format.size()) +
897 1 /* null byte always written by sprintf */) {}
898
899 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
900 const char *, unsigned SpecifierLen,
901 const TargetInfo &) override {
902
903 const size_t FieldWidth = computeFieldWidth(FS);
904 const size_t Precision = computePrecision(FS);
905
906 // The actual format.
907 switch (FS.getConversionSpecifier().getKind()) {
908 // Just a char.
909 case analyze_format_string::ConversionSpecifier::cArg:
910 case analyze_format_string::ConversionSpecifier::CArg:
911 Size += std::max(a: FieldWidth, b: (size_t)1);
912 break;
913 // Just an integer.
914 case analyze_format_string::ConversionSpecifier::dArg:
915 case analyze_format_string::ConversionSpecifier::DArg:
916 case analyze_format_string::ConversionSpecifier::iArg:
917 case analyze_format_string::ConversionSpecifier::oArg:
918 case analyze_format_string::ConversionSpecifier::OArg:
919 case analyze_format_string::ConversionSpecifier::uArg:
920 case analyze_format_string::ConversionSpecifier::UArg:
921 case analyze_format_string::ConversionSpecifier::xArg:
922 case analyze_format_string::ConversionSpecifier::XArg:
923 Size += std::max(a: FieldWidth, b: Precision);
924 break;
925
926 // %g-style conversion switches between %f and %e style dynamically.
927 // %g removes trailing zeros and does not print a decimal point if no
928 // digits follow it, so %g can print as little as a single digit.
929 // FIXME: If it is alternative form:
930 // For g and G conversions, trailing zeros are not removed from the result.
931 case analyze_format_string::ConversionSpecifier::gArg:
932 case analyze_format_string::ConversionSpecifier::GArg:
933 Size += 1;
934 break;
935
936 // Floating point number in the form '[+]ddd.ddd'.
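    // e.g. "%f" with no field width and the default precision of 6 accounts
    // for at least 1 + 1 + 6 = 8 bytes.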
937 case analyze_format_string::ConversionSpecifier::fArg:
938 case analyze_format_string::ConversionSpecifier::FArg:
939 Size += std::max(a: FieldWidth, b: 1 /* integer part */ +
940 (Precision ? 1 + Precision
941 : 0) /* period + decimal */);
942 break;
943
944 // Floating point number in the form '[-]d.ddde[+-]dd'.
945 case analyze_format_string::ConversionSpecifier::eArg:
946 case analyze_format_string::ConversionSpecifier::EArg:
947 Size +=
948 std::max(a: FieldWidth,
949 b: 1 /* integer part */ +
950 (Precision ? 1 + Precision : 0) /* period + decimal */ +
951 1 /* e or E letter */ + 2 /* exponent */);
952 break;
953
954 // Floating point number in the form '[-]0xh.hhhhp±dd'.
955 case analyze_format_string::ConversionSpecifier::aArg:
956 case analyze_format_string::ConversionSpecifier::AArg:
957 Size +=
958 std::max(a: FieldWidth,
959 b: 2 /* 0x */ + 1 /* integer part */ +
960 (Precision ? 1 + Precision : 0) /* period + decimal */ +
961 1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
962 break;
963
964 // Just a string.
965 case analyze_format_string::ConversionSpecifier::sArg:
966 case analyze_format_string::ConversionSpecifier::SArg:
967 Size += FieldWidth;
968 break;
969
970 // Just a pointer in the form '0xddd'.
971 case analyze_format_string::ConversionSpecifier::pArg:
972 // The Linux kernel has its own extension for the `%p` specifier.
973 // Kernel documentation:
974 // https://docs.kernel.org/core-api/printk-formats.html#pointer-types
975 IsKernelCompatible = false;
976 Size += std::max(a: FieldWidth, b: 2 /* leading 0x */ + Precision);
977 break;
978
979 // A plain percent.
980 case analyze_format_string::ConversionSpecifier::PercentArg:
981 Size += 1;
982 break;
983
984 default:
985 break;
986 }
987
988 Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();
989
990 if (FS.hasAlternativeForm()) {
991 switch (FS.getConversionSpecifier().getKind()) {
992 // For o conversion, it increases the precision, if and only if necessary,
993 // to force the first digit of the result to be a zero
994 // (if the value and precision are both 0, a single 0 is printed)
995 case analyze_format_string::ConversionSpecifier::oArg:
996 // For b conversion, a nonzero result has 0b prefixed to it.
997 case analyze_format_string::ConversionSpecifier::bArg:
998 // For x (or X) conversion, a nonzero result has 0x (or 0X) prefixed to
999 // it.
1000 case analyze_format_string::ConversionSpecifier::xArg:
1001 case analyze_format_string::ConversionSpecifier::XArg:
1002 // Note: even when the prefix is added, if
1003 // (prefix_width <= FieldWidth - formatted_length) holds,
1004 // the prefix does not increase the format
1005 // size, e.g. ("%#3x", 0xf) prints "0xf".
1006
1007 // If the result is zero, o, b, x, X adds nothing.
1008 break;
1009 // For a, A, e, E, f, F, g, and G conversions,
1010 // the result of converting a floating-point number always contains a
1011 // decimal-point
1012 case analyze_format_string::ConversionSpecifier::aArg:
1013 case analyze_format_string::ConversionSpecifier::AArg:
1014 case analyze_format_string::ConversionSpecifier::eArg:
1015 case analyze_format_string::ConversionSpecifier::EArg:
1016 case analyze_format_string::ConversionSpecifier::fArg:
1017 case analyze_format_string::ConversionSpecifier::FArg:
1018 case analyze_format_string::ConversionSpecifier::gArg:
1019 case analyze_format_string::ConversionSpecifier::GArg:
1020 Size += (Precision ? 0 : 1);
1021 break;
1022 // For other conversions, the behavior is undefined.
1023 default:
1024 break;
1025 }
1026 }
1027 assert(SpecifierLen <= Size && "no underflow");
1028 Size -= SpecifierLen;
1029 return true;
1030 }
1031
1032 size_t getSizeLowerBound() const { return Size; }
1033 bool isKernelCompatible() const { return IsKernelCompatible; }
1034
1035private:
1036 static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
1037 const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
1038 size_t FieldWidth = 0;
1039 if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
1040 FieldWidth = FW.getConstantAmount();
1041 return FieldWidth;
1042 }
1043
1044 static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
1045 const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
1046 size_t Precision = 0;
1047
1048 // See man 3 printf for default precision value based on the specifier.
1049 switch (FW.getHowSpecified()) {
1050 case analyze_format_string::OptionalAmount::NotSpecified:
1051 switch (FS.getConversionSpecifier().getKind()) {
1052 default:
1053 break;
1054 case analyze_format_string::ConversionSpecifier::dArg: // %d
1055 case analyze_format_string::ConversionSpecifier::DArg: // %D
1056 case analyze_format_string::ConversionSpecifier::iArg: // %i
1057 Precision = 1;
1058 break;
1059 case analyze_format_string::ConversionSpecifier::oArg: // %o
1060 case analyze_format_string::ConversionSpecifier::OArg: // %O
1061 case analyze_format_string::ConversionSpecifier::uArg: // %u
1062 case analyze_format_string::ConversionSpecifier::UArg: // %U
1063 case analyze_format_string::ConversionSpecifier::xArg: // %x
1064 case analyze_format_string::ConversionSpecifier::XArg: // %X
1065 Precision = 1;
1066 break;
1067 case analyze_format_string::ConversionSpecifier::fArg: // %f
1068 case analyze_format_string::ConversionSpecifier::FArg: // %F
1069 case analyze_format_string::ConversionSpecifier::eArg: // %e
1070 case analyze_format_string::ConversionSpecifier::EArg: // %E
1071 case analyze_format_string::ConversionSpecifier::gArg: // %g
1072 case analyze_format_string::ConversionSpecifier::GArg: // %G
1073 Precision = 6;
1074 break;
1075 case analyze_format_string::ConversionSpecifier::pArg: // %p
1076 Precision = 1;
1077 break;
1078 }
1079 break;
1080 case analyze_format_string::OptionalAmount::Constant:
1081 Precision = FW.getConstantAmount();
1082 break;
1083 default:
1084 break;
1085 }
1086 return Precision;
1087 }
1088};
1089
1090} // namespace
1091
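/// If \p FormatExpr is an ordinary or UTF-8 string literal, extract its text
/// into \p FormatStrRef and its length, up to the first embedded nul (bounded
/// by the array size), into \p StrLen. Returns true on success.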
1092static bool ProcessFormatStringLiteral(const Expr *FormatExpr,
1093 StringRef &FormatStrRef, size_t &StrLen,
1094 ASTContext &Context) {
1095 if (const auto *Format = dyn_cast<StringLiteral>(Val: FormatExpr);
1096 Format && (Format->isOrdinary() || Format->isUTF8())) {
1097 FormatStrRef = Format->getString();
1098 const ConstantArrayType *T =
1099 Context.getAsConstantArrayType(T: Format->getType());
1100 assert(T && "String literal not of constant array type!");
1101 size_t TypeSize = T->getZExtSize();
1102 // In case there's a null byte somewhere.
1103 StrLen = std::min(a: std::max(a: TypeSize, b: size_t(1)) - 1, b: FormatStrRef.find(C: 0));
1104 return true;
1105 }
1106 return false;
1107}
1108
1109void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
1110 CallExpr *TheCall) {
1111 if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
1112 isConstantEvaluatedContext())
1113 return;
1114
1115 bool UseDABAttr = false;
1116 const FunctionDecl *UseDecl = FD;
1117
1118 const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
1119 if (DABAttr) {
1120 UseDecl = DABAttr->getFunction();
1121 assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!");
1122 UseDABAttr = true;
1123 }
1124
1125 unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/ConsiderWrapperFunctions: true);
1126
1127 if (!BuiltinID)
1128 return;
1129
1130 const TargetInfo &TI = getASTContext().getTargetInfo();
1131 unsigned SizeTypeWidth = TI.getTypeWidth(T: TI.getSizeType());
1132
1133 auto TranslateIndex = [&](unsigned Index) -> std::optional<unsigned> {
1134 // If we refer to a diagnose_as_builtin attribute, we need to change the
1135 // argument index to refer to the arguments of the called function, unless
1136 // the index is out of bounds, which presumably means the function is
1137 // variadic.
1138 if (!UseDABAttr)
1139 return Index;
1140 unsigned DABIndices = DABAttr->argIndices_size();
1141 unsigned NewIndex = Index < DABIndices
1142 ? DABAttr->argIndices_begin()[Index]
1143 : Index - DABIndices + FD->getNumParams();
1144 if (NewIndex >= TheCall->getNumArgs())
1145 return std::nullopt;
1146 return NewIndex;
1147 };
1148
1149 auto ComputeExplicitObjectSizeArgument =
1150 [&](unsigned Index) -> std::optional<llvm::APSInt> {
1151 std::optional<unsigned> IndexOptional = TranslateIndex(Index);
1152 if (!IndexOptional)
1153 return std::nullopt;
1154 unsigned NewIndex = *IndexOptional;
1155 Expr::EvalResult Result;
1156 Expr *SizeArg = TheCall->getArg(Arg: NewIndex);
1157 if (!SizeArg->EvaluateAsInt(Result, Ctx: getASTContext()))
1158 return std::nullopt;
1159 llvm::APSInt Integer = Result.Val.getInt();
1160 Integer.setIsUnsigned(true);
1161 return Integer;
1162 };
1163
1164 auto ComputeSizeArgument =
1165 [&](unsigned Index) -> std::optional<llvm::APSInt> {
1166 // If the parameter has a pass_object_size attribute, then we should use its
1167 // (potentially) more strict checking mode. Otherwise, conservatively assume
1168 // type 0.
1169 int BOSType = 0;
1170 // This check can fail for variadic functions.
1171 if (Index < FD->getNumParams()) {
1172 if (const auto *POS =
1173 FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
1174 BOSType = POS->getType();
1175 }
1176
1177 std::optional<unsigned> IndexOptional = TranslateIndex(Index);
1178 if (!IndexOptional)
1179 return std::nullopt;
1180 unsigned NewIndex = *IndexOptional;
1181
1182 if (NewIndex >= TheCall->getNumArgs())
1183 return std::nullopt;
1184
1185 const Expr *ObjArg = TheCall->getArg(Arg: NewIndex);
1186 uint64_t Result;
1187 if (!ObjArg->tryEvaluateObjectSize(Result, Ctx&: getASTContext(), Type: BOSType))
1188 return std::nullopt;
1189
1190 // Get the object size in the target's size_t width.
1191 return llvm::APSInt::getUnsigned(X: Result).extOrTrunc(width: SizeTypeWidth);
1192 };
1193
1194 auto ComputeStrLenArgument =
1195 [&](unsigned Index) -> std::optional<llvm::APSInt> {
1196 std::optional<unsigned> IndexOptional = TranslateIndex(Index);
1197 if (!IndexOptional)
1198 return std::nullopt;
1199 unsigned NewIndex = *IndexOptional;
1200
1201 const Expr *ObjArg = TheCall->getArg(Arg: NewIndex);
1202 uint64_t Result;
1203 if (!ObjArg->tryEvaluateStrLen(Result, Ctx&: getASTContext()))
1204 return std::nullopt;
1205 // Add 1 for null byte.
1206 return llvm::APSInt::getUnsigned(X: Result + 1).extOrTrunc(width: SizeTypeWidth);
1207 };
1208
1209 std::optional<llvm::APSInt> SourceSize;
1210 std::optional<llvm::APSInt> DestinationSize;
1211 unsigned DiagID = 0;
1212 bool IsChkVariant = false;
1213
1214 auto GetFunctionName = [&]() {
1215 StringRef FunctionName = getASTContext().BuiltinInfo.getName(ID: BuiltinID);
1216 // Skim off the details of whichever builtin was called to produce a better
1217 // diagnostic, as it's unlikely that the user wrote the __builtin
1218 // explicitly.
1219 if (IsChkVariant) {
1220 FunctionName = FunctionName.drop_front(N: std::strlen(s: "__builtin___"));
1221 FunctionName = FunctionName.drop_back(N: std::strlen(s: "_chk"));
1222 } else {
1223 FunctionName.consume_front(Prefix: "__builtin_");
1224 }
1225 return FunctionName;
1226 };
1227
1228 switch (BuiltinID) {
1229 default:
1230 return;
1231 case Builtin::BI__builtin_strcpy:
1232 case Builtin::BIstrcpy: {
1233 DiagID = diag::warn_fortify_strlen_overflow;
1234 SourceSize = ComputeStrLenArgument(1);
1235 DestinationSize = ComputeSizeArgument(0);
1236 break;
1237 }
1238
1239 case Builtin::BI__builtin___strcpy_chk: {
1240 DiagID = diag::warn_fortify_strlen_overflow;
1241 SourceSize = ComputeStrLenArgument(1);
1242 DestinationSize = ComputeExplicitObjectSizeArgument(2);
1243 IsChkVariant = true;
1244 break;
1245 }
1246
1247 case Builtin::BIscanf:
1248 case Builtin::BIfscanf:
1249 case Builtin::BIsscanf: {
1250 unsigned FormatIndex = 1;
1251 unsigned DataIndex = 2;
1252 if (BuiltinID == Builtin::BIscanf) {
1253 FormatIndex = 0;
1254 DataIndex = 1;
1255 }
1256
1257 const auto *FormatExpr =
1258 TheCall->getArg(Arg: FormatIndex)->IgnoreParenImpCasts();
1259
1260 StringRef FormatStrRef;
1261 size_t StrLen;
1262 if (!ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context))
1263 return;
1264
1265 auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize,
1266 unsigned SourceSize) {
1267 DiagID = diag::warn_fortify_scanf_overflow;
1268 unsigned Index = ArgIndex + DataIndex;
1269 StringRef FunctionName = GetFunctionName();
1270 DiagRuntimeBehavior(TheCall->getArg(Arg: Index)->getBeginLoc(), TheCall,
1271 PDiag(DiagID) << FunctionName << (Index + 1)
1272 << DestSize << SourceSize);
1273 };
1274
1275 auto ShiftedComputeSizeArgument = [&](unsigned Index) {
1276 return ComputeSizeArgument(Index + DataIndex);
1277 };
1278 ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose);
1279 const char *FormatBytes = FormatStrRef.data();
1280 analyze_format_string::ParseScanfString(H, beg: FormatBytes,
1281 end: FormatBytes + StrLen, LO: getLangOpts(),
1282 Target: Context.getTargetInfo());
1283
1284 // Unlike the other cases, the diagnostic has already been issued here, since
1285 // it refers to the argument number rather than to the source and destination
1286 // sizes, so there is no need to continue.
1287 return;
1288 }
1289
1290 case Builtin::BIsprintf:
1291 case Builtin::BI__builtin___sprintf_chk: {
1292 size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
1293 auto *FormatExpr = TheCall->getArg(Arg: FormatIndex)->IgnoreParenImpCasts();
1294
1295 StringRef FormatStrRef;
1296 size_t StrLen;
1297 if (ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context)) {
1298 EstimateSizeFormatHandler H(FormatStrRef);
1299 const char *FormatBytes = FormatStrRef.data();
1300 if (!analyze_format_string::ParsePrintfString(
1301 H, beg: FormatBytes, end: FormatBytes + StrLen, LO: getLangOpts(),
1302 Target: Context.getTargetInfo(), isFreeBSDKPrintf: false)) {
1303 DiagID = H.isKernelCompatible()
1304 ? diag::warn_format_overflow
1305 : diag::warn_format_overflow_non_kprintf;
1306 SourceSize = llvm::APSInt::getUnsigned(X: H.getSizeLowerBound())
1307 .extOrTrunc(width: SizeTypeWidth);
1308 if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
1309 DestinationSize = ComputeExplicitObjectSizeArgument(2);
1310 IsChkVariant = true;
1311 } else {
1312 DestinationSize = ComputeSizeArgument(0);
1313 }
1314 break;
1315 }
1316 }
1317 return;
1318 }
1319 case Builtin::BI__builtin___memcpy_chk:
1320 case Builtin::BI__builtin___memmove_chk:
1321 case Builtin::BI__builtin___memset_chk:
1322 case Builtin::BI__builtin___strlcat_chk:
1323 case Builtin::BI__builtin___strlcpy_chk:
1324 case Builtin::BI__builtin___strncat_chk:
1325 case Builtin::BI__builtin___strncpy_chk:
1326 case Builtin::BI__builtin___stpncpy_chk:
1327 case Builtin::BI__builtin___memccpy_chk:
1328 case Builtin::BI__builtin___mempcpy_chk: {
1329 DiagID = diag::warn_builtin_chk_overflow;
1330 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
1331 DestinationSize =
1332 ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
1333 IsChkVariant = true;
1334 break;
1335 }
1336
1337 case Builtin::BI__builtin___snprintf_chk:
1338 case Builtin::BI__builtin___vsnprintf_chk: {
1339 DiagID = diag::warn_builtin_chk_overflow;
1340 SourceSize = ComputeExplicitObjectSizeArgument(1);
1341 DestinationSize = ComputeExplicitObjectSizeArgument(3);
1342 IsChkVariant = true;
1343 break;
1344 }
1345
1346 case Builtin::BIstrncat:
1347 case Builtin::BI__builtin_strncat:
1348 case Builtin::BIstrncpy:
1349 case Builtin::BI__builtin_strncpy:
1350 case Builtin::BIstpncpy:
1351 case Builtin::BI__builtin_stpncpy: {
1352 // Whether these functions overflow depends on the runtime strlen of the
1353 // string, not just the buffer size, so emitting the "always overflow"
1354 // diagnostic isn't quite right. We should still diagnose passing a buffer
1355 // size larger than the destination buffer though; this is a runtime abort
1356 // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
1357 DiagID = diag::warn_fortify_source_size_mismatch;
1358 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
1359 DestinationSize = ComputeSizeArgument(0);
1360 break;
1361 }
1362
1363 case Builtin::BImemcpy:
1364 case Builtin::BI__builtin_memcpy:
1365 case Builtin::BImemmove:
1366 case Builtin::BI__builtin_memmove:
1367 case Builtin::BImemset:
1368 case Builtin::BI__builtin_memset:
1369 case Builtin::BImempcpy:
1370 case Builtin::BI__builtin_mempcpy: {
1371 DiagID = diag::warn_fortify_source_overflow;
1372 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
1373 DestinationSize = ComputeSizeArgument(0);
1374 break;
1375 }
1376 case Builtin::BIsnprintf:
1377 case Builtin::BI__builtin_snprintf:
1378 case Builtin::BIvsnprintf:
1379 case Builtin::BI__builtin_vsnprintf: {
1380 DiagID = diag::warn_fortify_source_size_mismatch;
1381 SourceSize = ComputeExplicitObjectSizeArgument(1);
1382 const auto *FormatExpr = TheCall->getArg(Arg: 2)->IgnoreParenImpCasts();
1383 StringRef FormatStrRef;
1384 size_t StrLen;
1385 if (SourceSize &&
1386 ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context)) {
1387 EstimateSizeFormatHandler H(FormatStrRef);
1388 const char *FormatBytes = FormatStrRef.data();
1389 if (!analyze_format_string::ParsePrintfString(
1390 H, beg: FormatBytes, end: FormatBytes + StrLen, LO: getLangOpts(),
1391 Target: Context.getTargetInfo(), /*isFreeBSDKPrintf=*/false)) {
1392 llvm::APSInt FormatSize =
1393 llvm::APSInt::getUnsigned(X: H.getSizeLowerBound())
1394 .extOrTrunc(width: SizeTypeWidth);
1395 if (FormatSize > *SourceSize && *SourceSize != 0) {
1396 unsigned TruncationDiagID =
1397 H.isKernelCompatible() ? diag::warn_format_truncation
1398 : diag::warn_format_truncation_non_kprintf;
1399 SmallString<16> SpecifiedSizeStr;
1400 SmallString<16> FormatSizeStr;
1401 SourceSize->toString(Str&: SpecifiedSizeStr, /*Radix=*/10);
1402 FormatSize.toString(Str&: FormatSizeStr, /*Radix=*/10);
1403 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
1404 PDiag(DiagID: TruncationDiagID)
1405 << GetFunctionName() << SpecifiedSizeStr
1406 << FormatSizeStr);
1407 }
1408 }
1409 }
1410 DestinationSize = ComputeSizeArgument(0);
1411 }
1412 }
1413
1414 if (!SourceSize || !DestinationSize ||
1415 llvm::APSInt::compareValues(I1: *SourceSize, I2: *DestinationSize) <= 0)
1416 return;
1417
1418 StringRef FunctionName = GetFunctionName();
1419
1420 SmallString<16> DestinationStr;
1421 SmallString<16> SourceStr;
1422 DestinationSize->toString(Str&: DestinationStr, /*Radix=*/10);
1423 SourceSize->toString(Str&: SourceStr, /*Radix=*/10);
1424 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
1425 PDiag(DiagID)
1426 << FunctionName << DestinationStr << SourceStr);
1427}
1428
1429static bool BuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
1430 Scope::ScopeFlags NeededScopeFlags,
1431 unsigned DiagID) {
1432 // Scopes aren't available during instantiation. Fortunately, builtin
1433 // functions cannot be template args so they cannot be formed through template
1434 // instantiation. Therefore checking once during the parse is sufficient.
1435 if (SemaRef.inTemplateInstantiation())
1436 return false;
1437
1438 Scope *S = SemaRef.getCurScope();
1439 while (S && !S->isSEHExceptScope())
1440 S = S->getParent();
1441 if (!S || !(S->getFlags() & NeededScopeFlags)) {
1442 auto *DRE = cast<DeclRefExpr>(Val: TheCall->getCallee()->IgnoreParenCasts());
1443 SemaRef.Diag(TheCall->getExprLoc(), DiagID)
1444 << DRE->getDecl()->getIdentifier();
1445 return true;
1446 }
1447
1448 return false;
1449}
1450
1451static inline bool isBlockPointer(Expr *Arg) {
1452 return Arg->getType()->isBlockPointerType();
1453}
1454
1455/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
1456/// void*, which is a requirement of device side enqueue.
1457static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
1458 const BlockPointerType *BPT =
1459 cast<BlockPointerType>(Val: BlockArg->getType().getCanonicalType());
1460 ArrayRef<QualType> Params =
1461 BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
1462 unsigned ArgCounter = 0;
1463 bool IllegalParams = false;
1464 // Iterate over the block parameters, diagnosing every parameter that is not
1465 // a 'local void*'.
1466 for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
1467 I != E; ++I, ++ArgCounter) {
1468 if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
1469 (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
1470 LangAS::opencl_local) {
1471 // Get the location of the error. If a block literal has been passed
1472 // (BlockExpr) then we can point straight to the offending argument,
1473 // else we just point to the variable reference.
1474 SourceLocation ErrorLoc;
1475 if (isa<BlockExpr>(Val: BlockArg)) {
1476 BlockDecl *BD = cast<BlockExpr>(Val: BlockArg)->getBlockDecl();
1477 ErrorLoc = BD->getParamDecl(i: ArgCounter)->getBeginLoc();
1478 } else if (isa<DeclRefExpr>(Val: BlockArg)) {
1479 ErrorLoc = cast<DeclRefExpr>(Val: BlockArg)->getBeginLoc();
1480 }
1481 S.Diag(ErrorLoc,
1482 diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
1483 IllegalParams = true;
1484 }
1485 }
1486
1487 return IllegalParams;
1488}
1489
1490static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
1491 // An OpenCL device can support the extension but not the feature, since the
1492 // extension requires subgroup independent forward progress, which is
1493 // optional in the OpenCL C 3.0 __opencl_c_subgroups feature.
1494 if (!S.getOpenCLOptions().isSupported(Ext: "cl_khr_subgroups", LO: S.getLangOpts()) &&
1495 !S.getOpenCLOptions().isSupported(Ext: "__opencl_c_subgroups",
1496 LO: S.getLangOpts())) {
1497 S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
1498 << 1 << Call->getDirectCallee()
1499 << "cl_khr_subgroups or __opencl_c_subgroups";
1500 return true;
1501 }
1502 return false;
1503}
1504
1505static bool OpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
1506 if (checkArgCount(S, Call: TheCall, DesiredArgCount: 2))
1507 return true;
1508
1509 if (checkOpenCLSubgroupExt(S, Call: TheCall))
1510 return true;
1511
1512 // First argument is an ndrange_t type.
1513 Expr *NDRangeArg = TheCall->getArg(Arg: 0);
1514 if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
1515 S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1516 << TheCall->getDirectCallee() << "'ndrange_t'";
1517 return true;
1518 }
1519
1520 Expr *BlockArg = TheCall->getArg(Arg: 1);
1521 if (!isBlockPointer(Arg: BlockArg)) {
1522 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1523 << TheCall->getDirectCallee() << "block";
1524 return true;
1525 }
1526 return checkOpenCLBlockArgs(S, BlockArg);
1527}
1528
1529/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
1530/// get_kernel_work_group_size
1531/// and get_kernel_preferred_work_group_size_multiple builtin functions.
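///
/// For illustration only (a hypothetical OpenCL C sketch), a well-formed use is:
/// \code
///   uint n = get_kernel_work_group_size(^(local void *p){ /* ... */ });
/// \endcode
/// Any non-block argument is rejected below.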
1532static bool OpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
1533 if (checkArgCount(S, Call: TheCall, DesiredArgCount: 1))
1534 return true;
1535
1536 Expr *BlockArg = TheCall->getArg(Arg: 0);
1537 if (!isBlockPointer(Arg: BlockArg)) {
1538 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1539 << TheCall->getDirectCallee() << "block";
1540 return true;
1541 }
1542 return checkOpenCLBlockArgs(S, BlockArg);
1543}
1544
/// Check that the expression has an integer type, or a valid implicit
/// conversion to the expected integer type, diagnosing otherwise.
1546static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
1547 const QualType &IntType);
1548
1549static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
1550 unsigned Start, unsigned End) {
1551 bool IllegalParams = false;
1552 for (unsigned I = Start; I <= End; ++I)
1553 IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(Arg: I),
1554 S.Context.getSizeType());
1555 return IllegalParams;
1556}
1557
/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
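///
/// For illustration only (a hypothetical OpenCL C sketch): a block with two
/// 'local void*' parameters requires two trailing size arguments, e.g.
/// \code
///   enqueue_kernel(q, flags, nd, ^(local void *a, local void *b){ /* ... */ },
///                  (uint)64, (uint)128);
/// \endcode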
1560static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
1561 Expr *BlockArg,
1562 unsigned NumNonVarArgs) {
1563 const BlockPointerType *BPT =
1564 cast<BlockPointerType>(Val: BlockArg->getType().getCanonicalType());
1565 unsigned NumBlockParams =
1566 BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
1567 unsigned TotalNumArgs = TheCall->getNumArgs();
1568
1569 // For each argument passed to the block, a corresponding uint needs to
1570 // be passed to describe the size of the local memory.
1571 if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
1572 S.Diag(TheCall->getBeginLoc(),
1573 diag::err_opencl_enqueue_kernel_local_size_args);
1574 return true;
1575 }
1576
1577 // Check that the sizes of the local memory are specified by integers.
1578 return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, Start: NumNonVarArgs,
1579 End: TotalNumArgs - 1);
1580}
1581
/// OpenCL C v2.0, s6.13.17 - The enqueue_kernel function has four different
/// overload forms, as specified in Table 6.13.17.1:
1584/// int enqueue_kernel(queue_t queue,
1585/// kernel_enqueue_flags_t flags,
1586/// const ndrange_t ndrange,
1587/// void (^block)(void))
1588/// int enqueue_kernel(queue_t queue,
1589/// kernel_enqueue_flags_t flags,
1590/// const ndrange_t ndrange,
1591/// uint num_events_in_wait_list,
1592/// clk_event_t *event_wait_list,
1593/// clk_event_t *event_ret,
1594/// void (^block)(void))
1595/// int enqueue_kernel(queue_t queue,
1596/// kernel_enqueue_flags_t flags,
1597/// const ndrange_t ndrange,
1598/// void (^block)(local void*, ...),
1599/// uint size0, ...)
1600/// int enqueue_kernel(queue_t queue,
1601/// kernel_enqueue_flags_t flags,
1602/// const ndrange_t ndrange,
1603/// uint num_events_in_wait_list,
1604/// clk_event_t *event_wait_list,
1605/// clk_event_t *event_ret,
1606/// void (^block)(local void*, ...),
1607/// uint size0, ...)
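///
/// For illustration only (a hypothetical OpenCL C sketch), the simplest form is:
/// \code
///   enqueue_kernel(get_default_queue(), CLK_ENQUEUE_FLAGS_WAIT_KERNEL,
///                  ndrange_1D(1), ^{ /* kernel body */ });
/// \endcode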
1608static bool OpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
1609 unsigned NumArgs = TheCall->getNumArgs();
1610
1611 if (NumArgs < 4) {
1612 S.Diag(TheCall->getBeginLoc(),
1613 diag::err_typecheck_call_too_few_args_at_least)
1614 << 0 << 4 << NumArgs << /*is non object*/ 0;
1615 return true;
1616 }
1617
1618 Expr *Arg0 = TheCall->getArg(Arg: 0);
1619 Expr *Arg1 = TheCall->getArg(Arg: 1);
1620 Expr *Arg2 = TheCall->getArg(Arg: 2);
1621 Expr *Arg3 = TheCall->getArg(Arg: 3);
1622
1623 // First argument always needs to be a queue_t type.
1624 if (!Arg0->getType()->isQueueT()) {
1625 S.Diag(TheCall->getArg(0)->getBeginLoc(),
1626 diag::err_opencl_builtin_expected_type)
1627 << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
1628 return true;
1629 }
1630
1631 // Second argument always needs to be a kernel_enqueue_flags_t enum value.
1632 if (!Arg1->getType()->isIntegerType()) {
1633 S.Diag(TheCall->getArg(1)->getBeginLoc(),
1634 diag::err_opencl_builtin_expected_type)
1635 << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
1636 return true;
1637 }
1638
1639 // Third argument is always an ndrange_t type.
1640 if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
1641 S.Diag(TheCall->getArg(2)->getBeginLoc(),
1642 diag::err_opencl_builtin_expected_type)
1643 << TheCall->getDirectCallee() << "'ndrange_t'";
1644 return true;
1645 }
1646
1647 // With four arguments, there is only one form that the function could be
1648 // called in: no events and no variable arguments.
1649 if (NumArgs == 4) {
    // Check that the last argument is the right block type.
1651 if (!isBlockPointer(Arg: Arg3)) {
1652 S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1653 << TheCall->getDirectCallee() << "block";
1654 return true;
1655 }
    // We have a block type; check the prototype.
1657 const BlockPointerType *BPT =
1658 cast<BlockPointerType>(Val: Arg3->getType().getCanonicalType());
1659 if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
1660 S.Diag(Arg3->getBeginLoc(),
1661 diag::err_opencl_enqueue_kernel_blocks_no_args);
1662 return true;
1663 }
1664 return false;
1665 }
  // We can have a block plus varargs.
1667 if (isBlockPointer(Arg: Arg3))
1668 return (checkOpenCLBlockArgs(S, BlockArg: Arg3) ||
1669 checkOpenCLEnqueueVariadicArgs(S, TheCall, BlockArg: Arg3, NumNonVarArgs: 4));
  // The last two cases take either exactly 7 args, or 7 args plus varargs.
  if (NumArgs >= 7) {
    // Check the common block argument.
1673 Expr *Arg6 = TheCall->getArg(Arg: 6);
1674 if (!isBlockPointer(Arg: Arg6)) {
1675 S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1676 << TheCall->getDirectCallee() << "block";
1677 return true;
1678 }
1679 if (checkOpenCLBlockArgs(S, BlockArg: Arg6))
1680 return true;
1681
    // Fourth argument has to be of integer type.
1683 if (!Arg3->getType()->isIntegerType()) {
1684 S.Diag(TheCall->getArg(3)->getBeginLoc(),
1685 diag::err_opencl_builtin_expected_type)
1686 << TheCall->getDirectCallee() << "integer";
1687 return true;
1688 }
    // Check the remaining common arguments.
1690 Expr *Arg4 = TheCall->getArg(Arg: 4);
1691 Expr *Arg5 = TheCall->getArg(Arg: 5);
1692
1693 // Fifth argument is always passed as a pointer to clk_event_t.
1694 if (!Arg4->isNullPointerConstant(Ctx&: S.Context,
1695 NPC: Expr::NPC_ValueDependentIsNotNull) &&
1696 !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
1697 S.Diag(TheCall->getArg(4)->getBeginLoc(),
1698 diag::err_opencl_builtin_expected_type)
1699 << TheCall->getDirectCallee()
1700 << S.Context.getPointerType(S.Context.OCLClkEventTy);
1701 return true;
1702 }
1703
1704 // Sixth argument is always passed as a pointer to clk_event_t.
1705 if (!Arg5->isNullPointerConstant(Ctx&: S.Context,
1706 NPC: Expr::NPC_ValueDependentIsNotNull) &&
1707 !(Arg5->getType()->isPointerType() &&
1708 Arg5->getType()->getPointeeType()->isClkEventT())) {
1709 S.Diag(TheCall->getArg(5)->getBeginLoc(),
1710 diag::err_opencl_builtin_expected_type)
1711 << TheCall->getDirectCallee()
1712 << S.Context.getPointerType(S.Context.OCLClkEventTy);
1713 return true;
1714 }
1715
1716 if (NumArgs == 7)
1717 return false;
1718
1719 return checkOpenCLEnqueueVariadicArgs(S, TheCall, BlockArg: Arg6, NumNonVarArgs: 7);
1720 }
1721
  // None of the specific cases was matched; give a generic error.
1723 S.Diag(TheCall->getBeginLoc(),
1724 diag::err_opencl_enqueue_kernel_incorrect_args);
1725 return true;
1726}
1727
/// Returns the OpenCL access qualifier attribute of declaration \p D, if any.
1729static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
1730 return D->getAttr<OpenCLAccessAttr>();
1731}
1732
/// Checks that the first argument of a pipe builtin call is a pipe and that
/// its access qualifier is compatible with the callee; returns true on error.
1734static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
1735 const Expr *Arg0 = Call->getArg(Arg: 0);
1736 // First argument type should always be pipe.
1737 if (!Arg0->getType()->isPipeType()) {
1738 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1739 << Call->getDirectCallee() << Arg0->getSourceRange();
1740 return true;
1741 }
1742 OpenCLAccessAttr *AccessQual =
1743 getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
1744 // Validates the access qualifier is compatible with the call.
1745 // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
1746 // read_only and write_only, and assumed to be read_only if no qualifier is
1747 // specified.
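  // For illustration only (a hypothetical OpenCL C sketch): 'read_pipe(p, &v)'
  // is rejected below when 'p' was declared as 'write_only pipe int p'.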
1748 switch (Call->getDirectCallee()->getBuiltinID()) {
1749 case Builtin::BIread_pipe:
1750 case Builtin::BIreserve_read_pipe:
1751 case Builtin::BIcommit_read_pipe:
1752 case Builtin::BIwork_group_reserve_read_pipe:
1753 case Builtin::BIsub_group_reserve_read_pipe:
1754 case Builtin::BIwork_group_commit_read_pipe:
1755 case Builtin::BIsub_group_commit_read_pipe:
1756 if (!(!AccessQual || AccessQual->isReadOnly())) {
1757 S.Diag(Arg0->getBeginLoc(),
1758 diag::err_opencl_builtin_pipe_invalid_access_modifier)
1759 << "read_only" << Arg0->getSourceRange();
1760 return true;
1761 }
1762 break;
1763 case Builtin::BIwrite_pipe:
1764 case Builtin::BIreserve_write_pipe:
1765 case Builtin::BIcommit_write_pipe:
1766 case Builtin::BIwork_group_reserve_write_pipe:
1767 case Builtin::BIsub_group_reserve_write_pipe:
1768 case Builtin::BIwork_group_commit_write_pipe:
1769 case Builtin::BIsub_group_commit_write_pipe:
1770 if (!(AccessQual && AccessQual->isWriteOnly())) {
1771 S.Diag(Arg0->getBeginLoc(),
1772 diag::err_opencl_builtin_pipe_invalid_access_modifier)
1773 << "write_only" << Arg0->getSourceRange();
1774 return true;
1775 }
1776 break;
1777 default:
1778 break;
1779 }
1780 return false;
1781}
1782
/// Returns true if the packet argument at \p Idx is not a pointer to the
/// pipe's element type.
1784static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
1785 const Expr *Arg0 = Call->getArg(Arg: 0);
1786 const Expr *ArgIdx = Call->getArg(Arg: Idx);
1787 const PipeType *PipeTy = cast<PipeType>(Val: Arg0->getType());
1788 const QualType EltTy = PipeTy->getElementType();
1789 const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The argument at Idx should be a pointer, and its pointee type should be
  // the same as the pipe's element type.
1792 if (!ArgTy ||
1793 !S.Context.hasSameType(
1794 T1: EltTy, T2: ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
1795 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1796 << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
1797 << ArgIdx->getType() << ArgIdx->getSourceRange();
1798 return true;
1799 }
1800 return false;
1801}
1802
1803// Performs semantic analysis for the read/write_pipe call.
1804// \param S Reference to the semantic analyzer.
1805// \param Call A pointer to the builtin call.
1806// \return True if a semantic error has been found, false otherwise.
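//
// For illustration only (a hypothetical OpenCL C sketch), the accepted forms are:
//   read_pipe(p, &val);              // 2-argument form
//   read_pipe(p, rid, index, &val);  // 4-argument form with a reservation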
1807static bool BuiltinRWPipe(Sema &S, CallExpr *Call) {
1808 // OpenCL v2.0 s6.13.16.2 - The built-in read/write
1809 // functions have two forms.
1810 switch (Call->getNumArgs()) {
1811 case 2:
1812 if (checkOpenCLPipeArg(S, Call))
1813 return true;
1814 // The call with 2 arguments should be
1815 // read/write_pipe(pipe T, T*).
1816 // Check packet type T.
1817 if (checkOpenCLPipePacketType(S, Call, Idx: 1))
1818 return true;
1819 break;
1820
1821 case 4: {
1822 if (checkOpenCLPipeArg(S, Call))
1823 return true;
1824 // The call with 4 arguments should be
1825 // read/write_pipe(pipe T, reserve_id_t, uint, T*).
1826 // Check reserve_id_t.
1827 if (!Call->getArg(Arg: 1)->getType()->isReserveIDT()) {
1828 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1829 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1830 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1831 return true;
1832 }
1833
1834 // Check the index.
1835 const Expr *Arg2 = Call->getArg(Arg: 2);
1836 if (!Arg2->getType()->isIntegerType() &&
1837 !Arg2->getType()->isUnsignedIntegerType()) {
1838 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1839 << Call->getDirectCallee() << S.Context.UnsignedIntTy
1840 << Arg2->getType() << Arg2->getSourceRange();
1841 return true;
1842 }
1843
1844 // Check packet type T.
1845 if (checkOpenCLPipePacketType(S, Call, Idx: 3))
1846 return true;
1847 } break;
1848 default:
1849 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
1850 << Call->getDirectCallee() << Call->getSourceRange();
1851 return true;
1852 }
1853
1854 return false;
1855}
1856
// Performs semantic analysis on the [work_group_|sub_group_]reserve_{read|write}_pipe
// builtins.
1859// \param S Reference to the semantic analyzer.
1860// \param Call The call to the builtin function to be analyzed.
1861// \return True if a semantic error was found, false otherwise.
1862static bool BuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
1863 if (checkArgCount(S, Call, DesiredArgCount: 2))
1864 return true;
1865
1866 if (checkOpenCLPipeArg(S, Call))
1867 return true;
1868
1869 // Check the reserve size.
1870 if (!Call->getArg(Arg: 1)->getType()->isIntegerType() &&
1871 !Call->getArg(Arg: 1)->getType()->isUnsignedIntegerType()) {
1872 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1873 << Call->getDirectCallee() << S.Context.UnsignedIntTy
1874 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1875 return true;
1876 }
1877
  // Since the return type of the reserve_read/write_pipe built-in functions is
  // reserve_id_t, which is not defined in the builtin def file, int was used as
  // the return type instead; override the call's type here.
1881 Call->setType(S.Context.OCLReserveIDTy);
1882
1883 return false;
1884}
1885
// Performs semantic analysis on the [work_group_|sub_group_]commit_{read|write}_pipe
// builtins.
1888// \param S Reference to the semantic analyzer.
1889// \param Call The call to the builtin function to be analyzed.
1890// \return True if a semantic error was found, false otherwise.
1891static bool BuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
1892 if (checkArgCount(S, Call, DesiredArgCount: 2))
1893 return true;
1894
1895 if (checkOpenCLPipeArg(S, Call))
1896 return true;
1897
1898 // Check reserve_id_t.
1899 if (!Call->getArg(Arg: 1)->getType()->isReserveIDT()) {
1900 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1901 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1902 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1903 return true;
1904 }
1905
1906 return false;
1907}
1908
// Performs semantic analysis on a call to the built-in pipe query functions
// (get_pipe_num_packets/get_pipe_max_packets).
1911// \param S Reference to the semantic analyzer.
1912// \param Call The call to the builtin function to be analyzed.
1913// \return True if a semantic error was found, false otherwise.
1914static bool BuiltinPipePackets(Sema &S, CallExpr *Call) {
1915 if (checkArgCount(S, Call, DesiredArgCount: 1))
1916 return true;
1917
1918 if (!Call->getArg(Arg: 0)->getType()->isPipeType()) {
1919 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1920 << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
1921 return true;
1922 }
1923
1924 return false;
1925}
1926
1927// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
1928// Performs semantic analysis for the to_global/local/private call.
1929// \param S Reference to the semantic analyzer.
1930// \param BuiltinID ID of the builtin function.
1931// \param Call A pointer to the builtin call.
1932// \return True if a semantic error has been found, false otherwise.
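//
// For illustration only (a hypothetical OpenCL C sketch):
//   generic int *gp = ...;
//   global int *gl = to_global(gp);  // OK; the call's result type is rewritten below
//   constant int *cp = ...;
//   to_global(cp);                   // diagnosed: constant address space argument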
1933static bool OpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID, CallExpr *Call) {
1934 if (checkArgCount(S, Call, DesiredArgCount: 1))
1935 return true;
1936
1937 auto RT = Call->getArg(Arg: 0)->getType();
1938 if (!RT->isPointerType() || RT->getPointeeType()
1939 .getAddressSpace() == LangAS::opencl_constant) {
1940 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
1941 << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
1942 return true;
1943 }
1944
1945 if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
1946 S.Diag(Call->getArg(0)->getBeginLoc(),
1947 diag::warn_opencl_generic_address_space_arg)
1948 << Call->getDirectCallee()->getNameInfo().getAsString()
1949 << Call->getArg(0)->getSourceRange();
1950 }
1951
1952 RT = RT->getPointeeType();
1953 auto Qual = RT.getQualifiers();
1954 switch (BuiltinID) {
1955 case Builtin::BIto_global:
1956 Qual.setAddressSpace(LangAS::opencl_global);
1957 break;
1958 case Builtin::BIto_local:
1959 Qual.setAddressSpace(LangAS::opencl_local);
1960 break;
1961 case Builtin::BIto_private:
1962 Qual.setAddressSpace(LangAS::opencl_private);
1963 break;
1964 default:
1965 llvm_unreachable("Invalid builtin function");
1966 }
1967 Call->setType(S.Context.getPointerType(T: S.Context.getQualifiedType(
1968 T: RT.getUnqualifiedType(), Qs: Qual)));
1969
1970 return false;
1971}
1972
1973namespace {
1974enum PointerAuthOpKind {
1975 PAO_Strip,
1976 PAO_Sign,
1977 PAO_Auth,
1978 PAO_SignGeneric,
1979 PAO_Discriminator,
1980 PAO_BlendPointer,
1981 PAO_BlendInteger
1982};
1983}
1984
1985static bool checkPointerAuthEnabled(Sema &S, Expr *E) {
1986 if (S.getLangOpts().PointerAuthIntrinsics)
1987 return false;
1988
1989 S.Diag(E->getExprLoc(), diag::err_ptrauth_disabled) << E->getSourceRange();
1990 return true;
1991}
1992
1993static bool checkPointerAuthKey(Sema &S, Expr *&Arg) {
1994 // Convert it to type 'int'.
1995 if (convertArgumentToType(S, Arg, S.Context.IntTy))
1996 return true;
1997
1998 // Value-dependent expressions are okay; wait for template instantiation.
1999 if (Arg->isValueDependent())
2000 return false;
2001
2002 unsigned KeyValue;
2003 return S.checkConstantPointerAuthKey(keyExpr: Arg, key&: KeyValue);
2004}
2005
2006bool Sema::checkConstantPointerAuthKey(Expr *Arg, unsigned &Result) {
2007 // Attempt to constant-evaluate the expression.
2008 std::optional<llvm::APSInt> KeyValue = Arg->getIntegerConstantExpr(Ctx: Context);
2009 if (!KeyValue) {
2010 Diag(Arg->getExprLoc(), diag::err_expr_not_ice)
2011 << 0 << Arg->getSourceRange();
2012 return true;
2013 }
2014
2015 // Ask the target to validate the key parameter.
2016 if (!Context.getTargetInfo().validatePointerAuthKey(value: *KeyValue)) {
2017 llvm::SmallString<32> Value;
2018 {
2019 llvm::raw_svector_ostream Str(Value);
2020 Str << *KeyValue;
2021 }
2022
2023 Diag(Arg->getExprLoc(), diag::err_ptrauth_invalid_key)
2024 << Value << Arg->getSourceRange();
2025 return true;
2026 }
2027
2028 Result = KeyValue->getZExtValue();
2029 return false;
2030}
2031
2032static bool checkPointerAuthValue(Sema &S, Expr *&Arg,
2033 PointerAuthOpKind OpKind) {
2034 if (Arg->hasPlaceholderType()) {
2035 ExprResult R = S.CheckPlaceholderExpr(E: Arg);
2036 if (R.isInvalid())
2037 return true;
2038 Arg = R.get();
2039 }
2040
2041 auto AllowsPointer = [](PointerAuthOpKind OpKind) {
2042 return OpKind != PAO_BlendInteger;
2043 };
2044 auto AllowsInteger = [](PointerAuthOpKind OpKind) {
2045 return OpKind == PAO_Discriminator || OpKind == PAO_BlendInteger ||
2046 OpKind == PAO_SignGeneric;
2047 };
2048
  // Require the value to have an acceptable type for this operation.
2050 QualType ExpectedTy;
2051 if (AllowsPointer(OpKind) && Arg->getType()->isPointerType()) {
2052 ExpectedTy = Arg->getType().getUnqualifiedType();
2053 } else if (AllowsPointer(OpKind) && Arg->getType()->isNullPtrType()) {
2054 ExpectedTy = S.Context.VoidPtrTy;
2055 } else if (AllowsInteger(OpKind) &&
2056 Arg->getType()->isIntegralOrUnscopedEnumerationType()) {
2057 ExpectedTy = S.Context.getUIntPtrType();
2058
2059 } else {
2060 // Diagnose the failures.
2061 S.Diag(Arg->getExprLoc(), diag::err_ptrauth_value_bad_type)
2062 << unsigned(OpKind == PAO_Discriminator ? 1
2063 : OpKind == PAO_BlendPointer ? 2
2064 : OpKind == PAO_BlendInteger ? 3
2065 : 0)
2066 << unsigned(AllowsInteger(OpKind) ? (AllowsPointer(OpKind) ? 2 : 1) : 0)
2067 << Arg->getType() << Arg->getSourceRange();
2068 return true;
2069 }
2070
2071 // Convert to that type. This should just be an lvalue-to-rvalue
2072 // conversion.
2073 if (convertArgumentToType(S, Value&: Arg, Ty: ExpectedTy))
2074 return true;
2075
2076 // Warn about null pointers for non-generic sign and auth operations.
2077 if ((OpKind == PAO_Sign || OpKind == PAO_Auth) &&
2078 Arg->isNullPointerConstant(Ctx&: S.Context, NPC: Expr::NPC_ValueDependentIsNull)) {
2079 S.Diag(Arg->getExprLoc(), OpKind == PAO_Sign
2080 ? diag::warn_ptrauth_sign_null_pointer
2081 : diag::warn_ptrauth_auth_null_pointer)
2082 << Arg->getSourceRange();
2083 }
2084
2085 return false;
2086}
2087
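// For illustration only (a hypothetical C sketch, assuming pointer-auth
// intrinsics are enabled and that key 0 is valid for the target):
//   void *raw = __builtin_ptrauth_strip(fnptr, 0);
// The result keeps the type of the stripped operand, as set below.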
2088static ExprResult PointerAuthStrip(Sema &S, CallExpr *Call) {
2089 if (checkArgCount(S, Call, DesiredArgCount: 2))
2090 return ExprError();
2091 if (checkPointerAuthEnabled(S, Call))
2092 return ExprError();
2093 if (checkPointerAuthValue(S, Arg&: Call->getArgs()[0], OpKind: PAO_Strip) ||
2094 checkPointerAuthKey(S, Arg&: Call->getArgs()[1]))
2095 return ExprError();
2096
2097 Call->setType(Call->getArgs()[0]->getType());
2098 return Call;
2099}
2100
2101static ExprResult PointerAuthBlendDiscriminator(Sema &S, CallExpr *Call) {
2102 if (checkArgCount(S, Call, DesiredArgCount: 2))
2103 return ExprError();
2104 if (checkPointerAuthEnabled(S, Call))
2105 return ExprError();
2106 if (checkPointerAuthValue(S, Arg&: Call->getArgs()[0], OpKind: PAO_BlendPointer) ||
2107 checkPointerAuthValue(S, Arg&: Call->getArgs()[1], OpKind: PAO_BlendInteger))
2108 return ExprError();
2109
2110 Call->setType(S.Context.getUIntPtrType());
2111 return Call;
2112}
2113
2114static ExprResult PointerAuthSignGenericData(Sema &S, CallExpr *Call) {
2115 if (checkArgCount(S, Call, DesiredArgCount: 2))
2116 return ExprError();
2117 if (checkPointerAuthEnabled(S, Call))
2118 return ExprError();
2119 if (checkPointerAuthValue(S, Arg&: Call->getArgs()[0], OpKind: PAO_SignGeneric) ||
2120 checkPointerAuthValue(S, Arg&: Call->getArgs()[1], OpKind: PAO_Discriminator))
2121 return ExprError();
2122
2123 Call->setType(S.Context.getUIntPtrType());
2124 return Call;
2125}
2126
2127static ExprResult PointerAuthSignOrAuth(Sema &S, CallExpr *Call,
2128 PointerAuthOpKind OpKind) {
2129 if (checkArgCount(S, Call, DesiredArgCount: 3))
2130 return ExprError();
2131 if (checkPointerAuthEnabled(S, Call))
2132 return ExprError();
2133 if (checkPointerAuthValue(S, Arg&: Call->getArgs()[0], OpKind) ||
2134 checkPointerAuthKey(S, Arg&: Call->getArgs()[1]) ||
2135 checkPointerAuthValue(S, Arg&: Call->getArgs()[2], OpKind: PAO_Discriminator))
2136 return ExprError();
2137
2138 Call->setType(Call->getArgs()[0]->getType());
2139 return Call;
2140}
2141
2142static ExprResult PointerAuthAuthAndResign(Sema &S, CallExpr *Call) {
2143 if (checkArgCount(S, Call, DesiredArgCount: 5))
2144 return ExprError();
2145 if (checkPointerAuthEnabled(S, Call))
2146 return ExprError();
2147 if (checkPointerAuthValue(S, Arg&: Call->getArgs()[0], OpKind: PAO_Auth) ||
2148 checkPointerAuthKey(S, Arg&: Call->getArgs()[1]) ||
2149 checkPointerAuthValue(S, Arg&: Call->getArgs()[2], OpKind: PAO_Discriminator) ||
2150 checkPointerAuthKey(S, Arg&: Call->getArgs()[3]) ||
2151 checkPointerAuthValue(S, Arg&: Call->getArgs()[4], OpKind: PAO_Discriminator))
2152 return ExprError();
2153
2154 Call->setType(Call->getArgs()[0]->getType());
2155 return Call;
2156}
2157
2158static ExprResult BuiltinLaunder(Sema &S, CallExpr *TheCall) {
2159 if (checkArgCount(S, Call: TheCall, DesiredArgCount: 1))
2160 return ExprError();
2161
2162 // Compute __builtin_launder's parameter type from the argument.
2163 // The parameter type is:
2164 // * The type of the argument if it's not an array or function type,
2165 // Otherwise,
2166 // * The decayed argument type.
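  // For illustration only: given 'int arr[4]' and 'int *p',
  // __builtin_launder(arr) gets parameter type 'int *' (the decayed type),
  // while __builtin_launder(p) keeps 'int *' unchanged.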
2167 QualType ParamTy = [&]() {
2168 QualType ArgTy = TheCall->getArg(Arg: 0)->getType();
2169 if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
2170 return S.Context.getPointerType(Ty->getElementType());
2171 if (ArgTy->isFunctionType()) {
2172 return S.Context.getPointerType(ArgTy);
2173 }
2174 return ArgTy;
2175 }();
2176
2177 TheCall->setType(ParamTy);
2178
2179 auto DiagSelect = [&]() -> std::optional<unsigned> {
2180 if (!ParamTy->isPointerType())
2181 return 0;
2182 if (ParamTy->isFunctionPointerType())
2183 return 1;
2184 if (ParamTy->isVoidPointerType())
2185 return 2;
2186 return std::optional<unsigned>{};
2187 }();
2188 if (DiagSelect) {
2189 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
2190 << *DiagSelect << TheCall->getSourceRange();
2191 return ExprError();
2192 }
2193
2194 // We either have an incomplete class type, or we have a class template
2195 // whose instantiation has not been forced. Example:
2196 //
2197 // template <class T> struct Foo { T value; };
2198 // Foo<int> *p = nullptr;
2199 // auto *d = __builtin_launder(p);
2200 if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
2201 diag::err_incomplete_type))
2202 return ExprError();
2203
2204 assert(ParamTy->getPointeeType()->isObjectType() &&
2205 "Unhandled non-object pointer case");
2206
2207 InitializedEntity Entity =
2208 InitializedEntity::InitializeParameter(Context&: S.Context, Type: ParamTy, Consumed: false);
2209 ExprResult Arg =
2210 S.PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: TheCall->getArg(Arg: 0));
2211 if (Arg.isInvalid())
2212 return ExprError();
2213 TheCall->setArg(Arg: 0, ArgExpr: Arg.get());
2214
2215 return TheCall;
2216}
2217
2218// Emit an error and return true if the current object format type is in the
2219// list of unsupported types.
2220static bool CheckBuiltinTargetNotInUnsupported(
2221 Sema &S, unsigned BuiltinID, CallExpr *TheCall,
2222 ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) {
2223 llvm::Triple::ObjectFormatType CurObjFormat =
2224 S.getASTContext().getTargetInfo().getTriple().getObjectFormat();
2225 if (llvm::is_contained(Range&: UnsupportedObjectFormatTypes, Element: CurObjFormat)) {
2226 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
2227 << TheCall->getSourceRange();
2228 return true;
2229 }
2230 return false;
2231}
2232
2233// Emit an error and return true if the current architecture is not in the list
2234// of supported architectures.
2235static bool
2236CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
2237 ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
2238 llvm::Triple::ArchType CurArch =
2239 S.getASTContext().getTargetInfo().getTriple().getArch();
2240 if (llvm::is_contained(Range&: SupportedArchs, Element: CurArch))
2241 return false;
2242 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
2243 << TheCall->getSourceRange();
2244 return true;
2245}
2246
2247static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
2248 SourceLocation CallSiteLoc);
2249
2250bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
2251 CallExpr *TheCall) {
2252 switch (TI.getTriple().getArch()) {
2253 default:
2254 // Some builtins don't require additional checking, so just consider these
2255 // acceptable.
2256 return false;
2257 case llvm::Triple::arm:
2258 case llvm::Triple::armeb:
2259 case llvm::Triple::thumb:
2260 case llvm::Triple::thumbeb:
2261 return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
2262 case llvm::Triple::aarch64:
2263 case llvm::Triple::aarch64_32:
2264 case llvm::Triple::aarch64_be:
2265 return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
2266 case llvm::Triple::bpfeb:
2267 case llvm::Triple::bpfel:
2268 return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
2269 case llvm::Triple::hexagon:
2270 return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
2271 case llvm::Triple::mips:
2272 case llvm::Triple::mipsel:
2273 case llvm::Triple::mips64:
2274 case llvm::Triple::mips64el:
2275 return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
2276 case llvm::Triple::systemz:
2277 return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
2278 case llvm::Triple::x86:
2279 case llvm::Triple::x86_64:
2280 return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
2281 case llvm::Triple::ppc:
2282 case llvm::Triple::ppcle:
2283 case llvm::Triple::ppc64:
2284 case llvm::Triple::ppc64le:
2285 return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
2286 case llvm::Triple::amdgcn:
2287 return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
2288 case llvm::Triple::riscv32:
2289 case llvm::Triple::riscv64:
2290 return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
2291 case llvm::Triple::loongarch32:
2292 case llvm::Triple::loongarch64:
2293 return CheckLoongArchBuiltinFunctionCall(TI, BuiltinID, TheCall);
2294 case llvm::Triple::wasm32:
2295 case llvm::Triple::wasm64:
2296 return CheckWebAssemblyBuiltinFunctionCall(TI, BuiltinID, TheCall);
2297 case llvm::Triple::nvptx:
2298 case llvm::Triple::nvptx64:
2299 return CheckNVPTXBuiltinFunctionCall(TI, BuiltinID, TheCall);
2300 }
2301}
2302
// Check if \p ArgTy is a valid type for the elementwise math builtins. If it
// is not a valid type, emit an error message and return true. Otherwise return
// false.
2306static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc,
2307 QualType ArgTy, int ArgIndex) {
2308 if (!ArgTy->getAs<VectorType>() &&
2309 !ConstantMatrixType::isValidElementType(T: ArgTy)) {
2310 return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
2311 << ArgIndex << /* vector, integer or float ty*/ 0 << ArgTy;
2312 }
2313
2314 return false;
2315}
2316
2317static bool checkFPMathBuiltinElementType(Sema &S, SourceLocation Loc,
2318 QualType ArgTy, int ArgIndex) {
2319 QualType EltTy = ArgTy;
2320 if (auto *VecTy = EltTy->getAs<VectorType>())
2321 EltTy = VecTy->getElementType();
2322
2323 if (!EltTy->isRealFloatingType()) {
2324 return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
2325 << ArgIndex << /* vector or float ty*/ 5 << ArgTy;
2326 }
2327
2328 return false;
2329}
2330
2331/// BuiltinCpu{Supports|Is} - Handle __builtin_cpu_{supports|is}(char *).
2332/// This checks that the target supports the builtin and that the string
2333/// argument is constant and valid.
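///
/// For illustration only (a hypothetical C sketch on x86):
/// \code
///   if (__builtin_cpu_supports("avx2")) { /* ... */ }  // string literal required
///   __builtin_cpu_supports(dynamic_str);               // diagnosed below
/// \endcode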
2334static bool BuiltinCpu(Sema &S, const TargetInfo &TI, CallExpr *TheCall,
2335 const TargetInfo *AuxTI, unsigned BuiltinID) {
2336 assert((BuiltinID == Builtin::BI__builtin_cpu_supports ||
2337 BuiltinID == Builtin::BI__builtin_cpu_is) &&
2338 "Expecting __builtin_cpu_...");
2339
2340 bool IsCPUSupports = BuiltinID == Builtin::BI__builtin_cpu_supports;
2341 const TargetInfo *TheTI = &TI;
2342 auto SupportsBI = [=](const TargetInfo *TInfo) {
2343 return TInfo && ((IsCPUSupports && TInfo->supportsCpuSupports()) ||
2344 (!IsCPUSupports && TInfo->supportsCpuIs()));
2345 };
2346 if (!SupportsBI(&TI) && SupportsBI(AuxTI))
2347 TheTI = AuxTI;
2348
2349 if (IsCPUSupports && !TheTI->supportsCpuSupports())
2350 return S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
2351 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
2352 if (!IsCPUSupports && !TheTI->supportsCpuIs())
2353 return S.Diag(TheCall->getBeginLoc(),
2354 TI.getTriple().isOSAIX()
2355 ? diag::err_builtin_aix_os_unsupported
2356 : diag::err_builtin_target_unsupported)
2357 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
2358
2359 Expr *Arg = TheCall->getArg(Arg: 0)->IgnoreParenImpCasts();
2360 // Check if the argument is a string literal.
2361 if (!isa<StringLiteral>(Arg))
2362 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
2363 << Arg->getSourceRange();
2364
2365 // Check the contents of the string.
2366 StringRef Feature = cast<StringLiteral>(Val: Arg)->getString();
2367 if (IsCPUSupports && !TheTI->validateCpuSupports(Name: Feature)) {
2368 S.Diag(TheCall->getBeginLoc(), diag::warn_invalid_cpu_supports)
2369 << Arg->getSourceRange();
2370 return false;
2371 }
2372 if (!IsCPUSupports && !TheTI->validateCpuIs(Feature))
2373 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
2374 << Arg->getSourceRange();
2375 return false;
2376}
2377
2378/// Checks that __builtin_popcountg was called with a single argument, which is
2379/// an unsigned integer.
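///
/// For illustration only: __builtin_popcountg(1u) is accepted, while
/// __builtin_popcountg(-1) is diagnosed below because the argument is signed.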
2380static bool BuiltinPopcountg(Sema &S, CallExpr *TheCall) {
2381 if (checkArgCount(S, Call: TheCall, DesiredArgCount: 1))
2382 return true;
2383
2384 ExprResult ArgRes = S.DefaultLvalueConversion(E: TheCall->getArg(Arg: 0));
2385 if (ArgRes.isInvalid())
2386 return true;
2387
2388 Expr *Arg = ArgRes.get();
2389 TheCall->setArg(Arg: 0, ArgExpr: Arg);
2390
2391 QualType ArgTy = Arg->getType();
2392
2393 if (!ArgTy->isUnsignedIntegerType()) {
2394 S.Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2395 << 1 << /*unsigned integer ty*/ 7 << ArgTy;
2396 return true;
2397 }
2398 return false;
2399}
2400
2401/// Checks that __builtin_{clzg,ctzg} was called with a first argument, which is
2402/// an unsigned integer, and an optional second argument, which is promoted to
2403/// an 'int'.
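///
/// For illustration only: for an unsigned 'x', both __builtin_clzg(x) and
/// __builtin_clzg(x, 32) are accepted; a signed first argument or a non-'int'
/// second argument is diagnosed below.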
2404static bool BuiltinCountZeroBitsGeneric(Sema &S, CallExpr *TheCall) {
2405 if (checkArgCountRange(S, Call: TheCall, MinArgCount: 1, MaxArgCount: 2))
2406 return true;
2407
2408 ExprResult Arg0Res = S.DefaultLvalueConversion(E: TheCall->getArg(Arg: 0));
2409 if (Arg0Res.isInvalid())
2410 return true;
2411
2412 Expr *Arg0 = Arg0Res.get();
2413 TheCall->setArg(Arg: 0, ArgExpr: Arg0);
2414
2415 QualType Arg0Ty = Arg0->getType();
2416
2417 if (!Arg0Ty->isUnsignedIntegerType()) {
2418 S.Diag(Arg0->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2419 << 1 << /*unsigned integer ty*/ 7 << Arg0Ty;
2420 return true;
2421 }
2422
2423 if (TheCall->getNumArgs() > 1) {
2424 ExprResult Arg1Res = S.UsualUnaryConversions(E: TheCall->getArg(Arg: 1));
2425 if (Arg1Res.isInvalid())
2426 return true;
2427
2428 Expr *Arg1 = Arg1Res.get();
2429 TheCall->setArg(Arg: 1, ArgExpr: Arg1);
2430
2431 QualType Arg1Ty = Arg1->getType();
2432
2433 if (!Arg1Ty->isSpecificBuiltinType(K: BuiltinType::Int)) {
2434 S.Diag(Arg1->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2435 << 2 << /*'int' ty*/ 8 << Arg1Ty;
2436 return true;
2437 }
2438 }
2439
2440 return false;
2441}
2442
2443ExprResult
2444Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
2445 CallExpr *TheCall) {
2446 ExprResult TheCallResult(TheCall);
2447
2448 // Find out if any arguments are required to be integer constant expressions.
2449 unsigned ICEArguments = 0;
2450 ASTContext::GetBuiltinTypeError Error;
2451 Context.GetBuiltinType(ID: BuiltinID, Error, IntegerConstantArgs: &ICEArguments);
2452 if (Error != ASTContext::GE_None)
2453 ICEArguments = 0; // Don't diagnose previously diagnosed errors.
2454
2455 // If any arguments are required to be ICE's, check and diagnose.
2456 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
2457 // Skip arguments not required to be ICE's.
2458 if ((ICEArguments & (1 << ArgNo)) == 0) continue;
2459
2460 llvm::APSInt Result;
    // If we don't have enough arguments, continue so we can issue a better
    // diagnostic in checkArgCount(...).
2463 if (ArgNo < TheCall->getNumArgs() &&
2464 BuiltinConstantArg(TheCall, ArgNum: ArgNo, Result))
2465 return true;
2466 ICEArguments &= ~(1 << ArgNo);
2467 }
2468
2469 FPOptions FPO;
2470 switch (BuiltinID) {
2471 case Builtin::BI__builtin_cpu_supports:
2472 case Builtin::BI__builtin_cpu_is:
2473 if (BuiltinCpu(S&: *this, TI: Context.getTargetInfo(), TheCall,
2474 AuxTI: Context.getAuxTargetInfo(), BuiltinID))
2475 return ExprError();
2476 break;
2477 case Builtin::BI__builtin_cpu_init:
2478 if (!Context.getTargetInfo().supportsCpuInit()) {
2479 Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
2480 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
2481 return ExprError();
2482 }
2483 break;
2484 case Builtin::BI__builtin___CFStringMakeConstantString:
    // CFStringMakeConstantString is currently not implemented for GOFF (i.e.,
    // on z/OS) and for XCOFF (i.e., on AIX). Emit an 'unsupported' error for
    // those object formats.
2487 if (CheckBuiltinTargetNotInUnsupported(
2488 S&: *this, BuiltinID, TheCall,
2489 UnsupportedObjectFormatTypes: {llvm::Triple::GOFF, llvm::Triple::XCOFF}))
2490 return ExprError();
2491 assert(TheCall->getNumArgs() == 1 &&
2492 "Wrong # arguments to builtin CFStringMakeConstantString");
2493 if (CheckObjCString(Arg: TheCall->getArg(Arg: 0)))
2494 return ExprError();
2495 break;
2496 case Builtin::BI__builtin_ms_va_start:
2497 case Builtin::BI__builtin_stdarg_start:
2498 case Builtin::BI__builtin_va_start:
2499 if (BuiltinVAStart(BuiltinID, TheCall))
2500 return ExprError();
2501 break;
2502 case Builtin::BI__va_start: {
2503 switch (Context.getTargetInfo().getTriple().getArch()) {
2504 case llvm::Triple::aarch64:
2505 case llvm::Triple::arm:
2506 case llvm::Triple::thumb:
2507 if (BuiltinVAStartARMMicrosoft(Call: TheCall))
2508 return ExprError();
2509 break;
2510 default:
2511 if (BuiltinVAStart(BuiltinID, TheCall))
2512 return ExprError();
2513 break;
2514 }
2515 break;
2516 }
2517
2518 // The acquire, release, and no fence variants are ARM and AArch64 only.
2519 case Builtin::BI_interlockedbittestandset_acq:
2520 case Builtin::BI_interlockedbittestandset_rel:
2521 case Builtin::BI_interlockedbittestandset_nf:
2522 case Builtin::BI_interlockedbittestandreset_acq:
2523 case Builtin::BI_interlockedbittestandreset_rel:
2524 case Builtin::BI_interlockedbittestandreset_nf:
2525 if (CheckBuiltinTargetInSupported(
2526 S&: *this, BuiltinID, TheCall,
2527 SupportedArchs: {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
2528 return ExprError();
2529 break;
2530
2531 // The 64-bit bittest variants are x64, ARM, and AArch64 only.
2532 case Builtin::BI_bittest64:
2533 case Builtin::BI_bittestandcomplement64:
2534 case Builtin::BI_bittestandreset64:
2535 case Builtin::BI_bittestandset64:
2536 case Builtin::BI_interlockedbittestandreset64:
2537 case Builtin::BI_interlockedbittestandset64:
2538 if (CheckBuiltinTargetInSupported(S&: *this, BuiltinID, TheCall,
2539 SupportedArchs: {llvm::Triple::x86_64, llvm::Triple::arm,
2540 llvm::Triple::thumb,
2541 llvm::Triple::aarch64}))
2542 return ExprError();
2543 break;
2544
2545 case Builtin::BI__builtin_set_flt_rounds:
2546 if (CheckBuiltinTargetInSupported(S&: *this, BuiltinID, TheCall,
2547 SupportedArchs: {llvm::Triple::x86, llvm::Triple::x86_64,
2548 llvm::Triple::arm, llvm::Triple::thumb,
2549 llvm::Triple::aarch64}))
2550 return ExprError();
2551 break;
2552
2553 case Builtin::BI__builtin_isgreater:
2554 case Builtin::BI__builtin_isgreaterequal:
2555 case Builtin::BI__builtin_isless:
2556 case Builtin::BI__builtin_islessequal:
2557 case Builtin::BI__builtin_islessgreater:
2558 case Builtin::BI__builtin_isunordered:
2559 if (BuiltinUnorderedCompare(TheCall, BuiltinID))
2560 return ExprError();
2561 break;
2562 case Builtin::BI__builtin_fpclassify:
2563 if (BuiltinFPClassification(TheCall, NumArgs: 6, BuiltinID))
2564 return ExprError();
2565 break;
2566 case Builtin::BI__builtin_isfpclass:
2567 if (BuiltinFPClassification(TheCall, NumArgs: 2, BuiltinID))
2568 return ExprError();
2569 break;
2570 case Builtin::BI__builtin_isfinite:
2571 case Builtin::BI__builtin_isinf:
2572 case Builtin::BI__builtin_isinf_sign:
2573 case Builtin::BI__builtin_isnan:
2574 case Builtin::BI__builtin_issignaling:
2575 case Builtin::BI__builtin_isnormal:
2576 case Builtin::BI__builtin_issubnormal:
2577 case Builtin::BI__builtin_iszero:
2578 case Builtin::BI__builtin_signbit:
2579 case Builtin::BI__builtin_signbitf:
2580 case Builtin::BI__builtin_signbitl:
2581 if (BuiltinFPClassification(TheCall, NumArgs: 1, BuiltinID))
2582 return ExprError();
2583 break;
2584 case Builtin::BI__builtin_shufflevector:
2585 return BuiltinShuffleVector(TheCall);
    // TheCall will be freed by the smart pointer here, but that's fine, since
    // BuiltinShuffleVector guts it without releasing it.
2588 case Builtin::BI__builtin_prefetch:
2589 if (BuiltinPrefetch(TheCall))
2590 return ExprError();
2591 break;
2592 case Builtin::BI__builtin_alloca_with_align:
2593 case Builtin::BI__builtin_alloca_with_align_uninitialized:
2594 if (BuiltinAllocaWithAlign(TheCall))
2595 return ExprError();
2596 [[fallthrough]];
2597 case Builtin::BI__builtin_alloca:
2598 case Builtin::BI__builtin_alloca_uninitialized:
2599 Diag(TheCall->getBeginLoc(), diag::warn_alloca)
2600 << TheCall->getDirectCallee();
2601 break;
2602 case Builtin::BI__arithmetic_fence:
2603 if (BuiltinArithmeticFence(TheCall))
2604 return ExprError();
2605 break;
2606 case Builtin::BI__assume:
2607 case Builtin::BI__builtin_assume:
2608 if (BuiltinAssume(TheCall))
2609 return ExprError();
2610 break;
2611 case Builtin::BI__builtin_assume_aligned:
2612 if (BuiltinAssumeAligned(TheCall))
2613 return ExprError();
2614 break;
2615 case Builtin::BI__builtin_dynamic_object_size:
2616 case Builtin::BI__builtin_object_size:
2617 if (BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 3))
2618 return ExprError();
2619 break;
2620 case Builtin::BI__builtin_longjmp:
2621 if (BuiltinLongjmp(TheCall))
2622 return ExprError();
2623 break;
2624 case Builtin::BI__builtin_setjmp:
2625 if (BuiltinSetjmp(TheCall))
2626 return ExprError();
2627 break;
2628 case Builtin::BI__builtin_classify_type:
2629 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 1)) return true;
2630 TheCall->setType(Context.IntTy);
2631 break;
2632 case Builtin::BI__builtin_complex:
2633 if (BuiltinComplex(TheCall))
2634 return ExprError();
2635 break;
2636 case Builtin::BI__builtin_constant_p: {
2637 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 1)) return true;
2638 ExprResult Arg = DefaultFunctionArrayLvalueConversion(E: TheCall->getArg(Arg: 0));
2639 if (Arg.isInvalid()) return true;
2640 TheCall->setArg(Arg: 0, ArgExpr: Arg.get());
2641 TheCall->setType(Context.IntTy);
2642 break;
2643 }
2644 case Builtin::BI__builtin_launder:
2645 return BuiltinLaunder(S&: *this, TheCall);
2646 case Builtin::BI__sync_fetch_and_add:
2647 case Builtin::BI__sync_fetch_and_add_1:
2648 case Builtin::BI__sync_fetch_and_add_2:
2649 case Builtin::BI__sync_fetch_and_add_4:
2650 case Builtin::BI__sync_fetch_and_add_8:
2651 case Builtin::BI__sync_fetch_and_add_16:
2652 case Builtin::BI__sync_fetch_and_sub:
2653 case Builtin::BI__sync_fetch_and_sub_1:
2654 case Builtin::BI__sync_fetch_and_sub_2:
2655 case Builtin::BI__sync_fetch_and_sub_4:
2656 case Builtin::BI__sync_fetch_and_sub_8:
2657 case Builtin::BI__sync_fetch_and_sub_16:
2658 case Builtin::BI__sync_fetch_and_or:
2659 case Builtin::BI__sync_fetch_and_or_1:
2660 case Builtin::BI__sync_fetch_and_or_2:
2661 case Builtin::BI__sync_fetch_and_or_4:
2662 case Builtin::BI__sync_fetch_and_or_8:
2663 case Builtin::BI__sync_fetch_and_or_16:
2664 case Builtin::BI__sync_fetch_and_and:
2665 case Builtin::BI__sync_fetch_and_and_1:
2666 case Builtin::BI__sync_fetch_and_and_2:
2667 case Builtin::BI__sync_fetch_and_and_4:
2668 case Builtin::BI__sync_fetch_and_and_8:
2669 case Builtin::BI__sync_fetch_and_and_16:
2670 case Builtin::BI__sync_fetch_and_xor:
2671 case Builtin::BI__sync_fetch_and_xor_1:
2672 case Builtin::BI__sync_fetch_and_xor_2:
2673 case Builtin::BI__sync_fetch_and_xor_4:
2674 case Builtin::BI__sync_fetch_and_xor_8:
2675 case Builtin::BI__sync_fetch_and_xor_16:
2676 case Builtin::BI__sync_fetch_and_nand:
2677 case Builtin::BI__sync_fetch_and_nand_1:
2678 case Builtin::BI__sync_fetch_and_nand_2:
2679 case Builtin::BI__sync_fetch_and_nand_4:
2680 case Builtin::BI__sync_fetch_and_nand_8:
2681 case Builtin::BI__sync_fetch_and_nand_16:
2682 case Builtin::BI__sync_add_and_fetch:
2683 case Builtin::BI__sync_add_and_fetch_1:
2684 case Builtin::BI__sync_add_and_fetch_2:
2685 case Builtin::BI__sync_add_and_fetch_4:
2686 case Builtin::BI__sync_add_and_fetch_8:
2687 case Builtin::BI__sync_add_and_fetch_16:
2688 case Builtin::BI__sync_sub_and_fetch:
2689 case Builtin::BI__sync_sub_and_fetch_1:
2690 case Builtin::BI__sync_sub_and_fetch_2:
2691 case Builtin::BI__sync_sub_and_fetch_4:
2692 case Builtin::BI__sync_sub_and_fetch_8:
2693 case Builtin::BI__sync_sub_and_fetch_16:
2694 case Builtin::BI__sync_and_and_fetch:
2695 case Builtin::BI__sync_and_and_fetch_1:
2696 case Builtin::BI__sync_and_and_fetch_2:
2697 case Builtin::BI__sync_and_and_fetch_4:
2698 case Builtin::BI__sync_and_and_fetch_8:
2699 case Builtin::BI__sync_and_and_fetch_16:
2700 case Builtin::BI__sync_or_and_fetch:
2701 case Builtin::BI__sync_or_and_fetch_1:
2702 case Builtin::BI__sync_or_and_fetch_2:
2703 case Builtin::BI__sync_or_and_fetch_4:
2704 case Builtin::BI__sync_or_and_fetch_8:
2705 case Builtin::BI__sync_or_and_fetch_16:
2706 case Builtin::BI__sync_xor_and_fetch:
2707 case Builtin::BI__sync_xor_and_fetch_1:
2708 case Builtin::BI__sync_xor_and_fetch_2:
2709 case Builtin::BI__sync_xor_and_fetch_4:
2710 case Builtin::BI__sync_xor_and_fetch_8:
2711 case Builtin::BI__sync_xor_and_fetch_16:
2712 case Builtin::BI__sync_nand_and_fetch:
2713 case Builtin::BI__sync_nand_and_fetch_1:
2714 case Builtin::BI__sync_nand_and_fetch_2:
2715 case Builtin::BI__sync_nand_and_fetch_4:
2716 case Builtin::BI__sync_nand_and_fetch_8:
2717 case Builtin::BI__sync_nand_and_fetch_16:
2718 case Builtin::BI__sync_val_compare_and_swap:
2719 case Builtin::BI__sync_val_compare_and_swap_1:
2720 case Builtin::BI__sync_val_compare_and_swap_2:
2721 case Builtin::BI__sync_val_compare_and_swap_4:
2722 case Builtin::BI__sync_val_compare_and_swap_8:
2723 case Builtin::BI__sync_val_compare_and_swap_16:
2724 case Builtin::BI__sync_bool_compare_and_swap:
2725 case Builtin::BI__sync_bool_compare_and_swap_1:
2726 case Builtin::BI__sync_bool_compare_and_swap_2:
2727 case Builtin::BI__sync_bool_compare_and_swap_4:
2728 case Builtin::BI__sync_bool_compare_and_swap_8:
2729 case Builtin::BI__sync_bool_compare_and_swap_16:
2730 case Builtin::BI__sync_lock_test_and_set:
2731 case Builtin::BI__sync_lock_test_and_set_1:
2732 case Builtin::BI__sync_lock_test_and_set_2:
2733 case Builtin::BI__sync_lock_test_and_set_4:
2734 case Builtin::BI__sync_lock_test_and_set_8:
2735 case Builtin::BI__sync_lock_test_and_set_16:
2736 case Builtin::BI__sync_lock_release:
2737 case Builtin::BI__sync_lock_release_1:
2738 case Builtin::BI__sync_lock_release_2:
2739 case Builtin::BI__sync_lock_release_4:
2740 case Builtin::BI__sync_lock_release_8:
2741 case Builtin::BI__sync_lock_release_16:
2742 case Builtin::BI__sync_swap:
2743 case Builtin::BI__sync_swap_1:
2744 case Builtin::BI__sync_swap_2:
2745 case Builtin::BI__sync_swap_4:
2746 case Builtin::BI__sync_swap_8:
2747 case Builtin::BI__sync_swap_16:
2748 return BuiltinAtomicOverloaded(TheCallResult);
2749 case Builtin::BI__sync_synchronize:
2750 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
2751 << TheCall->getCallee()->getSourceRange();
2752 break;
2753 case Builtin::BI__builtin_nontemporal_load:
2754 case Builtin::BI__builtin_nontemporal_store:
2755 return BuiltinNontemporalOverloaded(TheCallResult);
2756 case Builtin::BI__builtin_memcpy_inline: {
2757 clang::Expr *SizeOp = TheCall->getArg(Arg: 2);
2758 // We warn about copying to or from `nullptr` pointers when `size` is
2759 // greater than 0. When `size` is value dependent we cannot evaluate its
2760 // value so we bail out.
2761 if (SizeOp->isValueDependent())
2762 break;
2763 if (!SizeOp->EvaluateKnownConstInt(Ctx: Context).isZero()) {
2764 CheckNonNullArgument(*this, TheCall->getArg(Arg: 0), TheCall->getExprLoc());
2765 CheckNonNullArgument(*this, TheCall->getArg(Arg: 1), TheCall->getExprLoc());
2766 }
2767 break;
2768 }
2769 case Builtin::BI__builtin_memset_inline: {
2770 clang::Expr *SizeOp = TheCall->getArg(Arg: 2);
2771 // We warn about filling to `nullptr` pointers when `size` is greater than
2772 // 0. When `size` is value dependent we cannot evaluate its value so we bail
2773 // out.
2774 if (SizeOp->isValueDependent())
2775 break;
2776 if (!SizeOp->EvaluateKnownConstInt(Ctx: Context).isZero())
2777 CheckNonNullArgument(*this, TheCall->getArg(Arg: 0), TheCall->getExprLoc());
2778 break;
2779 }
2780#define BUILTIN(ID, TYPE, ATTRS)
2781#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
2782 case Builtin::BI##ID: \
2783 return AtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
2784#include "clang/Basic/Builtins.inc"
2785 case Builtin::BI__annotation:
2786 if (BuiltinMSVCAnnotation(S&: *this, TheCall))
2787 return ExprError();
2788 break;
2789 case Builtin::BI__builtin_annotation:
2790 if (BuiltinAnnotation(S&: *this, TheCall))
2791 return ExprError();
2792 break;
2793 case Builtin::BI__builtin_addressof:
2794 if (BuiltinAddressof(S&: *this, TheCall))
2795 return ExprError();
2796 break;
2797 case Builtin::BI__builtin_function_start:
2798 if (BuiltinFunctionStart(S&: *this, TheCall))
2799 return ExprError();
2800 break;
2801 case Builtin::BI__builtin_is_aligned:
2802 case Builtin::BI__builtin_align_up:
2803 case Builtin::BI__builtin_align_down:
2804 if (BuiltinAlignment(S&: *this, TheCall, ID: BuiltinID))
2805 return ExprError();
2806 break;
2807 case Builtin::BI__builtin_add_overflow:
2808 case Builtin::BI__builtin_sub_overflow:
2809 case Builtin::BI__builtin_mul_overflow:
2810 if (BuiltinOverflow(S&: *this, TheCall, BuiltinID))
2811 return ExprError();
2812 break;
2813 case Builtin::BI__builtin_operator_new:
2814 case Builtin::BI__builtin_operator_delete: {
2815 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
2816 ExprResult Res =
2817 BuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
2818 if (Res.isInvalid())
2819 CorrectDelayedTyposInExpr(E: TheCallResult.get());
2820 return Res;
2821 }
2822 case Builtin::BI__builtin_dump_struct:
2823 return BuiltinDumpStruct(S&: *this, TheCall);
2824 case Builtin::BI__builtin_expect_with_probability: {
2825 // We first want to ensure we are called with 3 arguments
2826 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 3))
2827 return ExprError();
    // Then check that the probability is a constant float in the range
    // [0.0, 1.0].
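    // For illustration only: __builtin_expect_with_probability(cond, 1, 0.9)
    // is accepted; a probability of 1.5, or a non-constant probability, is
    // diagnosed below.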
2829 const Expr *ProbArg = TheCall->getArg(Arg: 2);
2830 SmallVector<PartialDiagnosticAt, 8> Notes;
2831 Expr::EvalResult Eval;
2832 Eval.Diag = &Notes;
2833 if ((!ProbArg->EvaluateAsConstantExpr(Result&: Eval, Ctx: Context)) ||
2834 !Eval.Val.isFloat()) {
2835 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float)
2836 << ProbArg->getSourceRange();
2837 for (const PartialDiagnosticAt &PDiag : Notes)
2838 Diag(PDiag.first, PDiag.second);
2839 return ExprError();
2840 }
2841 llvm::APFloat Probability = Eval.Val.getFloat();
2842 bool LoseInfo = false;
2843 Probability.convert(ToSemantics: llvm::APFloat::IEEEdouble(),
2844 RM: llvm::RoundingMode::Dynamic, losesInfo: &LoseInfo);
2845 if (!(Probability >= llvm::APFloat(0.0) &&
2846 Probability <= llvm::APFloat(1.0))) {
2847 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range)
2848 << ProbArg->getSourceRange();
2849 return ExprError();
2850 }
2851 break;
2852 }
2853 case Builtin::BI__builtin_preserve_access_index:
2854 if (BuiltinPreserveAI(S&: *this, TheCall))
2855 return ExprError();
2856 break;
2857 case Builtin::BI__builtin_call_with_static_chain:
2858 if (BuiltinCallWithStaticChain(S&: *this, BuiltinCall: TheCall))
2859 return ExprError();
2860 break;
2861 case Builtin::BI__exception_code:
2862 case Builtin::BI_exception_code:
2863 if (BuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
2864 diag::err_seh___except_block))
2865 return ExprError();
2866 break;
2867 case Builtin::BI__exception_info:
2868 case Builtin::BI_exception_info:
2869 if (BuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
2870 diag::err_seh___except_filter))
2871 return ExprError();
2872 break;
2873 case Builtin::BI__GetExceptionInfo:
2874 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 1))
2875 return ExprError();
2876
2877 if (CheckCXXThrowOperand(
2878 ThrowLoc: TheCall->getBeginLoc(),
2879 ThrowTy: Context.getExceptionObjectType(T: FDecl->getParamDecl(i: 0)->getType()),
2880 E: TheCall))
2881 return ExprError();
2882
2883 TheCall->setType(Context.VoidPtrTy);
2884 break;
2885 case Builtin::BIaddressof:
2886 case Builtin::BI__addressof:
2887 case Builtin::BIforward:
2888 case Builtin::BIforward_like:
2889 case Builtin::BImove:
2890 case Builtin::BImove_if_noexcept:
2891 case Builtin::BIas_const: {
2892 // These are all expected to be of the form
2893 // T &/&&/* f(U &/&&)
2894 // where T and U only differ in qualification.
2895 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 1))
2896 return ExprError();
2897 QualType Param = FDecl->getParamDecl(i: 0)->getType();
2898 QualType Result = FDecl->getReturnType();
2899 bool ReturnsPointer = BuiltinID == Builtin::BIaddressof ||
2900 BuiltinID == Builtin::BI__addressof;
2901 if (!(Param->isReferenceType() &&
2902 (ReturnsPointer ? Result->isAnyPointerType()
2903 : Result->isReferenceType()) &&
2904 Context.hasSameUnqualifiedType(T1: Param->getPointeeType(),
2905 T2: Result->getPointeeType()))) {
2906 Diag(TheCall->getBeginLoc(), diag::err_builtin_move_forward_unsupported)
2907 << FDecl;
2908 return ExprError();
2909 }
2910 break;
2911 }
2912 case Builtin::BI__builtin_ptrauth_strip:
2913 return PointerAuthStrip(S&: *this, Call: TheCall);
2914 case Builtin::BI__builtin_ptrauth_blend_discriminator:
2915 return PointerAuthBlendDiscriminator(S&: *this, Call: TheCall);
2916 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
2917 return PointerAuthSignOrAuth(S&: *this, Call: TheCall, OpKind: PAO_Sign);
2918 case Builtin::BI__builtin_ptrauth_auth:
2919 return PointerAuthSignOrAuth(S&: *this, Call: TheCall, OpKind: PAO_Auth);
2920 case Builtin::BI__builtin_ptrauth_sign_generic_data:
2921 return PointerAuthSignGenericData(S&: *this, Call: TheCall);
2922 case Builtin::BI__builtin_ptrauth_auth_and_resign:
2923 return PointerAuthAuthAndResign(S&: *this, Call: TheCall);
2924 // OpenCL v2.0, s6.13.16 - Pipe functions
2925 case Builtin::BIread_pipe:
2926 case Builtin::BIwrite_pipe:
2927 // Since those two functions are declared with var args, we need a semantic
2928 // check for the argument.
2929 if (BuiltinRWPipe(S&: *this, Call: TheCall))
2930 return ExprError();
2931 break;
2932 case Builtin::BIreserve_read_pipe:
2933 case Builtin::BIreserve_write_pipe:
2934 case Builtin::BIwork_group_reserve_read_pipe:
2935 case Builtin::BIwork_group_reserve_write_pipe:
2936 if (BuiltinReserveRWPipe(S&: *this, Call: TheCall))
2937 return ExprError();
2938 break;
2939 case Builtin::BIsub_group_reserve_read_pipe:
2940 case Builtin::BIsub_group_reserve_write_pipe:
2941 if (checkOpenCLSubgroupExt(S&: *this, Call: TheCall) ||
2942 BuiltinReserveRWPipe(S&: *this, Call: TheCall))
2943 return ExprError();
2944 break;
2945 case Builtin::BIcommit_read_pipe:
2946 case Builtin::BIcommit_write_pipe:
2947 case Builtin::BIwork_group_commit_read_pipe:
2948 case Builtin::BIwork_group_commit_write_pipe:
2949 if (BuiltinCommitRWPipe(S&: *this, Call: TheCall))
2950 return ExprError();
2951 break;
2952 case Builtin::BIsub_group_commit_read_pipe:
2953 case Builtin::BIsub_group_commit_write_pipe:
2954 if (checkOpenCLSubgroupExt(S&: *this, Call: TheCall) ||
2955 BuiltinCommitRWPipe(S&: *this, Call: TheCall))
2956 return ExprError();
2957 break;
2958 case Builtin::BIget_pipe_num_packets:
2959 case Builtin::BIget_pipe_max_packets:
2960 if (BuiltinPipePackets(S&: *this, Call: TheCall))
2961 return ExprError();
2962 break;
2963 case Builtin::BIto_global:
2964 case Builtin::BIto_local:
2965 case Builtin::BIto_private:
2966 if (OpenCLBuiltinToAddr(S&: *this, BuiltinID, Call: TheCall))
2967 return ExprError();
2968 break;
2969 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
2970 case Builtin::BIenqueue_kernel:
2971 if (OpenCLBuiltinEnqueueKernel(S&: *this, TheCall))
2972 return ExprError();
2973 break;
2974 case Builtin::BIget_kernel_work_group_size:
2975 case Builtin::BIget_kernel_preferred_work_group_size_multiple:
2976 if (OpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
2977 return ExprError();
2978 break;
2979 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
2980 case Builtin::BIget_kernel_sub_group_count_for_ndrange:
2981 if (OpenCLBuiltinNDRangeAndBlock(*this, TheCall))
2982 return ExprError();
2983 break;
2984 case Builtin::BI__builtin_os_log_format:
2985 Cleanup.setExprNeedsCleanups(true);
2986 [[fallthrough]];
2987 case Builtin::BI__builtin_os_log_format_buffer_size:
2988 if (BuiltinOSLogFormat(TheCall))
2989 return ExprError();
2990 break;
2991 case Builtin::BI__builtin_frame_address:
2992 case Builtin::BI__builtin_return_address: {
2993 if (BuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF))
2994 return ExprError();
2995
2996 // -Wframe-address warning if non-zero passed to builtin
2997 // return/frame address.
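// For example, __builtin_return_address(0) is accepted silently, while
// __builtin_return_address(1) triggers the warning below, since walking
// past the current frame is not reliably supported on all targets.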
2998 Expr::EvalResult Result;
2999 if (!TheCall->getArg(0)->isValueDependent() &&
3000 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) &&
3001 Result.Val.getInt() != 0)
3002 Diag(TheCall->getBeginLoc(), diag::warn_frame_address)
3003 << ((BuiltinID == Builtin::BI__builtin_return_address)
3004 ? "__builtin_return_address"
3005 : "__builtin_frame_address")
3006 << TheCall->getSourceRange();
3007 break;
3008 }
3009
3010 case Builtin::BI__builtin_nondeterministic_value: {
3011 if (BuiltinNonDeterministicValue(TheCall))
3012 return ExprError();
3013 break;
3014 }
3015
3016 // __builtin_elementwise_abs restricts the element type to signed integers or
3017 // floating point types only.
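// For example (illustrative vector typedef):
//   typedef int v4si __attribute__((ext_vector_type(4)));
//   __builtin_elementwise_abs((v4si){-1, 2, -3, 4}); // OK
//   __builtin_elementwise_abs(1u);                   // rejected below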
3018 case Builtin::BI__builtin_elementwise_abs: {
3019 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
3020 return ExprError();
3021
3022 QualType ArgTy = TheCall->getArg(0)->getType();
3023 QualType EltTy = ArgTy;
3024
3025 if (auto *VecTy = EltTy->getAs<VectorType>())
3026 EltTy = VecTy->getElementType();
3027 if (EltTy->isUnsignedIntegerType()) {
3028 Diag(TheCall->getArg(0)->getBeginLoc(),
3029 diag::err_builtin_invalid_arg_type)
3030 << 1 << /* signed integer or float ty*/ 3 << ArgTy;
3031 return ExprError();
3032 }
3033 break;
3034 }
3035
3036 // These builtins restrict the element type to floating point
3037 // types only.
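// For example, __builtin_elementwise_ceil(2.5) or a float vector argument is
// accepted, while an integer argument is rejected by
// checkFPMathBuiltinElementType below.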
3038 case Builtin::BI__builtin_elementwise_ceil:
3039 case Builtin::BI__builtin_elementwise_cos:
3040 case Builtin::BI__builtin_elementwise_exp:
3041 case Builtin::BI__builtin_elementwise_exp2:
3042 case Builtin::BI__builtin_elementwise_floor:
3043 case Builtin::BI__builtin_elementwise_log:
3044 case Builtin::BI__builtin_elementwise_log2:
3045 case Builtin::BI__builtin_elementwise_log10:
3046 case Builtin::BI__builtin_elementwise_roundeven:
3047 case Builtin::BI__builtin_elementwise_round:
3048 case Builtin::BI__builtin_elementwise_rint:
3049 case Builtin::BI__builtin_elementwise_nearbyint:
3050 case Builtin::BI__builtin_elementwise_sin:
3051 case Builtin::BI__builtin_elementwise_sqrt:
3052 case Builtin::BI__builtin_elementwise_trunc:
3053 case Builtin::BI__builtin_elementwise_canonicalize: {
3054 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
3055 return ExprError();
3056
3057 QualType ArgTy = TheCall->getArg(0)->getType();
3058 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
3059 ArgTy, 1))
3060 return ExprError();
3061 break;
3062 }
3063 case Builtin::BI__builtin_elementwise_fma: {
3064 if (BuiltinElementwiseTernaryMath(TheCall))
3065 return ExprError();
3066 break;
3067 }
3068
3069 // These builtins restrict the element type to floating point
3070 // types only, and take in two arguments.
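// For example, __builtin_elementwise_pow(2.0f, 10.0f), or two float vectors
// of the same type; both operands must have a floating-point element type.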
3071 case Builtin::BI__builtin_elementwise_pow: {
3072 if (BuiltinElementwiseMath(TheCall))
3073 return ExprError();
3074
3075 QualType ArgTy = TheCall->getArg(0)->getType();
3076 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
3077 ArgTy, 1) ||
3078 checkFPMathBuiltinElementType(*this, TheCall->getArg(1)->getBeginLoc(),
3079 ArgTy, 2))
3080 return ExprError();
3081 break;
3082 }
3083
3084 // These builtins restrict the element type to integer
3085 // types only.
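// For example, on vectors of signed char, __builtin_elementwise_add_sat
// clamps 100 + 50 to 127 instead of wrapping; floating-point operands are
// rejected below.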
3086 case Builtin::BI__builtin_elementwise_add_sat:
3087 case Builtin::BI__builtin_elementwise_sub_sat: {
3088 if (BuiltinElementwiseMath(TheCall))
3089 return ExprError();
3090
3091 const Expr *Arg = TheCall->getArg(0);
3092 QualType ArgTy = Arg->getType();
3093 QualType EltTy = ArgTy;
3094
3095 if (auto *VecTy = EltTy->getAs<VectorType>())
3096 EltTy = VecTy->getElementType();
3097
3098 if (!EltTy->isIntegerType()) {
3099 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
3100 << 1 << /* integer ty */ 6 << ArgTy;
3101 return ExprError();
3102 }
3103 break;
3104 }
3105
3106 case Builtin::BI__builtin_elementwise_min:
3107 case Builtin::BI__builtin_elementwise_max:
3108 if (BuiltinElementwiseMath(TheCall))
3109 return ExprError();
3110 break;
3111
3112 case Builtin::BI__builtin_elementwise_bitreverse: {
3113 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
3114 return ExprError();
3115
3116 const Expr *Arg = TheCall->getArg(0);
3117 QualType ArgTy = Arg->getType();
3118 QualType EltTy = ArgTy;
3119
3120 if (auto *VecTy = EltTy->getAs<VectorType>())
3121 EltTy = VecTy->getElementType();
3122
3123 if (!EltTy->isIntegerType()) {
3124 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
3125 << 1 << /* integer ty */ 6 << ArgTy;
3126 return ExprError();
3127 }
3128 break;
3129 }
3130
3131 case Builtin::BI__builtin_elementwise_copysign: {
3132 if (checkArgCount(*this, TheCall, 2))
3133 return ExprError();
3134
3135 ExprResult Magnitude = UsualUnaryConversions(TheCall->getArg(0));
3136 ExprResult Sign = UsualUnaryConversions(TheCall->getArg(1));
3137 if (Magnitude.isInvalid() || Sign.isInvalid())
3138 return ExprError();
3139
3140 QualType MagnitudeTy = Magnitude.get()->getType();
3141 QualType SignTy = Sign.get()->getType();
3142 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
3143 MagnitudeTy, 1) ||
3144 checkFPMathBuiltinElementType(*this, TheCall->getArg(1)->getBeginLoc(),
3145 SignTy, 2)) {
3146 return ExprError();
3147 }
3148
3149 if (MagnitudeTy.getCanonicalType() != SignTy.getCanonicalType()) {
3150 return Diag(Sign.get()->getBeginLoc(),
3151 diag::err_typecheck_call_different_arg_types)
3152 << MagnitudeTy << SignTy;
3153 }
3154
3155 TheCall->setArg(0, Magnitude.get());
3156 TheCall->setArg(1, Sign.get());
3157 TheCall->setType(Magnitude.get()->getType());
3158 break;
3159 }
3160 case Builtin::BI__builtin_reduce_max:
3161 case Builtin::BI__builtin_reduce_min: {
3162 if (PrepareBuiltinReduceMathOneArgCall(TheCall))
3163 return ExprError();
3164
3165 const Expr *Arg = TheCall->getArg(0);
3166 const auto *TyA = Arg->getType()->getAs<VectorType>();
3167 if (!TyA) {
3168 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
3169 << 1 << /* vector ty*/ 4 << Arg->getType();
3170 return ExprError();
3171 }
3172
3173 TheCall->setType(TyA->getElementType());
3174 break;
3175 }
3176
3177 // These builtins support vectors of integers only.
3178 // TODO: ADD/MUL should support floating-point types.
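// For example, __builtin_reduce_add(v) for an int vector v yields the scalar
// sum of its elements; a float vector is rejected below until the TODO above
// is addressed.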
3179 case Builtin::BI__builtin_reduce_add:
3180 case Builtin::BI__builtin_reduce_mul:
3181 case Builtin::BI__builtin_reduce_xor:
3182 case Builtin::BI__builtin_reduce_or:
3183 case Builtin::BI__builtin_reduce_and: {
3184 if (PrepareBuiltinReduceMathOneArgCall(TheCall))
3185 return ExprError();
3186
3187 const Expr *Arg = TheCall->getArg(0);
3188 const auto *TyA = Arg->getType()->getAs<VectorType>();
3189 if (!TyA || !TyA->getElementType()->isIntegerType()) {
3190 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
3191 << 1 << /* vector of integers */ 6 << Arg->getType();
3192 return ExprError();
3193 }
3194 TheCall->setType(TyA->getElementType());
3195 break;
3196 }
3197
3198 case Builtin::BI__builtin_matrix_transpose:
3199 return BuiltinMatrixTranspose(TheCall, TheCallResult);
3200
3201 case Builtin::BI__builtin_matrix_column_major_load:
3202 return BuiltinMatrixColumnMajorLoad(TheCall, TheCallResult);
3203
3204 case Builtin::BI__builtin_matrix_column_major_store:
3205 return BuiltinMatrixColumnMajorStore(TheCall, TheCallResult);
3206
3207 case Builtin::BI__builtin_get_device_side_mangled_name: {
3208 auto Check = [](CallExpr *TheCall) {
3209 if (TheCall->getNumArgs() != 1)
3210 return false;
3211 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts());
3212 if (!DRE)
3213 return false;
3214 auto *D = DRE->getDecl();
3215 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D))
3216 return false;
3217 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() ||
3218 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>();
3219 };
3220 if (!Check(TheCall)) {
3221 Diag(TheCall->getBeginLoc(),
3222 diag::err_hip_invalid_args_builtin_mangled_name);
3223 return ExprError();
3224 }
3225 break;
3226 }
3227 case Builtin::BI__builtin_popcountg:
3228 if (BuiltinPopcountg(*this, TheCall))
3229 return ExprError();
3230 break;
3231 case Builtin::BI__builtin_clzg:
3232 case Builtin::BI__builtin_ctzg:
3233 if (BuiltinCountZeroBitsGeneric(*this, TheCall))
3234 return ExprError();
3235 break;
3236
3237 case Builtin::BI__builtin_allow_runtime_check: {
3238 Expr *Arg = TheCall->getArg(0);
3239 // Check if the argument is a string literal.
3240 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) {
3241 Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
3242 << Arg->getSourceRange();
3243 return ExprError();
3244 }
3245 break;
3246 }
3247 }
3248
3249 if (getLangOpts().HLSL && CheckHLSLBuiltinFunctionCall(BuiltinID, TheCall))
3250 return ExprError();
3251
3252 // Since the target specific builtins for each arch overlap, only check those
3253 // of the arch we are compiling for.
3254 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
3255 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
3256 assert(Context.getAuxTargetInfo() &&
3257 "Aux Target Builtin, but not an aux target?");
3258
3259 if (CheckTSBuiltinFunctionCall(
3260 *Context.getAuxTargetInfo(),
3261 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall))
3262 return ExprError();
3263 } else {
3264 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID,
3265 TheCall))
3266 return ExprError();
3267 }
3268 }
3269
3270 return TheCallResult;
3271}
3272
3273// Get the valid immediate range for the specified NEON type code.
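// For example, a 128-bit vector of Int8 has 16 lanes, so the largest valid
// lane index is (8 << 1) - 1 == 15, while the largest shift amount for an
// 8-bit element is 7.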
3274static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
3275 NeonTypeFlags Type(t);
3276 int IsQuad = ForceQuad ? true : Type.isQuad();
3277 switch (Type.getEltType()) {
3278 case NeonTypeFlags::Int8:
3279 case NeonTypeFlags::Poly8:
3280 return shift ? 7 : (8 << IsQuad) - 1;
3281 case NeonTypeFlags::Int16:
3282 case NeonTypeFlags::Poly16:
3283 return shift ? 15 : (4 << IsQuad) - 1;
3284 case NeonTypeFlags::Int32:
3285 return shift ? 31 : (2 << IsQuad) - 1;
3286 case NeonTypeFlags::Int64:
3287 case NeonTypeFlags::Poly64:
3288 return shift ? 63 : (1 << IsQuad) - 1;
3289 case NeonTypeFlags::Poly128:
3290 return shift ? 127 : (1 << IsQuad) - 1;
3291 case NeonTypeFlags::Float16:
3292 assert(!shift && "cannot shift float types!");
3293 return (4 << IsQuad) - 1;
3294 case NeonTypeFlags::Float32:
3295 assert(!shift && "cannot shift float types!");
3296 return (2 << IsQuad) - 1;
3297 case NeonTypeFlags::Float64:
3298 assert(!shift && "cannot shift float types!");
3299 return (1 << IsQuad) - 1;
3300 case NeonTypeFlags::BFloat16:
3301 assert(!shift && "cannot shift float types!");
3302 return (4 << IsQuad) - 1;
3303 }
3304 llvm_unreachable("Invalid NeonTypeFlag!");
3305}
3306
3307/// getNeonEltType - Return the QualType corresponding to the elements of
3308/// the vector type specified by the NeonTypeFlags. This is used to check
3309/// the pointer arguments for Neon load/store intrinsics.
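/// For example, Int64 maps to 'long' when the target's int64_t is 'long'
/// (IsInt64Long) and to 'long long' otherwise, so that pointer arguments such
/// as the one taken by vld1q_s64 type-check against the user's declarations.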
3310static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
3311 bool IsPolyUnsigned, bool IsInt64Long) {
3312 switch (Flags.getEltType()) {
3313 case NeonTypeFlags::Int8:
3314 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
3315 case NeonTypeFlags::Int16:
3316 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
3317 case NeonTypeFlags::Int32:
3318 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
3319 case NeonTypeFlags::Int64:
3320 if (IsInt64Long)
3321 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
3322 else
3323 return Flags.isUnsigned() ? Context.UnsignedLongLongTy
3324 : Context.LongLongTy;
3325 case NeonTypeFlags::Poly8:
3326 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
3327 case NeonTypeFlags::Poly16:
3328 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
3329 case NeonTypeFlags::Poly64:
3330 if (IsInt64Long)
3331 return Context.UnsignedLongTy;
3332 else
3333 return Context.UnsignedLongLongTy;
3334 case NeonTypeFlags::Poly128:
3335 break;
3336 case NeonTypeFlags::Float16:
3337 return Context.HalfTy;
3338 case NeonTypeFlags::Float32:
3339 return Context.FloatTy;
3340 case NeonTypeFlags::Float64:
3341 return Context.DoubleTy;
3342 case NeonTypeFlags::BFloat16:
3343 return Context.BFloat16Ty;
3344 }
3345 llvm_unreachable("Invalid NeonTypeFlag!");
3346}
3347
3348enum ArmStreamingType {
3349 ArmNonStreaming,
3350 ArmStreaming,
3351 ArmStreamingCompatible,
3352 ArmStreamingOrSVE2p1
3353};
3354
3355enum ArmSMEState : unsigned {
3356 ArmNoState = 0,
3357
3358 ArmInZA = 0b01,
3359 ArmOutZA = 0b10,
3360 ArmInOutZA = 0b11,
3361 ArmZAMask = 0b11,
3362
3363 ArmInZT0 = 0b01 << 2,
3364 ArmOutZT0 = 0b10 << 2,
3365 ArmInOutZT0 = 0b11 << 2,
3366 ArmZT0Mask = 0b11 << 2
3367};
3368
3369bool Sema::ParseSVEImmChecks(
3370 CallExpr *TheCall, SmallVector<std::tuple<int, int, int>, 3> &ImmChecks) {
3371 // Perform all the immediate checks for this builtin call.
3372 bool HasError = false;
3373 for (auto &I : ImmChecks) {
3374 int ArgNum, CheckTy, ElementSizeInBits;
3375 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;
3376
3377 typedef bool (*OptionSetCheckFnTy)(int64_t Value);
3378
3379 // Function that checks whether the operand (ArgNum) is an immediate
3380 // that is one of the predefined values.
3381 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
3382 int ErrDiag) -> bool {
3383 // We can't check the value of a dependent argument.
3384 Expr *Arg = TheCall->getArg(ArgNum);
3385 if (Arg->isTypeDependent() || Arg->isValueDependent())
3386 return false;
3387
3388 // Check constant-ness first.
3389 llvm::APSInt Imm;
3390 if (BuiltinConstantArg(TheCall, ArgNum, Imm))
3391 return true;
3392
3393 if (!CheckImm(Imm.getSExtValue()))
3394 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
3395 return false;
3396 };
3397
3398 switch ((SVETypeFlags::ImmCheckType)CheckTy) {
3399 case SVETypeFlags::ImmCheck0_31:
3400 if (BuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
3401 HasError = true;
3402 break;
3403 case SVETypeFlags::ImmCheck0_13:
3404 if (BuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
3405 HasError = true;
3406 break;
3407 case SVETypeFlags::ImmCheck1_16:
3408 if (BuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
3409 HasError = true;
3410 break;
3411 case SVETypeFlags::ImmCheck0_7:
3412 if (BuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
3413 HasError = true;
3414 break;
3415 case SVETypeFlags::ImmCheck1_1:
3416 if (BuiltinConstantArgRange(TheCall, ArgNum, 1, 1))
3417 HasError = true;
3418 break;
3419 case SVETypeFlags::ImmCheck1_3:
3420 if (BuiltinConstantArgRange(TheCall, ArgNum, 1, 3))
3421 HasError = true;
3422 break;
3423 case SVETypeFlags::ImmCheck1_7:
3424 if (BuiltinConstantArgRange(TheCall, ArgNum, 1, 7))
3425 HasError = true;
3426 break;
3427 case SVETypeFlags::ImmCheckExtract:
3428 if (BuiltinConstantArgRange(TheCall, ArgNum, 0,
3429 (2048 / ElementSizeInBits) - 1))
3430 HasError = true;
3431 break;
3432 case SVETypeFlags::ImmCheckShiftRight:
3433 if (BuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
3434 HasError = true;
3435 break;
3436 case SVETypeFlags::ImmCheckShiftRightNarrow:
3437 if (BuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits / 2))
3438 HasError = true;
3439 break;
3440 case SVETypeFlags::ImmCheckShiftLeft:
3441 if (BuiltinConstantArgRange(TheCall, ArgNum, 0, ElementSizeInBits - 1))
3442 HasError = true;
3443 break;
3444 case SVETypeFlags::ImmCheckLaneIndex:
3445 if (BuiltinConstantArgRange(TheCall, ArgNum, 0,
3446 (128 / (1 * ElementSizeInBits)) - 1))
3447 HasError = true;
3448 break;
3449 case SVETypeFlags::ImmCheckLaneIndexCompRotate:
3450 if (BuiltinConstantArgRange(TheCall, ArgNum, 0,
3451 (128 / (2 * ElementSizeInBits)) - 1))
3452 HasError = true;
3453 break;
3454 case SVETypeFlags::ImmCheckLaneIndexDot:
3455 if (BuiltinConstantArgRange(TheCall, ArgNum, 0,
3456 (128 / (4 * ElementSizeInBits)) - 1))
3457 HasError = true;
3458 break;
3459 case SVETypeFlags::ImmCheckComplexRot90_270:
3460 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
3461 diag::err_rotation_argument_to_cadd))
3462 HasError = true;
3463 break;
3464 case SVETypeFlags::ImmCheckComplexRotAll90:
3465 if (CheckImmediateInSet(
3466 [](int64_t V) {
3467 return V == 0 || V == 90 || V == 180 || V == 270;
3468 },
3469 diag::err_rotation_argument_to_cmla))
3470 HasError = true;
3471 break;
3472 case SVETypeFlags::ImmCheck0_1:
3473 if (BuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
3474 HasError = true;
3475 break;
3476 case SVETypeFlags::ImmCheck0_2:
3477 if (BuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
3478 HasError = true;
3479 break;
3480 case SVETypeFlags::ImmCheck0_3:
3481 if (BuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
3482 HasError = true;
3483 break;
3484 case SVETypeFlags::ImmCheck0_0:
3485 if (BuiltinConstantArgRange(TheCall, ArgNum, 0, 0))
3486 HasError = true;
3487 break;
3488 case SVETypeFlags::ImmCheck0_15:
3489 if (BuiltinConstantArgRange(TheCall, ArgNum, 0, 15))
3490 HasError = true;
3491 break;
3492 case SVETypeFlags::ImmCheck0_255:
3493 if (BuiltinConstantArgRange(TheCall, ArgNum, 0, 255))
3494 HasError = true;
3495 break;
3496 case SVETypeFlags::ImmCheck2_4_Mul2:
3497 if (BuiltinConstantArgRange(TheCall, ArgNum, 2, 4) ||
3498 BuiltinConstantArgMultiple(TheCall, ArgNum, 2))
3499 HasError = true;
3500 break;
3501 }
3502 }
3503
3504 return HasError;
3505}
3506
3507static ArmStreamingType getArmStreamingFnType(const FunctionDecl *FD) {
3508 if (FD->hasAttr<ArmLocallyStreamingAttr>())
3509 return ArmStreaming;
3510 if (const Type *Ty = FD->getType().getTypePtrOrNull()) {
3511 if (const auto *FPT = Ty->getAs<FunctionProtoType>()) {
3512 if (FPT->getAArch64SMEAttributes() &
3513 FunctionType::SME_PStateSMEnabledMask)
3514 return ArmStreaming;
3515 if (FPT->getAArch64SMEAttributes() &
3516 FunctionType::SME_PStateSMCompatibleMask)
3517 return ArmStreamingCompatible;
3518 }
3519 }
3520 return ArmNonStreaming;
3521}
3522
3523static void checkArmStreamingBuiltin(Sema &S, CallExpr *TheCall,
3524 const FunctionDecl *FD,
3525 ArmStreamingType BuiltinType) {
3526 ArmStreamingType FnType = getArmStreamingFnType(FD);
3527 if (BuiltinType == ArmStreamingOrSVE2p1) {
3528 // Check intrinsics that are available in [sve2p1 or sme/sme2].
3529 llvm::StringMap<bool> CallerFeatureMap;
3530 S.Context.getFunctionFeatureMap(CallerFeatureMap, FD);
3531 if (Builtin::evaluateRequiredTargetFeatures("sve2p1", CallerFeatureMap))
3532 BuiltinType = ArmStreamingCompatible;
3533 else
3534 BuiltinType = ArmStreaming;
3535 }
3536
3537 if (FnType == ArmStreaming && BuiltinType == ArmNonStreaming) {
3538 S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
3539 << TheCall->getSourceRange() << "streaming";
3540 }
3541
3542 if (FnType == ArmStreamingCompatible &&
3543 BuiltinType != ArmStreamingCompatible) {
3544 S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
3545 << TheCall->getSourceRange() << "streaming compatible";
3546 return;
3547 }
3548
3549 if (FnType == ArmNonStreaming && BuiltinType == ArmStreaming) {
3550 S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
3551 << TheCall->getSourceRange() << "non-streaming";
3552 }
3553}
3554
3555static bool hasArmZAState(const FunctionDecl *FD) {
3556 const auto *T = FD->getType()->getAs<FunctionProtoType>();
3557 return (T && FunctionType::getArmZAState(T->getAArch64SMEAttributes()) !=
3558 FunctionType::ARM_None) ||
3559 (FD->hasAttr<ArmNewAttr>() && FD->getAttr<ArmNewAttr>()->isNewZA());
3560}
3561
3562static bool hasArmZT0State(const FunctionDecl *FD) {
3563 const auto *T = FD->getType()->getAs<FunctionProtoType>();
3564 return (T && FunctionType::getArmZT0State(T->getAArch64SMEAttributes()) !=
3565 FunctionType::ARM_None) ||
3566 (FD->hasAttr<ArmNewAttr>() && FD->getAttr<ArmNewAttr>()->isNewZT0());
3567}
3568
3569static ArmSMEState getSMEState(unsigned BuiltinID) {
3570 switch (BuiltinID) {
3571 default:
3572 return ArmNoState;
3573#define GET_SME_BUILTIN_GET_STATE
3574#include "clang/Basic/arm_sme_builtins_za_state.inc"
3575#undef GET_SME_BUILTIN_GET_STATE
3576 }
3577}
3578
3579bool Sema::CheckSMEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
3580 if (const FunctionDecl *FD = getCurFunctionDecl()) {
3581 std::optional<ArmStreamingType> BuiltinType;
3582
3583 switch (BuiltinID) {
3584#define GET_SME_STREAMING_ATTRS
3585#include "clang/Basic/arm_sme_streaming_attrs.inc"
3586#undef GET_SME_STREAMING_ATTRS
3587 }
3588
3589 if (BuiltinType)
3590 checkArmStreamingBuiltin(*this, TheCall, FD, *BuiltinType);
3591
3592 if ((getSMEState(BuiltinID) & ArmZAMask) && !hasArmZAState(FD))
3593 Diag(TheCall->getBeginLoc(),
3594 diag::warn_attribute_arm_za_builtin_no_za_state)
3595 << TheCall->getSourceRange();
3596
3597 if ((getSMEState(BuiltinID) & ArmZT0Mask) && !hasArmZT0State(FD))
3598 Diag(TheCall->getBeginLoc(),
3599 diag::warn_attribute_arm_zt0_builtin_no_zt0_state)
3600 << TheCall->getSourceRange();
3601 }
3602
3603 // Range check SME intrinsics that take immediate values.
3604 SmallVector<std::tuple<int, int, int>, 3> ImmChecks;
3605
3606 switch (BuiltinID) {
3607 default:
3608 return false;
3609#define GET_SME_IMMEDIATE_CHECK
3610#include "clang/Basic/arm_sme_sema_rangechecks.inc"
3611#undef GET_SME_IMMEDIATE_CHECK
3612 }
3613
3614 return ParseSVEImmChecks(TheCall, ImmChecks);
3615}
3616
3617bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
3618 if (const FunctionDecl *FD = getCurFunctionDecl()) {
3619 std::optional<ArmStreamingType> BuiltinType;
3620
3621 switch (BuiltinID) {
3622#define GET_SVE_STREAMING_ATTRS
3623#include "clang/Basic/arm_sve_streaming_attrs.inc"
3624#undef GET_SVE_STREAMING_ATTRS
3625 }
3626 if (BuiltinType)
3627 checkArmStreamingBuiltin(*this, TheCall, FD, *BuiltinType);
3628 }
3629 // Range check SVE intrinsics that take immediate values.
3630 SmallVector<std::tuple<int, int, int>, 3> ImmChecks;
3631
3632 switch (BuiltinID) {
3633 default:
3634 return false;
3635#define GET_SVE_IMMEDIATE_CHECK
3636#include "clang/Basic/arm_sve_sema_rangechecks.inc"
3637#undef GET_SVE_IMMEDIATE_CHECK
3638 }
3639
3640 return ParseSVEImmChecks(TheCall, ImmChecks);
3641}
3642
3643bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
3644 unsigned BuiltinID, CallExpr *TheCall) {
3645 if (const FunctionDecl *FD = getCurFunctionDecl()) {
3646
3647 switch (BuiltinID) {
3648 default:
3649 break;
3650#define GET_NEON_BUILTINS
3651#define TARGET_BUILTIN(id, ...) case NEON::BI##id:
3652#define BUILTIN(id, ...) case NEON::BI##id:
3653#include "clang/Basic/arm_neon.inc"
3654 checkArmStreamingBuiltin(*this, TheCall, FD, ArmNonStreaming);
3655 break;
3656#undef TARGET_BUILTIN
3657#undef BUILTIN
3658#undef GET_NEON_BUILTINS
3659 }
3660 }
3661
3662 llvm::APSInt Result;
3663 uint64_t mask = 0;
3664 unsigned TV = 0;
3665 int PtrArgNum = -1;
3666 bool HasConstPtr = false;
3667 switch (BuiltinID) {
3668#define GET_NEON_OVERLOAD_CHECK
3669#include "clang/Basic/arm_neon.inc"
3670#include "clang/Basic/arm_fp16.inc"
3671#undef GET_NEON_OVERLOAD_CHECK
3672 }
3673
3674 // For NEON intrinsics which are overloaded on vector element type, validate
3675 // the immediate which specifies which variant to emit.
3676 unsigned ImmArg = TheCall->getNumArgs()-1;
3677 if (mask) {
3678 if (BuiltinConstantArg(TheCall, ImmArg, Result))
3679 return true;
3680
3681 TV = Result.getLimitedValue(64);
3682 if ((TV > 63) || (mask & (1ULL << TV)) == 0)
3683 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
3684 << TheCall->getArg(ImmArg)->getSourceRange();
3685 }
3686
3687 if (PtrArgNum >= 0) {
3688 // Check that pointer arguments have the specified type.
3689 Expr *Arg = TheCall->getArg(PtrArgNum);
3690 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
3691 Arg = ICE->getSubExpr();
3692 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
3693 QualType RHSTy = RHS.get()->getType();
3694
3695 llvm::Triple::ArchType Arch = TI.getTriple().getArch();
3696 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
3697 Arch == llvm::Triple::aarch64_32 ||
3698 Arch == llvm::Triple::aarch64_be;
3699 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
3700 QualType EltTy =
3701 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
3702 if (HasConstPtr)
3703 EltTy = EltTy.withConst();
3704 QualType LHSTy = Context.getPointerType(EltTy);
3705 AssignConvertType ConvTy;
3706 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
3707 if (RHS.isInvalid())
3708 return true;
3709 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
3710 RHS.get(), AA_Assigning))
3711 return true;
3712 }
3713
3714 // For NEON intrinsics which take an immediate value as part of the
3715 // instruction, range check them here.
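// For example, the lane argument of vgetq_lane_s32 must be a constant in
// [0, 3]; the tablegen'd include below supplies the argument index and range
// for each such intrinsic.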
3716 unsigned i = 0, l = 0, u = 0;
3717 switch (BuiltinID) {
3718 default:
3719 return false;
3720 #define GET_NEON_IMMEDIATE_CHECK
3721 #include "clang/Basic/arm_neon.inc"
3722 #include "clang/Basic/arm_fp16.inc"
3723 #undef GET_NEON_IMMEDIATE_CHECK
3724 }
3725
3726 return BuiltinConstantArgRange(TheCall, i, l, u + l);
3727}
3728
3729bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
3730 switch (BuiltinID) {
3731 default:
3732 return false;
3733 #include "clang/Basic/arm_mve_builtin_sema.inc"
3734 }
3735}
3736
3737bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
3738 CallExpr *TheCall) {
3739 bool Err = false;
3740 switch (BuiltinID) {
3741 default:
3742 return false;
3743#include "clang/Basic/arm_cde_builtin_sema.inc"
3744 }
3745
3746 if (Err)
3747 return true;
3748
3749 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
3750}
3751
3752bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI,
3753 const Expr *CoprocArg, bool WantCDE) {
3754 if (isConstantEvaluatedContext())
3755 return false;
3756
3757 // We can't check the value of a dependent argument.
3758 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
3759 return false;
3760
3761 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
3762 int64_t CoprocNo = CoprocNoAP.getExtValue();
3763 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");
3764
3765 uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
3766 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
3767
3768 if (IsCDECoproc != WantCDE)
3769 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
3770 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
3771
3772 return false;
3773}
3774
3775bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
3776 unsigned MaxWidth) {
3777 assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
3778 BuiltinID == ARM::BI__builtin_arm_ldaex ||
3779 BuiltinID == ARM::BI__builtin_arm_strex ||
3780 BuiltinID == ARM::BI__builtin_arm_stlex ||
3781 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
3782 BuiltinID == AArch64::BI__builtin_arm_ldaex ||
3783 BuiltinID == AArch64::BI__builtin_arm_strex ||
3784 BuiltinID == AArch64::BI__builtin_arm_stlex) &&
3785 "unexpected ARM builtin");
3786 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
3787 BuiltinID == ARM::BI__builtin_arm_ldaex ||
3788 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
3789 BuiltinID == AArch64::BI__builtin_arm_ldaex;
3790
3791 DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
3792
3793 // Ensure that we have the proper number of arguments.
3794 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
3795 return true;
3796
3797 // Inspect the pointer argument of the atomic builtin. This should always be
3798 // a pointer type, whose element is an integral scalar or pointer type.
3799 // Because it is a pointer type, we don't have to worry about any implicit
3800 // casts here.
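// For example, __builtin_arm_ldrex(&x) inspects argument 0, while
// __builtin_arm_strex(value, &x) inspects argument 1.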
3801 Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
3802 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
3803 if (PointerArgRes.isInvalid())
3804 return true;
3805 PointerArg = PointerArgRes.get();
3806
3807 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
3808 if (!pointerType) {
3809 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
3810 << PointerArg->getType() << PointerArg->getSourceRange();
3811 return true;
3812 }
3813
3814 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
3815 // task is to insert the appropriate casts into the AST. First work out just
3816 // what the appropriate type is.
3817 QualType ValType = pointerType->getPointeeType();
3818 QualType AddrType = ValType.getUnqualifiedType().withVolatile();
3819 if (IsLdrex)
3820 AddrType.addConst();
3821
3822 // Issue a warning if the cast is dodgy.
3823 CastKind CastNeeded = CK_NoOp;
3824 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
3825 CastNeeded = CK_BitCast;
3826 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
3827 << PointerArg->getType() << Context.getPointerType(AddrType)
3828 << AA_Passing << PointerArg->getSourceRange();
3829 }
3830
3831 // Finally, do the cast and replace the argument with the corrected version.
3832 AddrType = Context.getPointerType(AddrType);
3833 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
3834 if (PointerArgRes.isInvalid())
3835 return true;
3836 PointerArg = PointerArgRes.get();
3837
3838 TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);
3839
3840 // In general, we allow ints, floats and pointers to be loaded and stored.
3841 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
3842 !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
3843 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
3844 << PointerArg->getType() << PointerArg->getSourceRange();
3845 return true;
3846 }
3847
3848 // But ARM doesn't have instructions to deal with 128-bit versions.
3849 if (Context.getTypeSize(ValType) > MaxWidth) {
3850 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
3851 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
3852 << PointerArg->getType() << PointerArg->getSourceRange();
3853 return true;
3854 }
3855
3856 switch (ValType.getObjCLifetime()) {
3857 case Qualifiers::OCL_None:
3858 case Qualifiers::OCL_ExplicitNone:
3859 // okay
3860 break;
3861
3862 case Qualifiers::OCL_Weak:
3863 case Qualifiers::OCL_Strong:
3864 case Qualifiers::OCL_Autoreleasing:
3865 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
3866 << ValType << PointerArg->getSourceRange();
3867 return true;
3868 }
3869
3870 if (IsLdrex) {
3871 TheCall->setType(ValType);
3872 return false;
3873 }
3874
3875 // Initialize the argument to be stored.
3876 ExprResult ValArg = TheCall->getArg(0);
3877 InitializedEntity Entity = InitializedEntity::InitializeParameter(
3878 Context, ValType, /*consume*/ false);
3879 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
3880 if (ValArg.isInvalid())
3881 return true;
3882 TheCall->setArg(0, ValArg.get());
3883
3884 // __builtin_arm_strex always returns an int. It's marked as such in the .def,
3885 // but the custom checker bypasses all default analysis.
3886 TheCall->setType(Context.IntTy);
3887 return false;
3888}
3889
3890bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
3891 CallExpr *TheCall) {
3892 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
3893 BuiltinID == ARM::BI__builtin_arm_ldaex ||
3894 BuiltinID == ARM::BI__builtin_arm_strex ||
3895 BuiltinID == ARM::BI__builtin_arm_stlex) {
3896 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
3897 }
3898
3899 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
3900 return BuiltinConstantArgRange(TheCall, 1, 0, 1) ||
3901 BuiltinConstantArgRange(TheCall, 2, 0, 1);
3902 }
3903
3904 if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
3905 BuiltinID == ARM::BI__builtin_arm_wsr64)
3906 return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);
3907
3908 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
3909 BuiltinID == ARM::BI__builtin_arm_rsrp ||
3910 BuiltinID == ARM::BI__builtin_arm_wsr ||
3911 BuiltinID == ARM::BI__builtin_arm_wsrp)
3912 return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
3913
3914 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
3915 return true;
3916 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
3917 return true;
3918 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
3919 return true;
3920
3921 // For intrinsics which take an immediate value as part of the instruction,
3922 // range check them here.
3923 // FIXME: VFP Intrinsics should error if VFP not present.
3924 switch (BuiltinID) {
3925 default: return false;
3926 case ARM::BI__builtin_arm_ssat:
3927 return BuiltinConstantArgRange(TheCall, 1, 1, 32);
3928 case ARM::BI__builtin_arm_usat:
3929 return BuiltinConstantArgRange(TheCall, 1, 0, 31);
3930 case ARM::BI__builtin_arm_ssat16:
3931 return BuiltinConstantArgRange(TheCall, 1, 1, 16);
3932 case ARM::BI__builtin_arm_usat16:
3933 return BuiltinConstantArgRange(TheCall, 1, 0, 15);
3934 case ARM::BI__builtin_arm_vcvtr_f:
3935 case ARM::BI__builtin_arm_vcvtr_d:
3936 return BuiltinConstantArgRange(TheCall, 1, 0, 1);
3937 case ARM::BI__builtin_arm_dmb:
3938 case ARM::BI__builtin_arm_dsb:
3939 case ARM::BI__builtin_arm_isb:
3940 case ARM::BI__builtin_arm_dbg:
3941 return BuiltinConstantArgRange(TheCall, 0, 0, 15);
3942 case ARM::BI__builtin_arm_cdp:
3943 case ARM::BI__builtin_arm_cdp2:
3944 case ARM::BI__builtin_arm_mcr:
3945 case ARM::BI__builtin_arm_mcr2:
3946 case ARM::BI__builtin_arm_mrc:
3947 case ARM::BI__builtin_arm_mrc2:
3948 case ARM::BI__builtin_arm_mcrr:
3949 case ARM::BI__builtin_arm_mcrr2:
3950 case ARM::BI__builtin_arm_mrrc:
3951 case ARM::BI__builtin_arm_mrrc2:
3952 case ARM::BI__builtin_arm_ldc:
3953 case ARM::BI__builtin_arm_ldcl:
3954 case ARM::BI__builtin_arm_ldc2:
3955 case ARM::BI__builtin_arm_ldc2l:
3956 case ARM::BI__builtin_arm_stc:
3957 case ARM::BI__builtin_arm_stcl:
3958 case ARM::BI__builtin_arm_stc2:
3959 case ARM::BI__builtin_arm_stc2l:
3960 return BuiltinConstantArgRange(TheCall, 0, 0, 15) ||
3961 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
3962 /*WantCDE*/ false);
3963 }
3964}
3965
3966bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
3967 unsigned BuiltinID,
3968 CallExpr *TheCall) {
3969 if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
3970 BuiltinID == AArch64::BI__builtin_arm_ldaex ||
3971 BuiltinID == AArch64::BI__builtin_arm_strex ||
3972 BuiltinID == AArch64::BI__builtin_arm_stlex) {
3973 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
3974 }
3975
3976 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
3977 return BuiltinConstantArgRange(TheCall, 1, 0, 1) ||
3978 BuiltinConstantArgRange(TheCall, 2, 0, 3) ||
3979 BuiltinConstantArgRange(TheCall, 3, 0, 1) ||
3980 BuiltinConstantArgRange(TheCall, 4, 0, 1);
3981 }
3982
3983 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
3984 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
3985 BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
3986 BuiltinID == AArch64::BI__builtin_arm_wsr128)
3987 return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
3988
3989 // Memory Tagging Extensions (MTE) Intrinsics
3990 if (BuiltinID == AArch64::BI__builtin_arm_irg ||
3991 BuiltinID == AArch64::BI__builtin_arm_addg ||
3992 BuiltinID == AArch64::BI__builtin_arm_gmi ||
3993 BuiltinID == AArch64::BI__builtin_arm_ldg ||
3994 BuiltinID == AArch64::BI__builtin_arm_stg ||
3995 BuiltinID == AArch64::BI__builtin_arm_subp) {
3996 return BuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
3997 }
3998
3999 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
4000 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
4001 BuiltinID == AArch64::BI__builtin_arm_wsr ||
4002 BuiltinID == AArch64::BI__builtin_arm_wsrp)
4003 return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
4004
4005 // Only check the valid encoding range. Any constant in this range would be
4006 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
4007 // an exception for incorrect registers. This matches MSVC behavior.
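// For example, _ReadStatusReg(0x5A10) is accepted because the operand fits in
// the encoding range checked below; whether it names an implemented register
// is left to the hardware, as noted above. (The value is illustrative.)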
4008 if (BuiltinID == AArch64::BI_ReadStatusReg ||
4009 BuiltinID == AArch64::BI_WriteStatusReg)
4010 return BuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);
4011
4012 if (BuiltinID == AArch64::BI__getReg)
4013 return BuiltinConstantArgRange(TheCall, 0, 0, 31);
4014
4015 if (BuiltinID == AArch64::BI__break)
4016 return BuiltinConstantArgRange(TheCall, 0, 0, 0xffff);
4017
4018 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
4019 return true;
4020
4021 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
4022 return true;
4023
4024 if (CheckSMEBuiltinFunctionCall(BuiltinID, TheCall))
4025 return true;
4026
4027 // For intrinsics which take an immediate value as part of the instruction,
4028 // range check them here.
4029 unsigned i = 0, l = 0, u = 0;
4030 switch (BuiltinID) {
4031 default: return false;
4032 case AArch64::BI__builtin_arm_dmb:
4033 case AArch64::BI__builtin_arm_dsb:
4034 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
4035 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
4036 }
4037
4038 return BuiltinConstantArgRange(TheCall, i, l, u + l);
4039}
4040
4041static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) {
4042 if (Arg->getType()->getAsPlaceholderType())
4043 return false;
4044
4045 // The first argument needs to be a record field access.
4046 // If it is an array element access, we delay the decision
4047 // to the BPF backend, which checks whether the access is a
4048 // field access or not.
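// For example, __builtin_preserve_field_info(s->field, kind) or an array
// element access such as s->arr[i] is accepted here, while a plain scalar
// expression is not. (The names in this example are illustrative.)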
4049 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField ||
4050 isa<MemberExpr>(Arg->IgnoreParens()) ||
4051 isa<ArraySubscriptExpr>(Arg->IgnoreParens()));
4052}
4053
4054static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) {
4055 QualType ArgType = Arg->getType();
4056 if (ArgType->getAsPlaceholderType())
4057 return false;
4058
4059 // for TYPE_EXISTENCE/TYPE_MATCH/TYPE_SIZEOF reloc type
4060 // format:
4061 // 1. __builtin_preserve_type_info(*(<type> *)0, flag);
4062 // 2. <type> var;
4063 // __builtin_preserve_type_info(var, flag);
4064 if (!isa<DeclRefExpr>(Arg->IgnoreParens()) &&
4065 !isa<UnaryOperator>(Arg->IgnoreParens()))
4066 return false;
4067
4068 // Typedef type.
4069 if (ArgType->getAs<TypedefType>())
4070 return true;
4071
4072 // Record type or Enum type.
4073 const Type *Ty = ArgType->getUnqualifiedDesugaredType();
4074 if (const auto *RT = Ty->getAs<RecordType>()) {
4075 if (!RT->getDecl()->getDeclName().isEmpty())
4076 return true;
4077 } else if (const auto *ET = Ty->getAs<EnumType>()) {
4078 if (!ET->getDecl()->getDeclName().isEmpty())
4079 return true;
4080 }
4081
4082 return false;
4083}
4084
4085static bool isValidBPFPreserveEnumValueArg(Expr *Arg) {
4086 QualType ArgType = Arg->getType();
4087 if (ArgType->getAsPlaceholderType())
4088 return false;
4089
4090 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type
4091 // format:
4092 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>,
4093 // flag);
4094 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens());
4095 if (!UO)
4096 return false;
4097
4098 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
4099 if (!CE)
4100 return false;
4101 if (CE->getCastKind() != CK_IntegralToPointer &&
4102 CE->getCastKind() != CK_NullToPointer)
4103 return false;
4104
4105 // The integer must be from an EnumConstantDecl.
4106 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr());
4107 if (!DR)
4108 return false;
4109
4110 const EnumConstantDecl *Enumerator =
4111 dyn_cast<EnumConstantDecl>(DR->getDecl());
4112 if (!Enumerator)
4113 return false;
4114
4115 // The type must be EnumType.
4116 const Type *Ty = ArgType->getUnqualifiedDesugaredType();
4117 const auto *ET = Ty->getAs<EnumType>();
4118 if (!ET)
4119 return false;
4120
4121 // The enum value must be supported.
4122 return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator);
4123}
4124
4125bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
4126 CallExpr *TheCall) {
4127 assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
4128 BuiltinID == BPF::BI__builtin_btf_type_id ||
4129 BuiltinID == BPF::BI__builtin_preserve_type_info ||
4130 BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
4131 "unexpected BPF builtin");
4132
4133 if (checkArgCount(*this, TheCall, 2))
4134 return true;
4135
4136 // The second argument needs to be a constant int
4137 Expr *Arg = TheCall->getArg(1);
4138 std::optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context);
4139 diag::kind kind;
4140 if (!Value) {
4141 if (BuiltinID == BPF::BI__builtin_preserve_field_info)
4142 kind = diag::err_preserve_field_info_not_const;
4143 else if (BuiltinID == BPF::BI__builtin_btf_type_id)
4144 kind = diag::err_btf_type_id_not_const;
4145 else if (BuiltinID == BPF::BI__builtin_preserve_type_info)
4146 kind = diag::err_preserve_type_info_not_const;
4147 else
4148 kind = diag::err_preserve_enum_value_not_const;
4149 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange();
4150 return true;
4151 }
4152
4153 // The first argument
4154 Arg = TheCall->getArg(0);
4155 bool InvalidArg = false;
4156 bool ReturnUnsignedInt = true;
4157 if (BuiltinID == BPF::BI__builtin_preserve_field_info) {
4158 if (!isValidBPFPreserveFieldInfoArg(Arg)) {
4159 InvalidArg = true;
4160 kind = diag::err_preserve_field_info_not_field;
4161 }
4162 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) {
4163 if (!isValidBPFPreserveTypeInfoArg(Arg)) {
4164 InvalidArg = true;
4165 kind = diag::err_preserve_type_info_invalid;
4166 }
4167 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) {
4168 if (!isValidBPFPreserveEnumValueArg(Arg)) {
4169 InvalidArg = true;
4170 kind = diag::err_preserve_enum_value_invalid;
4171 }
4172 ReturnUnsignedInt = false;
4173 } else if (BuiltinID == BPF::BI__builtin_btf_type_id) {
4174 ReturnUnsignedInt = false;
4175 }
4176
4177 if (InvalidArg) {
4178 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange();
4179 return true;
4180 }
4181
4182 if (ReturnUnsignedInt)
4183 TheCall->setType(Context.UnsignedIntTy);
4184 else
4185 TheCall->setType(Context.UnsignedLongTy);
4186 return false;
4187}
4188
4189bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
4190 struct ArgInfo {
4191 uint8_t OpNum;
4192 bool IsSigned;
4193 uint8_t BitWidth;
4194 uint8_t Align;
4195 };
4196 struct BuiltinInfo {
4197 unsigned BuiltinID;
4198 ArgInfo Infos[2];
4199 };
4200
4201 static BuiltinInfo Infos[] = {
4202 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} },
4203 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} },
4204 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} },
4205 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} },
4206 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} },
4207 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} },
4208 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} },
4209 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} },
4210 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} },
4211 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} },
4212 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} },
4213
4214 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} },
4215 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} },
4216 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} },
4217 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} },
4218 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} },
4219 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} },
4220 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} },
4221 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} },
4222 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} },
4223 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} },
4224 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} },
4225
4226 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} },
4227 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} },
4228 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} },
4229 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} },
4230 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} },
4231 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} },
4232 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} },
4233 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} },
4234 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} },
4235 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} },
4236 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} },
4237 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} },
4238 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} },
4239 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} },
4240 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} },
4241 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} },
4242 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} },
4243 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} },
4244 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} },
4245 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} },
4246 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} },
4247 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} },
4248 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} },
4249 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} },
4250 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} },
4251 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} },
4252 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} },
4253 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} },
4254 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} },
4255 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} },
4256 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} },
4257 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} },
4258 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} },
4259 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} },
4260 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} },
4261 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} },
4262 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} },
4263 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} },
4264 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} },
4265 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} },
4266 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} },
4267 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} },
4268 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} },
4269 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} },
4270 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} },
4271 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} },
4272 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} },
4273 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} },
4274 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} },
4275 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} },
4276 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} },
4277 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
4278 {{ 1, false, 6, 0 }} },
4279 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} },
4280 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} },
4281 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} },
4282 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} },
4283 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} },
4284 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} },
4285 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
4286 {{ 1, false, 5, 0 }} },
4287 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} },
4288 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} },
4289 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} },
4290 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} },
4291 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} },
4292 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 },
4293 { 2, false, 5, 0 }} },
4294 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 },
4295 { 2, false, 6, 0 }} },
4296 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 },
4297 { 3, false, 5, 0 }} },
4298 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 },
4299 { 3, false, 6, 0 }} },
4300 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} },
4301 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} },
4302 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} },
4303 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} },
4304 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} },
4305 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} },
4306 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} },
4307 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} },
4308 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} },
4309 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} },
4310 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} },
4311 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} },
4312 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} },
4313 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} },
4314 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} },
4315 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
4316 {{ 2, false, 4, 0 },
4317 { 3, false, 5, 0 }} },
4318 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
4319 {{ 2, false, 4, 0 },
4320 { 3, false, 5, 0 }} },
4321 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
4322 {{ 2, false, 4, 0 },
4323 { 3, false, 5, 0 }} },
4324 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
4325 {{ 2, false, 4, 0 },
4326 { 3, false, 5, 0 }} },
4327 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} },
4328 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} },
4329 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} },
4330 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} },
4331 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4332 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4333 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4334 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4335 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_clbaddi, .Infos: {{ .OpNum: 1, .IsSigned: true , .BitWidth: 6, .Align: 0 }} },
4336 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 6, .Align: 0 }} },
4337 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_extract, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 },
4338 { .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4339 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_extractp, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 6, .Align: 0 },
4340 { .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4341 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_lsli, .Infos: {{ .OpNum: 0, .IsSigned: true, .BitWidth: 6, .Align: 0 }} },
4342 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4343 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4344 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4345 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4346 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4347 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4348 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4349 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
4350 .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 4, .Align: 0 }} },
4351 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 4, .Align: 0 }} },
4352 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
4353 .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 4, .Align: 0 }} },
4354 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4355 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4356 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4357 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4358 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4359 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4360 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4361 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4362 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4363 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4364 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4365 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4366 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_valignbi, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4367 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4368 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4369 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4370 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4371 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4372 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4373 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
4374 .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4375 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4376 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4377 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4378 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
4379 .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4380 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4381 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4382 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4383 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
4384 .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4385
4386 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4387 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_128B,
4388 .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4389 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx,
4390 .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4391 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B,
4392 .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4393 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4394 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_128B,
4395 .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4396 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx,
4397 .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4398 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B,
4399 .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4400 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4401 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4402 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4403 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B,
4404 .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4405 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4406 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4407 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4408 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B,
4409 .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4410 };
4411
4412 // Use a dynamically initialized static to sort the table exactly once on
4413 // first run.
4414 static const bool SortOnce =
4415 (llvm::sort(C&: Infos,
4416 Comp: [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
4417 return LHS.BuiltinID < RHS.BuiltinID;
4418 }),
4419 true);
4420 (void)SortOnce;
4421
4422 const BuiltinInfo *F = llvm::partition_point(
4423 Range&: Infos, P: [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; });
4424 if (F == std::end(arr&: Infos) || F->BuiltinID != BuiltinID)
4425 return false;
4426
4427 bool Error = false;
4428
4429 for (const ArgInfo &A : F->Infos) {
4430 // Ignore empty ArgInfo elements.
4431 if (A.BitWidth == 0)
4432 continue;
4433
4434 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0;
4435 int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1;
4436 if (!A.Align) {
4437 Error |= BuiltinConstantArgRange(TheCall, ArgNum: A.OpNum, Low: Min, High: Max);
4438 } else {
4439 unsigned M = 1 << A.Align;
4440 Min *= M;
4441 Max *= M;
4442 Error |= BuiltinConstantArgRange(TheCall, ArgNum: A.OpNum, Low: Min, High: Max);
4443 Error |= BuiltinConstantArgMultiple(TheCall, ArgNum: A.OpNum, Multiple: M);
4444 }
4445 }
4446 return Error;
4447}
4448
4449bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
4450 CallExpr *TheCall) {
4451 return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
4452}
4453
4454bool Sema::CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
4455 unsigned BuiltinID,
4456 CallExpr *TheCall) {
4457 switch (BuiltinID) {
4458 default:
4459 break;
4460 // Basic intrinsics.
4461 case LoongArch::BI__builtin_loongarch_cacop_d:
4462 case LoongArch::BI__builtin_loongarch_cacop_w: {
4463 BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: llvm::maxUIntN(N: 5));
4464 BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: llvm::minIntN(N: 12), High: llvm::maxIntN(N: 12));
4465 break;
4466 }
4467 case LoongArch::BI__builtin_loongarch_break:
4468 case LoongArch::BI__builtin_loongarch_dbar:
4469 case LoongArch::BI__builtin_loongarch_ibar:
4470 case LoongArch::BI__builtin_loongarch_syscall:
4471 // Check if immediate is in [0, 32767].
4472 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 32767);
4473 case LoongArch::BI__builtin_loongarch_csrrd_w:
4474 case LoongArch::BI__builtin_loongarch_csrrd_d:
4475 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 16383);
4476 case LoongArch::BI__builtin_loongarch_csrwr_w:
4477 case LoongArch::BI__builtin_loongarch_csrwr_d:
4478 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 16383);
4479 case LoongArch::BI__builtin_loongarch_csrxchg_w:
4480 case LoongArch::BI__builtin_loongarch_csrxchg_d:
4481 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 16383);
4482 case LoongArch::BI__builtin_loongarch_lddir_d:
4483 case LoongArch::BI__builtin_loongarch_ldpte_d:
4484 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 31);
4485 case LoongArch::BI__builtin_loongarch_movfcsr2gr:
4486 case LoongArch::BI__builtin_loongarch_movgr2fcsr:
4487 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: llvm::maxUIntN(N: 2));
4488
4489 // LSX intrinsics.
4490 case LoongArch::BI__builtin_lsx_vbitclri_b:
4491 case LoongArch::BI__builtin_lsx_vbitrevi_b:
4492 case LoongArch::BI__builtin_lsx_vbitseti_b:
4493 case LoongArch::BI__builtin_lsx_vsat_b:
4494 case LoongArch::BI__builtin_lsx_vsat_bu:
4495 case LoongArch::BI__builtin_lsx_vslli_b:
4496 case LoongArch::BI__builtin_lsx_vsrai_b:
4497 case LoongArch::BI__builtin_lsx_vsrari_b:
4498 case LoongArch::BI__builtin_lsx_vsrli_b:
4499 case LoongArch::BI__builtin_lsx_vsllwil_h_b:
4500 case LoongArch::BI__builtin_lsx_vsllwil_hu_bu:
4501 case LoongArch::BI__builtin_lsx_vrotri_b:
4502 case LoongArch::BI__builtin_lsx_vsrlri_b:
4503 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 7);
4504 case LoongArch::BI__builtin_lsx_vbitclri_h:
4505 case LoongArch::BI__builtin_lsx_vbitrevi_h:
4506 case LoongArch::BI__builtin_lsx_vbitseti_h:
4507 case LoongArch::BI__builtin_lsx_vsat_h:
4508 case LoongArch::BI__builtin_lsx_vsat_hu:
4509 case LoongArch::BI__builtin_lsx_vslli_h:
4510 case LoongArch::BI__builtin_lsx_vsrai_h:
4511 case LoongArch::BI__builtin_lsx_vsrari_h:
4512 case LoongArch::BI__builtin_lsx_vsrli_h:
4513 case LoongArch::BI__builtin_lsx_vsllwil_w_h:
4514 case LoongArch::BI__builtin_lsx_vsllwil_wu_hu:
4515 case LoongArch::BI__builtin_lsx_vrotri_h:
4516 case LoongArch::BI__builtin_lsx_vsrlri_h:
4517 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
4518 case LoongArch::BI__builtin_lsx_vssrarni_b_h:
4519 case LoongArch::BI__builtin_lsx_vssrarni_bu_h:
4520 case LoongArch::BI__builtin_lsx_vssrani_b_h:
4521 case LoongArch::BI__builtin_lsx_vssrani_bu_h:
4522 case LoongArch::BI__builtin_lsx_vsrarni_b_h:
4523 case LoongArch::BI__builtin_lsx_vsrlni_b_h:
4524 case LoongArch::BI__builtin_lsx_vsrlrni_b_h:
4525 case LoongArch::BI__builtin_lsx_vssrlni_b_h:
4526 case LoongArch::BI__builtin_lsx_vssrlni_bu_h:
4527 case LoongArch::BI__builtin_lsx_vssrlrni_b_h:
4528 case LoongArch::BI__builtin_lsx_vssrlrni_bu_h:
4529 case LoongArch::BI__builtin_lsx_vsrani_b_h:
4530 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 15);
4531 case LoongArch::BI__builtin_lsx_vslei_bu:
4532 case LoongArch::BI__builtin_lsx_vslei_hu:
4533 case LoongArch::BI__builtin_lsx_vslei_wu:
4534 case LoongArch::BI__builtin_lsx_vslei_du:
4535 case LoongArch::BI__builtin_lsx_vslti_bu:
4536 case LoongArch::BI__builtin_lsx_vslti_hu:
4537 case LoongArch::BI__builtin_lsx_vslti_wu:
4538 case LoongArch::BI__builtin_lsx_vslti_du:
4539 case LoongArch::BI__builtin_lsx_vmaxi_bu:
4540 case LoongArch::BI__builtin_lsx_vmaxi_hu:
4541 case LoongArch::BI__builtin_lsx_vmaxi_wu:
4542 case LoongArch::BI__builtin_lsx_vmaxi_du:
4543 case LoongArch::BI__builtin_lsx_vmini_bu:
4544 case LoongArch::BI__builtin_lsx_vmini_hu:
4545 case LoongArch::BI__builtin_lsx_vmini_wu:
4546 case LoongArch::BI__builtin_lsx_vmini_du:
4547 case LoongArch::BI__builtin_lsx_vaddi_bu:
4548 case LoongArch::BI__builtin_lsx_vaddi_hu:
4549 case LoongArch::BI__builtin_lsx_vaddi_wu:
4550 case LoongArch::BI__builtin_lsx_vaddi_du:
4551 case LoongArch::BI__builtin_lsx_vbitclri_w:
4552 case LoongArch::BI__builtin_lsx_vbitrevi_w:
4553 case LoongArch::BI__builtin_lsx_vbitseti_w:
4554 case LoongArch::BI__builtin_lsx_vsat_w:
4555 case LoongArch::BI__builtin_lsx_vsat_wu:
4556 case LoongArch::BI__builtin_lsx_vslli_w:
4557 case LoongArch::BI__builtin_lsx_vsrai_w:
4558 case LoongArch::BI__builtin_lsx_vsrari_w:
4559 case LoongArch::BI__builtin_lsx_vsrli_w:
4560 case LoongArch::BI__builtin_lsx_vsllwil_d_w:
4561 case LoongArch::BI__builtin_lsx_vsllwil_du_wu:
4562 case LoongArch::BI__builtin_lsx_vsrlri_w:
4563 case LoongArch::BI__builtin_lsx_vrotri_w:
4564 case LoongArch::BI__builtin_lsx_vsubi_bu:
4565 case LoongArch::BI__builtin_lsx_vsubi_hu:
4566 case LoongArch::BI__builtin_lsx_vbsrl_v:
4567 case LoongArch::BI__builtin_lsx_vbsll_v:
4568 case LoongArch::BI__builtin_lsx_vsubi_wu:
4569 case LoongArch::BI__builtin_lsx_vsubi_du:
4570 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 31);
4571 case LoongArch::BI__builtin_lsx_vssrarni_h_w:
4572 case LoongArch::BI__builtin_lsx_vssrarni_hu_w:
4573 case LoongArch::BI__builtin_lsx_vssrani_h_w:
4574 case LoongArch::BI__builtin_lsx_vssrani_hu_w:
4575 case LoongArch::BI__builtin_lsx_vsrarni_h_w:
4576 case LoongArch::BI__builtin_lsx_vsrani_h_w:
4577 case LoongArch::BI__builtin_lsx_vfrstpi_b:
4578 case LoongArch::BI__builtin_lsx_vfrstpi_h:
4579 case LoongArch::BI__builtin_lsx_vsrlni_h_w:
4580 case LoongArch::BI__builtin_lsx_vsrlrni_h_w:
4581 case LoongArch::BI__builtin_lsx_vssrlni_h_w:
4582 case LoongArch::BI__builtin_lsx_vssrlni_hu_w:
4583 case LoongArch::BI__builtin_lsx_vssrlrni_h_w:
4584 case LoongArch::BI__builtin_lsx_vssrlrni_hu_w:
4585 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 31);
4586 case LoongArch::BI__builtin_lsx_vbitclri_d:
4587 case LoongArch::BI__builtin_lsx_vbitrevi_d:
4588 case LoongArch::BI__builtin_lsx_vbitseti_d:
4589 case LoongArch::BI__builtin_lsx_vsat_d:
4590 case LoongArch::BI__builtin_lsx_vsat_du:
4591 case LoongArch::BI__builtin_lsx_vslli_d:
4592 case LoongArch::BI__builtin_lsx_vsrai_d:
4593 case LoongArch::BI__builtin_lsx_vsrli_d:
4594 case LoongArch::BI__builtin_lsx_vsrari_d:
4595 case LoongArch::BI__builtin_lsx_vrotri_d:
4596 case LoongArch::BI__builtin_lsx_vsrlri_d:
4597 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 63);
4598 case LoongArch::BI__builtin_lsx_vssrarni_w_d:
4599 case LoongArch::BI__builtin_lsx_vssrarni_wu_d:
4600 case LoongArch::BI__builtin_lsx_vssrani_w_d:
4601 case LoongArch::BI__builtin_lsx_vssrani_wu_d:
4602 case LoongArch::BI__builtin_lsx_vsrarni_w_d:
4603 case LoongArch::BI__builtin_lsx_vsrlni_w_d:
4604 case LoongArch::BI__builtin_lsx_vsrlrni_w_d:
4605 case LoongArch::BI__builtin_lsx_vssrlni_w_d:
4606 case LoongArch::BI__builtin_lsx_vssrlni_wu_d:
4607 case LoongArch::BI__builtin_lsx_vssrlrni_w_d:
4608 case LoongArch::BI__builtin_lsx_vssrlrni_wu_d:
4609 case LoongArch::BI__builtin_lsx_vsrani_w_d:
4610 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 63);
4611 case LoongArch::BI__builtin_lsx_vssrarni_d_q:
4612 case LoongArch::BI__builtin_lsx_vssrarni_du_q:
4613 case LoongArch::BI__builtin_lsx_vssrani_d_q:
4614 case LoongArch::BI__builtin_lsx_vssrani_du_q:
4615 case LoongArch::BI__builtin_lsx_vsrarni_d_q:
4616 case LoongArch::BI__builtin_lsx_vssrlni_d_q:
4617 case LoongArch::BI__builtin_lsx_vssrlni_du_q:
4618 case LoongArch::BI__builtin_lsx_vssrlrni_d_q:
4619 case LoongArch::BI__builtin_lsx_vssrlrni_du_q:
4620 case LoongArch::BI__builtin_lsx_vsrani_d_q:
4621 case LoongArch::BI__builtin_lsx_vsrlrni_d_q:
4622 case LoongArch::BI__builtin_lsx_vsrlni_d_q:
4623 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 127);
4624 case LoongArch::BI__builtin_lsx_vseqi_b:
4625 case LoongArch::BI__builtin_lsx_vseqi_h:
4626 case LoongArch::BI__builtin_lsx_vseqi_w:
4627 case LoongArch::BI__builtin_lsx_vseqi_d:
4628 case LoongArch::BI__builtin_lsx_vslti_b:
4629 case LoongArch::BI__builtin_lsx_vslti_h:
4630 case LoongArch::BI__builtin_lsx_vslti_w:
4631 case LoongArch::BI__builtin_lsx_vslti_d:
4632 case LoongArch::BI__builtin_lsx_vslei_b:
4633 case LoongArch::BI__builtin_lsx_vslei_h:
4634 case LoongArch::BI__builtin_lsx_vslei_w:
4635 case LoongArch::BI__builtin_lsx_vslei_d:
4636 case LoongArch::BI__builtin_lsx_vmaxi_b:
4637 case LoongArch::BI__builtin_lsx_vmaxi_h:
4638 case LoongArch::BI__builtin_lsx_vmaxi_w:
4639 case LoongArch::BI__builtin_lsx_vmaxi_d:
4640 case LoongArch::BI__builtin_lsx_vmini_b:
4641 case LoongArch::BI__builtin_lsx_vmini_h:
4642 case LoongArch::BI__builtin_lsx_vmini_w:
4643 case LoongArch::BI__builtin_lsx_vmini_d:
4644 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -16, High: 15);
4645 case LoongArch::BI__builtin_lsx_vandi_b:
4646 case LoongArch::BI__builtin_lsx_vnori_b:
4647 case LoongArch::BI__builtin_lsx_vori_b:
4648 case LoongArch::BI__builtin_lsx_vshuf4i_b:
4649 case LoongArch::BI__builtin_lsx_vshuf4i_h:
4650 case LoongArch::BI__builtin_lsx_vshuf4i_w:
4651 case LoongArch::BI__builtin_lsx_vxori_b:
4652 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 255);
4653 case LoongArch::BI__builtin_lsx_vbitseli_b:
4654 case LoongArch::BI__builtin_lsx_vshuf4i_d:
4655 case LoongArch::BI__builtin_lsx_vextrins_b:
4656 case LoongArch::BI__builtin_lsx_vextrins_h:
4657 case LoongArch::BI__builtin_lsx_vextrins_w:
4658 case LoongArch::BI__builtin_lsx_vextrins_d:
4659 case LoongArch::BI__builtin_lsx_vpermi_w:
4660 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 255);
4661 case LoongArch::BI__builtin_lsx_vpickve2gr_b:
4662 case LoongArch::BI__builtin_lsx_vpickve2gr_bu:
4663 case LoongArch::BI__builtin_lsx_vreplvei_b:
4664 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
4665 case LoongArch::BI__builtin_lsx_vinsgr2vr_b:
4666 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 15);
4667 case LoongArch::BI__builtin_lsx_vpickve2gr_h:
4668 case LoongArch::BI__builtin_lsx_vpickve2gr_hu:
4669 case LoongArch::BI__builtin_lsx_vreplvei_h:
4670 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 7);
4671 case LoongArch::BI__builtin_lsx_vinsgr2vr_h:
4672 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 7);
4673 case LoongArch::BI__builtin_lsx_vpickve2gr_w:
4674 case LoongArch::BI__builtin_lsx_vpickve2gr_wu:
4675 case LoongArch::BI__builtin_lsx_vreplvei_w:
4676 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 3);
4677 case LoongArch::BI__builtin_lsx_vinsgr2vr_w:
4678 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 3);
4679 case LoongArch::BI__builtin_lsx_vpickve2gr_d:
4680 case LoongArch::BI__builtin_lsx_vpickve2gr_du:
4681 case LoongArch::BI__builtin_lsx_vreplvei_d:
4682 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1);
4683 case LoongArch::BI__builtin_lsx_vinsgr2vr_d:
4684 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 1);
4685 case LoongArch::BI__builtin_lsx_vstelm_b:
4686 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -128, High: 127) ||
4687 BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 15);
4688 case LoongArch::BI__builtin_lsx_vstelm_h:
4689 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -256, High: 254) ||
4690 BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 7);
4691 case LoongArch::BI__builtin_lsx_vstelm_w:
4692 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -512, High: 508) ||
4693 BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 3);
4694 case LoongArch::BI__builtin_lsx_vstelm_d:
4695 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -1024, High: 1016) ||
4696 BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 1);
4697 case LoongArch::BI__builtin_lsx_vldrepl_b:
4698 case LoongArch::BI__builtin_lsx_vld:
4699 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -2048, High: 2047);
4700 case LoongArch::BI__builtin_lsx_vldrepl_h:
4701 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -2048, High: 2046);
4702 case LoongArch::BI__builtin_lsx_vldrepl_w:
4703 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -2048, High: 2044);
4704 case LoongArch::BI__builtin_lsx_vldrepl_d:
4705 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -2048, High: 2040);
4706 case LoongArch::BI__builtin_lsx_vst:
4707 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -2048, High: 2047);
4708 case LoongArch::BI__builtin_lsx_vldi:
4709 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: -4096, High: 4095);
4710 case LoongArch::BI__builtin_lsx_vrepli_b:
4711 case LoongArch::BI__builtin_lsx_vrepli_h:
4712 case LoongArch::BI__builtin_lsx_vrepli_w:
4713 case LoongArch::BI__builtin_lsx_vrepli_d:
4714 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: -512, High: 511);
4715
4716 // LASX intrinsics.
4717 case LoongArch::BI__builtin_lasx_xvbitclri_b:
4718 case LoongArch::BI__builtin_lasx_xvbitrevi_b:
4719 case LoongArch::BI__builtin_lasx_xvbitseti_b:
4720 case LoongArch::BI__builtin_lasx_xvsat_b:
4721 case LoongArch::BI__builtin_lasx_xvsat_bu:
4722 case LoongArch::BI__builtin_lasx_xvslli_b:
4723 case LoongArch::BI__builtin_lasx_xvsrai_b:
4724 case LoongArch::BI__builtin_lasx_xvsrari_b:
4725 case LoongArch::BI__builtin_lasx_xvsrli_b:
4726 case LoongArch::BI__builtin_lasx_xvsllwil_h_b:
4727 case LoongArch::BI__builtin_lasx_xvsllwil_hu_bu:
4728 case LoongArch::BI__builtin_lasx_xvrotri_b:
4729 case LoongArch::BI__builtin_lasx_xvsrlri_b:
4730 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 7);
4731 case LoongArch::BI__builtin_lasx_xvbitclri_h:
4732 case LoongArch::BI__builtin_lasx_xvbitrevi_h:
4733 case LoongArch::BI__builtin_lasx_xvbitseti_h:
4734 case LoongArch::BI__builtin_lasx_xvsat_h:
4735 case LoongArch::BI__builtin_lasx_xvsat_hu:
4736 case LoongArch::BI__builtin_lasx_xvslli_h:
4737 case LoongArch::BI__builtin_lasx_xvsrai_h:
4738 case LoongArch::BI__builtin_lasx_xvsrari_h:
4739 case LoongArch::BI__builtin_lasx_xvsrli_h:
4740 case LoongArch::BI__builtin_lasx_xvsllwil_w_h:
4741 case LoongArch::BI__builtin_lasx_xvsllwil_wu_hu:
4742 case LoongArch::BI__builtin_lasx_xvrotri_h:
4743 case LoongArch::BI__builtin_lasx_xvsrlri_h:
4744 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
4745 case LoongArch::BI__builtin_lasx_xvssrarni_b_h:
4746 case LoongArch::BI__builtin_lasx_xvssrarni_bu_h:
4747 case LoongArch::BI__builtin_lasx_xvssrani_b_h:
4748 case LoongArch::BI__builtin_lasx_xvssrani_bu_h:
4749 case LoongArch::BI__builtin_lasx_xvsrarni_b_h:
4750 case LoongArch::BI__builtin_lasx_xvsrlni_b_h:
4751 case LoongArch::BI__builtin_lasx_xvsrlrni_b_h:
4752 case LoongArch::BI__builtin_lasx_xvssrlni_b_h:
4753 case LoongArch::BI__builtin_lasx_xvssrlni_bu_h:
4754 case LoongArch::BI__builtin_lasx_xvssrlrni_b_h:
4755 case LoongArch::BI__builtin_lasx_xvssrlrni_bu_h:
4756 case LoongArch::BI__builtin_lasx_xvsrani_b_h:
4757 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 15);
4758 case LoongArch::BI__builtin_lasx_xvslei_bu:
4759 case LoongArch::BI__builtin_lasx_xvslei_hu:
4760 case LoongArch::BI__builtin_lasx_xvslei_wu:
4761 case LoongArch::BI__builtin_lasx_xvslei_du:
4762 case LoongArch::BI__builtin_lasx_xvslti_bu:
4763 case LoongArch::BI__builtin_lasx_xvslti_hu:
4764 case LoongArch::BI__builtin_lasx_xvslti_wu:
4765 case LoongArch::BI__builtin_lasx_xvslti_du:
4766 case LoongArch::BI__builtin_lasx_xvmaxi_bu:
4767 case LoongArch::BI__builtin_lasx_xvmaxi_hu:
4768 case LoongArch::BI__builtin_lasx_xvmaxi_wu:
4769 case LoongArch::BI__builtin_lasx_xvmaxi_du:
4770 case LoongArch::BI__builtin_lasx_xvmini_bu:
4771 case LoongArch::BI__builtin_lasx_xvmini_hu:
4772 case LoongArch::BI__builtin_lasx_xvmini_wu:
4773 case LoongArch::BI__builtin_lasx_xvmini_du:
4774 case LoongArch::BI__builtin_lasx_xvaddi_bu:
4775 case LoongArch::BI__builtin_lasx_xvaddi_hu:
4776 case LoongArch::BI__builtin_lasx_xvaddi_wu:
4777 case LoongArch::BI__builtin_lasx_xvaddi_du:
4778 case LoongArch::BI__builtin_lasx_xvbitclri_w:
4779 case LoongArch::BI__builtin_lasx_xvbitrevi_w:
4780 case LoongArch::BI__builtin_lasx_xvbitseti_w:
4781 case LoongArch::BI__builtin_lasx_xvsat_w:
4782 case LoongArch::BI__builtin_lasx_xvsat_wu:
4783 case LoongArch::BI__builtin_lasx_xvslli_w:
4784 case LoongArch::BI__builtin_lasx_xvsrai_w:
4785 case LoongArch::BI__builtin_lasx_xvsrari_w:
4786 case LoongArch::BI__builtin_lasx_xvsrli_w:
4787 case LoongArch::BI__builtin_lasx_xvsllwil_d_w:
4788 case LoongArch::BI__builtin_lasx_xvsllwil_du_wu:
4789 case LoongArch::BI__builtin_lasx_xvsrlri_w:
4790 case LoongArch::BI__builtin_lasx_xvrotri_w:
4791 case LoongArch::BI__builtin_lasx_xvsubi_bu:
4792 case LoongArch::BI__builtin_lasx_xvsubi_hu:
4793 case LoongArch::BI__builtin_lasx_xvsubi_wu:
4794 case LoongArch::BI__builtin_lasx_xvsubi_du:
4795 case LoongArch::BI__builtin_lasx_xvbsrl_v:
4796 case LoongArch::BI__builtin_lasx_xvbsll_v:
4797 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 31);
4798 case LoongArch::BI__builtin_lasx_xvssrarni_h_w:
4799 case LoongArch::BI__builtin_lasx_xvssrarni_hu_w:
4800 case LoongArch::BI__builtin_lasx_xvssrani_h_w:
4801 case LoongArch::BI__builtin_lasx_xvssrani_hu_w:
4802 case LoongArch::BI__builtin_lasx_xvsrarni_h_w:
4803 case LoongArch::BI__builtin_lasx_xvsrani_h_w:
4804 case LoongArch::BI__builtin_lasx_xvfrstpi_b:
4805 case LoongArch::BI__builtin_lasx_xvfrstpi_h:
4806 case LoongArch::BI__builtin_lasx_xvsrlni_h_w:
4807 case LoongArch::BI__builtin_lasx_xvsrlrni_h_w:
4808 case LoongArch::BI__builtin_lasx_xvssrlni_h_w:
4809 case LoongArch::BI__builtin_lasx_xvssrlni_hu_w:
4810 case LoongArch::BI__builtin_lasx_xvssrlrni_h_w:
4811 case LoongArch::BI__builtin_lasx_xvssrlrni_hu_w:
4812 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 31);
4813 case LoongArch::BI__builtin_lasx_xvbitclri_d:
4814 case LoongArch::BI__builtin_lasx_xvbitrevi_d:
4815 case LoongArch::BI__builtin_lasx_xvbitseti_d:
4816 case LoongArch::BI__builtin_lasx_xvsat_d:
4817 case LoongArch::BI__builtin_lasx_xvsat_du:
4818 case LoongArch::BI__builtin_lasx_xvslli_d:
4819 case LoongArch::BI__builtin_lasx_xvsrai_d:
4820 case LoongArch::BI__builtin_lasx_xvsrli_d:
4821 case LoongArch::BI__builtin_lasx_xvsrari_d:
4822 case LoongArch::BI__builtin_lasx_xvrotri_d:
4823 case LoongArch::BI__builtin_lasx_xvsrlri_d:
4824 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 63);
4825 case LoongArch::BI__builtin_lasx_xvssrarni_w_d:
4826 case LoongArch::BI__builtin_lasx_xvssrarni_wu_d:
4827 case LoongArch::BI__builtin_lasx_xvssrani_w_d:
4828 case LoongArch::BI__builtin_lasx_xvssrani_wu_d:
4829 case LoongArch::BI__builtin_lasx_xvsrarni_w_d:
4830 case LoongArch::BI__builtin_lasx_xvsrlni_w_d:
4831 case LoongArch::BI__builtin_lasx_xvsrlrni_w_d:
4832 case LoongArch::BI__builtin_lasx_xvssrlni_w_d:
4833 case LoongArch::BI__builtin_lasx_xvssrlni_wu_d:
4834 case LoongArch::BI__builtin_lasx_xvssrlrni_w_d:
4835 case LoongArch::BI__builtin_lasx_xvssrlrni_wu_d:
4836 case LoongArch::BI__builtin_lasx_xvsrani_w_d:
4837 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 63);
4838 case LoongArch::BI__builtin_lasx_xvssrarni_d_q:
4839 case LoongArch::BI__builtin_lasx_xvssrarni_du_q:
4840 case LoongArch::BI__builtin_lasx_xvssrani_d_q:
4841 case LoongArch::BI__builtin_lasx_xvssrani_du_q:
4842 case LoongArch::BI__builtin_lasx_xvsrarni_d_q:
4843 case LoongArch::BI__builtin_lasx_xvssrlni_d_q:
4844 case LoongArch::BI__builtin_lasx_xvssrlni_du_q:
4845 case LoongArch::BI__builtin_lasx_xvssrlrni_d_q:
4846 case LoongArch::BI__builtin_lasx_xvssrlrni_du_q:
4847 case LoongArch::BI__builtin_lasx_xvsrani_d_q:
4848 case LoongArch::BI__builtin_lasx_xvsrlni_d_q:
4849 case LoongArch::BI__builtin_lasx_xvsrlrni_d_q:
4850 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 127);
4851 case LoongArch::BI__builtin_lasx_xvseqi_b:
4852 case LoongArch::BI__builtin_lasx_xvseqi_h:
4853 case LoongArch::BI__builtin_lasx_xvseqi_w:
4854 case LoongArch::BI__builtin_lasx_xvseqi_d:
4855 case LoongArch::BI__builtin_lasx_xvslti_b:
4856 case LoongArch::BI__builtin_lasx_xvslti_h:
4857 case LoongArch::BI__builtin_lasx_xvslti_w:
4858 case LoongArch::BI__builtin_lasx_xvslti_d:
4859 case LoongArch::BI__builtin_lasx_xvslei_b:
4860 case LoongArch::BI__builtin_lasx_xvslei_h:
4861 case LoongArch::BI__builtin_lasx_xvslei_w:
4862 case LoongArch::BI__builtin_lasx_xvslei_d:
4863 case LoongArch::BI__builtin_lasx_xvmaxi_b:
4864 case LoongArch::BI__builtin_lasx_xvmaxi_h:
4865 case LoongArch::BI__builtin_lasx_xvmaxi_w:
4866 case LoongArch::BI__builtin_lasx_xvmaxi_d:
4867 case LoongArch::BI__builtin_lasx_xvmini_b:
4868 case LoongArch::BI__builtin_lasx_xvmini_h:
4869 case LoongArch::BI__builtin_lasx_xvmini_w:
4870 case LoongArch::BI__builtin_lasx_xvmini_d:
4871 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -16, High: 15);
4872 case LoongArch::BI__builtin_lasx_xvandi_b:
4873 case LoongArch::BI__builtin_lasx_xvnori_b:
4874 case LoongArch::BI__builtin_lasx_xvori_b:
4875 case LoongArch::BI__builtin_lasx_xvshuf4i_b:
4876 case LoongArch::BI__builtin_lasx_xvshuf4i_h:
4877 case LoongArch::BI__builtin_lasx_xvshuf4i_w:
4878 case LoongArch::BI__builtin_lasx_xvxori_b:
4879 case LoongArch::BI__builtin_lasx_xvpermi_d:
4880 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 255);
4881 case LoongArch::BI__builtin_lasx_xvbitseli_b:
4882 case LoongArch::BI__builtin_lasx_xvshuf4i_d:
4883 case LoongArch::BI__builtin_lasx_xvextrins_b:
4884 case LoongArch::BI__builtin_lasx_xvextrins_h:
4885 case LoongArch::BI__builtin_lasx_xvextrins_w:
4886 case LoongArch::BI__builtin_lasx_xvextrins_d:
4887 case LoongArch::BI__builtin_lasx_xvpermi_q:
4888 case LoongArch::BI__builtin_lasx_xvpermi_w:
4889 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 255);
4890 case LoongArch::BI__builtin_lasx_xvrepl128vei_b:
4891 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
4892 case LoongArch::BI__builtin_lasx_xvrepl128vei_h:
4893 case LoongArch::BI__builtin_lasx_xvpickve2gr_w:
4894 case LoongArch::BI__builtin_lasx_xvpickve2gr_wu:
4895 case LoongArch::BI__builtin_lasx_xvpickve_w_f:
4896 case LoongArch::BI__builtin_lasx_xvpickve_w:
4897 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 7);
4898 case LoongArch::BI__builtin_lasx_xvinsgr2vr_w:
4899 case LoongArch::BI__builtin_lasx_xvinsve0_w:
4900 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 7);
4901 case LoongArch::BI__builtin_lasx_xvrepl128vei_w:
4902 case LoongArch::BI__builtin_lasx_xvpickve2gr_d:
4903 case LoongArch::BI__builtin_lasx_xvpickve2gr_du:
4904 case LoongArch::BI__builtin_lasx_xvpickve_d_f:
4905 case LoongArch::BI__builtin_lasx_xvpickve_d:
4906 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 3);
4907 case LoongArch::BI__builtin_lasx_xvinsve0_d:
4908 case LoongArch::BI__builtin_lasx_xvinsgr2vr_d:
4909 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 3);
4910 case LoongArch::BI__builtin_lasx_xvstelm_b:
4911 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -128, High: 127) ||
4912 BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 31);
4913 case LoongArch::BI__builtin_lasx_xvstelm_h:
4914 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -256, High: 254) ||
4915 BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 15);
4916 case LoongArch::BI__builtin_lasx_xvstelm_w:
4917 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -512, High: 508) ||
4918 BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 7);
4919 case LoongArch::BI__builtin_lasx_xvstelm_d:
4920 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -1024, High: 1016) ||
4921 BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 3);
4922 case LoongArch::BI__builtin_lasx_xvrepl128vei_d:
4923 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1);
4924 case LoongArch::BI__builtin_lasx_xvldrepl_b:
4925 case LoongArch::BI__builtin_lasx_xvld:
4926 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -2048, High: 2047);
4927 case LoongArch::BI__builtin_lasx_xvldrepl_h:
4928 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -2048, High: 2046);
4929 case LoongArch::BI__builtin_lasx_xvldrepl_w:
4930 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -2048, High: 2044);
4931 case LoongArch::BI__builtin_lasx_xvldrepl_d:
4932 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -2048, High: 2040);
4933 case LoongArch::BI__builtin_lasx_xvst:
4934 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -2048, High: 2047);
4935 case LoongArch::BI__builtin_lasx_xvldi:
4936 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: -4096, High: 4095);
4937 case LoongArch::BI__builtin_lasx_xvrepli_b:
4938 case LoongArch::BI__builtin_lasx_xvrepli_h:
4939 case LoongArch::BI__builtin_lasx_xvrepli_w:
4940 case LoongArch::BI__builtin_lasx_xvrepli_d:
4941 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: -512, High: 511);
4942 }
4943 return false;
4944}
4945
4946bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
4947 unsigned BuiltinID, CallExpr *TheCall) {
4948 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
4949 CheckMipsBuiltinArgument(BuiltinID, TheCall);
4950}
4951
4952bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
4953 CallExpr *TheCall) {
4954
4955 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
4956 BuiltinID <= Mips::BI__builtin_mips_lwx) {
4957 if (!TI.hasFeature("dsp"))
4958 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
4959 }
4960
4961 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
4962 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
4963 if (!TI.hasFeature("dspr2"))
4964 return Diag(TheCall->getBeginLoc(),
4965 diag::err_mips_builtin_requires_dspr2);
4966 }
4967
4968 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
4969 BuiltinID <= Mips::BI__builtin_msa_xori_b) {
4970 if (!TI.hasFeature("msa"))
4971 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
4972 }
4973
4974 return false;
4975}
4976
4977// CheckMipsBuiltinArgument - Checks the constant value passed to the
4978// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
4979// ordering for DSP is unspecified. MSA is ordered by the data format used
4980// by the underlying instruction i.e., df/m, df/n and then by size.
4981//
4982// FIXME: The size tests here should instead be tablegen'd along with the
4983// definitions from include/clang/Basic/BuiltinsMips.def.
4984// FIXME: GCC is strict on signedness for some of these intrinsics, we should
4985// be too.
4986bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
4987 unsigned i = 0, l = 0, u = 0, m = 0;
4988 switch (BuiltinID) {
4989 default: return false;
4990 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
4991 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
4992 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
4993 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
4994 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
4995 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
4996 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
4997 // MSA intrinsics. Instructions (which the intrinsics maps to) which use the
4998 // df/m field.
4999 // These intrinsics take an unsigned 3 bit immediate.
5000 case Mips::BI__builtin_msa_bclri_b:
5001 case Mips::BI__builtin_msa_bnegi_b:
5002 case Mips::BI__builtin_msa_bseti_b:
5003 case Mips::BI__builtin_msa_sat_s_b:
5004 case Mips::BI__builtin_msa_sat_u_b:
5005 case Mips::BI__builtin_msa_slli_b:
5006 case Mips::BI__builtin_msa_srai_b:
5007 case Mips::BI__builtin_msa_srari_b:
5008 case Mips::BI__builtin_msa_srli_b:
5009 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
5010 case Mips::BI__builtin_msa_binsli_b:
5011 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
5012 // These intrinsics take an unsigned 4 bit immediate.
5013 case Mips::BI__builtin_msa_bclri_h:
5014 case Mips::BI__builtin_msa_bnegi_h:
5015 case Mips::BI__builtin_msa_bseti_h:
5016 case Mips::BI__builtin_msa_sat_s_h:
5017 case Mips::BI__builtin_msa_sat_u_h:
5018 case Mips::BI__builtin_msa_slli_h:
5019 case Mips::BI__builtin_msa_srai_h:
5020 case Mips::BI__builtin_msa_srari_h:
5021 case Mips::BI__builtin_msa_srli_h:
5022 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
5023 case Mips::BI__builtin_msa_binsli_h:
5024 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
5025 // These intrinsics take an unsigned 5 bit immediate.
5026 // The first block of intrinsics actually have an unsigned 5 bit field,
5027 // not a df/n field.
5028 case Mips::BI__builtin_msa_cfcmsa:
5029 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
5030 case Mips::BI__builtin_msa_clei_u_b:
5031 case Mips::BI__builtin_msa_clei_u_h:
5032 case Mips::BI__builtin_msa_clei_u_w:
5033 case Mips::BI__builtin_msa_clei_u_d:
5034 case Mips::BI__builtin_msa_clti_u_b:
5035 case Mips::BI__builtin_msa_clti_u_h:
5036 case Mips::BI__builtin_msa_clti_u_w:
5037 case Mips::BI__builtin_msa_clti_u_d:
5038 case Mips::BI__builtin_msa_maxi_u_b:
5039 case Mips::BI__builtin_msa_maxi_u_h:
5040 case Mips::BI__builtin_msa_maxi_u_w:
5041 case Mips::BI__builtin_msa_maxi_u_d:
5042 case Mips::BI__builtin_msa_mini_u_b:
5043 case Mips::BI__builtin_msa_mini_u_h:
5044 case Mips::BI__builtin_msa_mini_u_w:
5045 case Mips::BI__builtin_msa_mini_u_d:
5046 case Mips::BI__builtin_msa_addvi_b:
5047 case Mips::BI__builtin_msa_addvi_h:
5048 case Mips::BI__builtin_msa_addvi_w:
5049 case Mips::BI__builtin_msa_addvi_d:
5050 case Mips::BI__builtin_msa_bclri_w:
5051 case Mips::BI__builtin_msa_bnegi_w:
5052 case Mips::BI__builtin_msa_bseti_w:
5053 case Mips::BI__builtin_msa_sat_s_w:
5054 case Mips::BI__builtin_msa_sat_u_w:
5055 case Mips::BI__builtin_msa_slli_w:
5056 case Mips::BI__builtin_msa_srai_w:
5057 case Mips::BI__builtin_msa_srari_w:
5058 case Mips::BI__builtin_msa_srli_w:
5059 case Mips::BI__builtin_msa_srlri_w:
5060 case Mips::BI__builtin_msa_subvi_b:
5061 case Mips::BI__builtin_msa_subvi_h:
5062 case Mips::BI__builtin_msa_subvi_w:
5063 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
5064 case Mips::BI__builtin_msa_binsli_w:
5065 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
5066 // These intrinsics take an unsigned 6 bit immediate.
5067 case Mips::BI__builtin_msa_bclri_d:
5068 case Mips::BI__builtin_msa_bnegi_d:
5069 case Mips::BI__builtin_msa_bseti_d:
5070 case Mips::BI__builtin_msa_sat_s_d:
5071 case Mips::BI__builtin_msa_sat_u_d:
5072 case Mips::BI__builtin_msa_slli_d:
5073 case Mips::BI__builtin_msa_srai_d:
5074 case Mips::BI__builtin_msa_srari_d:
5075 case Mips::BI__builtin_msa_srli_d:
5076 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
5077 case Mips::BI__builtin_msa_binsli_d:
5078 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
5079 // These intrinsics take a signed 5 bit immediate.
5080 case Mips::BI__builtin_msa_ceqi_b:
5081 case Mips::BI__builtin_msa_ceqi_h:
5082 case Mips::BI__builtin_msa_ceqi_w:
5083 case Mips::BI__builtin_msa_ceqi_d:
5084 case Mips::BI__builtin_msa_clti_s_b:
5085 case Mips::BI__builtin_msa_clti_s_h:
5086 case Mips::BI__builtin_msa_clti_s_w:
5087 case Mips::BI__builtin_msa_clti_s_d:
5088 case Mips::BI__builtin_msa_clei_s_b:
5089 case Mips::BI__builtin_msa_clei_s_h:
5090 case Mips::BI__builtin_msa_clei_s_w:
5091 case Mips::BI__builtin_msa_clei_s_d:
5092 case Mips::BI__builtin_msa_maxi_s_b:
5093 case Mips::BI__builtin_msa_maxi_s_h:
5094 case Mips::BI__builtin_msa_maxi_s_w:
5095 case Mips::BI__builtin_msa_maxi_s_d:
5096 case Mips::BI__builtin_msa_mini_s_b:
5097 case Mips::BI__builtin_msa_mini_s_h:
5098 case Mips::BI__builtin_msa_mini_s_w:
5099 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
5100 // These intrinsics take an unsigned 8 bit immediate.
5101 case Mips::BI__builtin_msa_andi_b:
5102 case Mips::BI__builtin_msa_nori_b:
5103 case Mips::BI__builtin_msa_ori_b:
5104 case Mips::BI__builtin_msa_shf_b:
5105 case Mips::BI__builtin_msa_shf_h:
5106 case Mips::BI__builtin_msa_shf_w:
5107 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
5108 case Mips::BI__builtin_msa_bseli_b:
5109 case Mips::BI__builtin_msa_bmnzi_b:
5110 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
5111 // df/n format
5112 // These intrinsics take an unsigned 4 bit immediate.
5113 case Mips::BI__builtin_msa_copy_s_b:
5114 case Mips::BI__builtin_msa_copy_u_b:
5115 case Mips::BI__builtin_msa_insve_b:
5116 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
5117 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
5118 // These intrinsics take an unsigned 3 bit immediate.
5119 case Mips::BI__builtin_msa_copy_s_h:
5120 case Mips::BI__builtin_msa_copy_u_h:
5121 case Mips::BI__builtin_msa_insve_h:
5122 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
5123 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
5124 // These intrinsics take an unsigned 2 bit immediate.
5125 case Mips::BI__builtin_msa_copy_s_w:
5126 case Mips::BI__builtin_msa_copy_u_w:
5127 case Mips::BI__builtin_msa_insve_w:
5128 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
5129 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
5130 // These intrinsics take an unsigned 1 bit immediate.
5131 case Mips::BI__builtin_msa_copy_s_d:
5132 case Mips::BI__builtin_msa_copy_u_d:
5133 case Mips::BI__builtin_msa_insve_d:
5134 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
5135 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
5136 // Memory offsets and immediate loads.
5137 // These intrinsics take a signed 10 bit immediate.
5138 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
5139 case Mips::BI__builtin_msa_ldi_h:
5140 case Mips::BI__builtin_msa_ldi_w:
5141 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
5142 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
5143 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
5144 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
5145 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
5146 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
5147 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
5148 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
5149 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
5150 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
5151 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
5152 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
5153 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
5154 }
5155
5156 if (!m)
5157 return BuiltinConstantArgRange(TheCall, ArgNum: i, Low: l, High: u);
5158
5159 return BuiltinConstantArgRange(TheCall, ArgNum: i, Low: l, High: u) ||
5160 BuiltinConstantArgMultiple(TheCall, ArgNum: i, Multiple: m);
5161}
5162
5163/// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
5164/// advancing the pointer over the consumed characters. The decoded type is
5165/// returned. If the decoded type represents a constant integer with a
5166/// constraint on its value then Mask is set to that value. The type descriptors
5167/// used in Str are specific to PPC MMA builtins and are documented in the file
5168/// defining the PPC builtins.
5169static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
5170 unsigned &Mask) {
5171 bool RequireICE = false;
5172 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
5173 switch (*Str++) {
5174 case 'V':
5175 return Context.getVectorType(VectorType: Context.UnsignedCharTy, NumElts: 16,
5176 VecKind: VectorKind::AltiVecVector);
5177 case 'i': {
5178 char *End;
5179 unsigned size = strtoul(nptr: Str, endptr: &End, base: 10);
5180 assert(End != Str && "Missing constant parameter constraint");
5181 Str = End;
5182 Mask = size;
5183 return Context.IntTy;
5184 }
5185 case 'W': {
5186 char *End;
5187 unsigned size = strtoul(nptr: Str, endptr: &End, base: 10);
5188 assert(End != Str && "Missing PowerPC MMA type size");
5189 Str = End;
5190 QualType Type;
5191 switch (size) {
5192 #define PPC_VECTOR_TYPE(typeName, Id, size) \
5193 case size: Type = Context.Id##Ty; break;
5194 #include "clang/Basic/PPCTypes.def"
5195 default: llvm_unreachable("Invalid PowerPC MMA vector type");
5196 }
5197 bool CheckVectorArgs = false;
5198 while (!CheckVectorArgs) {
5199 switch (*Str++) {
5200 case '*':
5201 Type = Context.getPointerType(T: Type);
5202 break;
5203 case 'C':
5204 Type = Type.withConst();
5205 break;
5206 default:
5207 CheckVectorArgs = true;
5208 --Str;
5209 break;
5210 }
5211 }
5212 return Type;
5213 }
5214 default:
5215 return Context.DecodeTypeStr(Str&: --Str, Context, Error, RequireICE, AllowTypeModifiers: true);
5216 }
5217}
5218
5219static bool isPPC_64Builtin(unsigned BuiltinID) {
5220 // These builtins only work on PPC 64bit targets.
5221 switch (BuiltinID) {
5222 case PPC::BI__builtin_divde:
5223 case PPC::BI__builtin_divdeu:
5224 case PPC::BI__builtin_bpermd:
5225 case PPC::BI__builtin_pdepd:
5226 case PPC::BI__builtin_pextd:
5227 case PPC::BI__builtin_ppc_ldarx:
5228 case PPC::BI__builtin_ppc_stdcx:
5229 case PPC::BI__builtin_ppc_tdw:
5230 case PPC::BI__builtin_ppc_trapd:
5231 case PPC::BI__builtin_ppc_cmpeqb:
5232 case PPC::BI__builtin_ppc_setb:
5233 case PPC::BI__builtin_ppc_mulhd:
5234 case PPC::BI__builtin_ppc_mulhdu:
5235 case PPC::BI__builtin_ppc_maddhd:
5236 case PPC::BI__builtin_ppc_maddhdu:
5237 case PPC::BI__builtin_ppc_maddld:
5238 case PPC::BI__builtin_ppc_load8r:
5239 case PPC::BI__builtin_ppc_store8r:
5240 case PPC::BI__builtin_ppc_insert_exp:
5241 case PPC::BI__builtin_ppc_extract_sig:
5242 case PPC::BI__builtin_ppc_addex:
5243 case PPC::BI__builtin_darn:
5244 case PPC::BI__builtin_darn_raw:
5245 case PPC::BI__builtin_ppc_compare_and_swaplp:
5246 case PPC::BI__builtin_ppc_fetch_and_addlp:
5247 case PPC::BI__builtin_ppc_fetch_and_andlp:
5248 case PPC::BI__builtin_ppc_fetch_and_orlp:
5249 case PPC::BI__builtin_ppc_fetch_and_swaplp:
5250 return true;
5251 }
5252 return false;
5253}
5254
5255/// Returns true if the argument consists of one contiguous run of 1s with any
5256/// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so
5257/// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not,
5258/// since all 1s are not contiguous.
5259bool Sema::ValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
5260 llvm::APSInt Result;
5261 // We can't check the value of a dependent argument.
5262 Expr *Arg = TheCall->getArg(Arg: ArgNum);
5263 if (Arg->isTypeDependent() || Arg->isValueDependent())
5264 return false;
5265
5266 // Check constant-ness first.
5267 if (BuiltinConstantArg(TheCall, ArgNum, Result))
5268 return true;
5269
5270 // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s.
5271 if (Result.isShiftedMask() || (~Result).isShiftedMask())
5272 return false;
5273
5274 return Diag(TheCall->getBeginLoc(),
5275 diag::err_argument_not_contiguous_bit_field)
5276 << ArgNum << Arg->getSourceRange();
5277}
5278
5279bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
5280 CallExpr *TheCall) {
5281 unsigned i = 0, l = 0, u = 0;
5282 bool IsTarget64Bit = TI.getTypeWidth(T: TI.getIntPtrType()) == 64;
5283 llvm::APSInt Result;
5284
5285 if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
5286 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
5287 << TheCall->getSourceRange();
5288
5289 switch (BuiltinID) {
5290 default: return false;
5291 case PPC::BI__builtin_altivec_crypto_vshasigmaw:
5292 case PPC::BI__builtin_altivec_crypto_vshasigmad:
5293 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1) ||
5294 BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 15);
5295 case PPC::BI__builtin_altivec_dss:
5296 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 3);
5297 case PPC::BI__builtin_tbegin:
5298 case PPC::BI__builtin_tend:
5299 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 1);
5300 case PPC::BI__builtin_tsr:
5301 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 7);
5302 case PPC::BI__builtin_tabortwc:
5303 case PPC::BI__builtin_tabortdc:
5304 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 31);
5305 case PPC::BI__builtin_tabortwci:
5306 case PPC::BI__builtin_tabortdci:
5307 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 31) ||
5308 BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 31);
5309 // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05',
5310 // __builtin_(un)pack_longdouble are available only if long double uses IBM
5311 // extended double representation.
5312 case PPC::BI__builtin_unpack_longdouble:
5313 if (BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1))
5314 return true;
5315 [[fallthrough]];
5316 case PPC::BI__builtin_pack_longdouble:
5317 if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble())
5318 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi)
5319 << "ibmlongdouble";
5320 return false;
5321 case PPC::BI__builtin_altivec_dst:
5322 case PPC::BI__builtin_altivec_dstt:
5323 case PPC::BI__builtin_altivec_dstst:
5324 case PPC::BI__builtin_altivec_dststt:
5325 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 3);
5326 case PPC::BI__builtin_vsx_xxpermdi:
5327 case PPC::BI__builtin_vsx_xxsldwi:
5328 return BuiltinVSX(TheCall);
5329 case PPC::BI__builtin_unpack_vector_int128:
5330 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1);
5331 case PPC::BI__builtin_altivec_vgnb:
5332 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 2, High: 7);
5333 case PPC::BI__builtin_vsx_xxeval:
5334 return BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 255);
5335 case PPC::BI__builtin_altivec_vsldbi:
5336 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 7);
5337 case PPC::BI__builtin_altivec_vsrdbi:
5338 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 7);
5339 case PPC::BI__builtin_vsx_xxpermx:
5340 return BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 7);
5341 case PPC::BI__builtin_ppc_tw:
5342 case PPC::BI__builtin_ppc_tdw:
5343 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 1, High: 31);
5344 case PPC::BI__builtin_ppc_cmprb:
5345 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 1);
5346 // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
5347 // be a constant that represents a contiguous bit field.
5348 case PPC::BI__builtin_ppc_rlwnm:
5349 return ValueIsRunOfOnes(TheCall, ArgNum: 2);
5350 case PPC::BI__builtin_ppc_rlwimi:
5351 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 31) ||
5352 ValueIsRunOfOnes(TheCall, ArgNum: 3);
5353 case PPC::BI__builtin_ppc_rldimi:
5354 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 63) ||
5355 ValueIsRunOfOnes(TheCall, ArgNum: 3);
5356 case PPC::BI__builtin_ppc_addex: {
5357 if (BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 3))
5358 return true;
5359 // Output warning for reserved values 1 to 3.
5360 int ArgValue =
5361 TheCall->getArg(Arg: 2)->getIntegerConstantExpr(Ctx: Context)->getSExtValue();
5362 if (ArgValue != 0)
5363 Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour)
5364 << ArgValue;
5365 return false;
5366 }
5367 case PPC::BI__builtin_ppc_mtfsb0:
5368 case PPC::BI__builtin_ppc_mtfsb1:
5369 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 31);
5370 case PPC::BI__builtin_ppc_mtfsf:
5371 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 255);
5372 case PPC::BI__builtin_ppc_mtfsfi:
5373 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 7) ||
5374 BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
5375 case PPC::BI__builtin_ppc_alignx:
5376 return BuiltinConstantArgPower2(TheCall, ArgNum: 0);
5377 case PPC::BI__builtin_ppc_rdlam:
5378 return ValueIsRunOfOnes(TheCall, ArgNum: 2);
5379 case PPC::BI__builtin_vsx_ldrmb:
5380 case PPC::BI__builtin_vsx_strmb:
5381 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 1, High: 16);
5382 case PPC::BI__builtin_altivec_vcntmbb:
5383 case PPC::BI__builtin_altivec_vcntmbh:
5384 case PPC::BI__builtin_altivec_vcntmbw:
5385 case PPC::BI__builtin_altivec_vcntmbd:
5386 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1);
5387 case PPC::BI__builtin_vsx_xxgenpcvbm:
5388 case PPC::BI__builtin_vsx_xxgenpcvhm:
5389 case PPC::BI__builtin_vsx_xxgenpcvwm:
5390 case PPC::BI__builtin_vsx_xxgenpcvdm:
5391 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 3);
5392 case PPC::BI__builtin_ppc_test_data_class: {
5393 // Check if the first argument of the __builtin_ppc_test_data_class call is
5394 // valid. The argument must be 'float' or 'double' or '__float128'.
5395 QualType ArgType = TheCall->getArg(Arg: 0)->getType();
5396 if (ArgType != QualType(Context.FloatTy) &&
5397 ArgType != QualType(Context.DoubleTy) &&
5398 ArgType != QualType(Context.Float128Ty))
5399 return Diag(TheCall->getBeginLoc(),
5400 diag::err_ppc_invalid_test_data_class_type);
5401 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 127);
5402 }
5403 case PPC::BI__builtin_ppc_maxfe:
5404 case PPC::BI__builtin_ppc_minfe:
5405 case PPC::BI__builtin_ppc_maxfl:
5406 case PPC::BI__builtin_ppc_minfl:
5407 case PPC::BI__builtin_ppc_maxfs:
5408 case PPC::BI__builtin_ppc_minfs: {
5409 if (Context.getTargetInfo().getTriple().isOSAIX() &&
5410 (BuiltinID == PPC::BI__builtin_ppc_maxfe ||
5411 BuiltinID == PPC::BI__builtin_ppc_minfe))
5412 return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type)
5413 << "builtin" << true << 128 << QualType(Context.LongDoubleTy)
5414 << false << Context.getTargetInfo().getTriple().str();
5415 // Argument type should be exact.
5416 QualType ArgType = QualType(Context.LongDoubleTy);
5417 if (BuiltinID == PPC::BI__builtin_ppc_maxfl ||
5418 BuiltinID == PPC::BI__builtin_ppc_minfl)
5419 ArgType = QualType(Context.DoubleTy);
5420 else if (BuiltinID == PPC::BI__builtin_ppc_maxfs ||
5421 BuiltinID == PPC::BI__builtin_ppc_minfs)
5422 ArgType = QualType(Context.FloatTy);
5423 for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I)
5424 if (TheCall->getArg(I)->getType() != ArgType)
5425 return Diag(TheCall->getBeginLoc(),
5426 diag::err_typecheck_convert_incompatible)
5427 << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0;
5428 return false;
5429 }
5430#define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \
5431 case PPC::BI__builtin_##Name: \
5432 return BuiltinPPCMMACall(TheCall, BuiltinID, Types);
5433#include "clang/Basic/BuiltinsPPC.def"
5434 }
5435 return BuiltinConstantArgRange(TheCall, ArgNum: i, Low: l, High: u);
5436}
5437
5438// Check if the given type is a non-pointer PPC MMA type. This function is used
5439// in Sema to prevent invalid uses of restricted PPC MMA types.
5440bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
5441 if (Type->isPointerType() || Type->isArrayType())
5442 return false;
5443
5444 QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
5445#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
5446 if (false
5447#include "clang/Basic/PPCTypes.def"
5448 ) {
5449 Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
5450 return true;
5451 }
5452 return false;
5453}
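// For illustration: with the types currently listed in PPCTypes.def (such as
// __vector_quad and __vector_pair), a by-value declaration like
//   __vector_quad Acc;   // reaches the diagnostic above in restricted contexts
// is flagged, while
//   __vector_quad *Acc;  // pointer to an MMA type
// passes the early isPointerType()/isArrayType() bail-out.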
5454
5455// Helper function for CheckHLSLBuiltinFunctionCall
5456bool CheckVectorElementCallArgs(Sema *S, CallExpr *TheCall) {
5457 assert(TheCall->getNumArgs() > 1);
5458 ExprResult A = TheCall->getArg(Arg: 0);
5459
5460 QualType ArgTyA = A.get()->getType();
5461
5462 auto *VecTyA = ArgTyA->getAs<VectorType>();
5463 SourceLocation BuiltinLoc = TheCall->getBeginLoc();
5464
5465 for (unsigned i = 1; i < TheCall->getNumArgs(); ++i) {
5466 ExprResult B = TheCall->getArg(Arg: i);
5467 QualType ArgTyB = B.get()->getType();
5468 auto *VecTyB = ArgTyB->getAs<VectorType>();
5469 if (VecTyA == nullptr && VecTyB == nullptr)
5470 return false;
5471
5472 if (VecTyA && VecTyB) {
5473 bool retValue = false;
5474 if (VecTyA->getElementType() != VecTyB->getElementType()) {
5475 // Note: type promotion is intended to be handled via the intrinsics
5476 // and not the builtin itself.
5477 S->Diag(TheCall->getBeginLoc(),
5478 diag::err_vec_builtin_incompatible_vector)
5479 << TheCall->getDirectCallee() << /*useAllTerminology*/ true
5480 << SourceRange(A.get()->getBeginLoc(), B.get()->getEndLoc());
5481 retValue = true;
5482 }
5483 if (VecTyA->getNumElements() != VecTyB->getNumElements()) {
5484 // You should only be hitting this case if you are calling the builtin
5485 // directly. HLSL intrinsics should avoid this case via an
5486 // HLSLVectorTruncation.
5487 S->Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
5488 << TheCall->getDirectCallee() << /*useAllTerminology*/ true
5489 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
5490 TheCall->getArg(1)->getEndLoc());
5491 retValue = true;
5492 }
5493 return retValue;
5494 }
5495 }
5496
5497 // Note: if we get here, one of the args is a scalar which
5498 // requires a VectorSplat on Arg0 or Arg1.
5499 S->Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
5500 << TheCall->getDirectCallee() << /*useAllTerminology*/ true
5501 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
5502 TheCall->getArg(1)->getEndLoc());
5503 return true;
5504}
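// For illustration (HLSL-style pseudocode, assuming the calls lower to the
// vector builtins checked above):
//   dot(float3, float3)  // ok: element types and counts agree
//   dot(float3, float2)  // mismatched element counts: incompatible-vector diagnostic
//   dot(float3, 1.0f)    // vector/scalar mix: err_vec_builtin_non_vector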
5505
5506bool CheckArgsTypesAreCorrect(
5507 Sema *S, CallExpr *TheCall, QualType ExpectedType,
5508 llvm::function_ref<bool(clang::QualType PassedType)> Check) {
5509 for (unsigned i = 0; i < TheCall->getNumArgs(); ++i) {
5510 QualType PassedType = TheCall->getArg(Arg: i)->getType();
5511 if (Check(PassedType)) {
5512 if (auto *VecTyA = PassedType->getAs<VectorType>())
5513 ExpectedType = S->Context.getVectorType(
5514 VectorType: ExpectedType, NumElts: VecTyA->getNumElements(), VecKind: VecTyA->getVectorKind());
5515 S->Diag(TheCall->getArg(0)->getBeginLoc(),
5516 diag::err_typecheck_convert_incompatible)
5517 << PassedType << ExpectedType << 1 << 0 << 0;
5518 return true;
5519 }
5520 }
5521 return false;
5522}
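// For illustration, a caller passes a predicate that returns true for an
// *invalid* argument type. A hypothetical check that every argument has an
// integer representation could be written as:
//   CheckArgsTypesAreCorrect(S, TheCall, S->Context.IntTy,
//                            [](clang::QualType T) {
//                              return !T->hasIntegerRepresentation();
//                            });
// The helpers below all follow this pattern.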
5523
5524bool CheckAllArgsHaveFloatRepresentation(Sema *S, CallExpr *TheCall) {
5525 auto checkAllFloatTypes = [](clang::QualType PassedType) -> bool {
5526 return !PassedType->hasFloatingRepresentation();
5527 };
5528 return CheckArgsTypesAreCorrect(S, TheCall, S->Context.FloatTy,
5529 checkAllFloatTypes);
5530}
5531
5532bool CheckFloatOrHalfRepresentations(Sema *S, CallExpr *TheCall) {
5533 auto checkFloatorHalf = [](clang::QualType PassedType) -> bool {
5534 clang::QualType BaseType =
5535 PassedType->isVectorType()
5536 ? PassedType->getAs<clang::VectorType>()->getElementType()
5537 : PassedType;
5538 return !BaseType->isHalfType() && !BaseType->isFloat32Type();
5539 };
5540 return CheckArgsTypesAreCorrect(S, TheCall, S->Context.FloatTy,
5541 checkFloatorHalf);
5542}
5543
5544bool CheckNoDoubleVectors(Sema *S, CallExpr *TheCall) {
5545 auto checkDoubleVector = [](clang::QualType PassedType) -> bool {
5546 if (const auto *VecTy = PassedType->getAs<VectorType>())
5547 return VecTy->getElementType()->isDoubleType();
5548 return false;
5549 };
5550 return CheckArgsTypesAreCorrect(S, TheCall, S->Context.FloatTy,
5551 checkDoubleVector);
5552}
5553
5554bool CheckUnsignedIntRepresentation(Sema *S, CallExpr *TheCall) {
5555 auto checkAllUnsignedTypes = [](clang::QualType PassedType) -> bool {
5556 return !PassedType->hasUnsignedIntegerRepresentation();
5557 };
5558 return CheckArgsTypesAreCorrect(S, TheCall, S->Context.UnsignedIntTy,
5559 checkAllUnsignedTypes);
5560}
5561
5562void SetElementTypeAsReturnType(Sema *S, CallExpr *TheCall,
5563 QualType ReturnType) {
5564 auto *VecTyA = TheCall->getArg(Arg: 0)->getType()->getAs<VectorType>();
5565 if (VecTyA)
5566 ReturnType = S->Context.getVectorType(VectorType: ReturnType, NumElts: VecTyA->getNumElements(),
5567 VecKind: VectorKind::Generic);
5568 TheCall->setType(ReturnType);
5569}
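// For illustration: if the first argument of the call is a 4-element float
// vector and ReturnType is Context.BoolTy, the call's type becomes a
// 4-element generic vector of bool; for a scalar first argument, ReturnType
// is used as-is.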
5570
5571// Note: returning true in this case results in CheckBuiltinFunctionCall
5572// returning an ExprError.
5573bool Sema::CheckHLSLBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
5574 switch (BuiltinID) {
5575 case Builtin::BI__builtin_hlsl_elementwise_all:
5576 case Builtin::BI__builtin_hlsl_elementwise_any: {
5577 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 1))
5578 return true;
5579 break;
5580 }
5581 case Builtin::BI__builtin_hlsl_elementwise_clamp: {
5582 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 3))
5583 return true;
5584 if (CheckVectorElementCallArgs(S: this, TheCall))
5585 return true;
5586 if (BuiltinElementwiseTernaryMath(
5587 TheCall, /*CheckForFloatArgs*/
5588 TheCall->getArg(Arg: 0)->getType()->hasFloatingRepresentation()))
5589 return true;
5590 break;
5591 }
5592 case Builtin::BI__builtin_hlsl_dot: {
5593 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 2))
5594 return true;
5595 if (CheckVectorElementCallArgs(S: this, TheCall))
5596 return true;
5597 if (BuiltinVectorToScalarMath(TheCall))
5598 return true;
5599 if (CheckNoDoubleVectors(S: this, TheCall))
5600 return true;
5601 break;
5602 }
5603 case Builtin::BI__builtin_hlsl_elementwise_rcp: {
5604 if (CheckAllArgsHaveFloatRepresentation(S: this, TheCall))
5605 return true;
5606 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
5607 return true;
5608 break;
5609 }
5610 case Builtin::BI__builtin_hlsl_elementwise_rsqrt:
5611 case Builtin::BI__builtin_hlsl_elementwise_frac: {
5612 if (CheckFloatOrHalfRepresentations(S: this, TheCall))
5613 return true;
5614 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
5615 return true;
5616 break;
5617 }
5618 case Builtin::BI__builtin_hlsl_elementwise_isinf: {
5619 if (CheckFloatOrHalfRepresentations(S: this, TheCall))
5620 return true;
5621 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
5622 return true;
5623 SetElementTypeAsReturnType(this, TheCall, this->Context.BoolTy);
5624 break;
5625 }
5626 case Builtin::BI__builtin_hlsl_lerp: {
5627 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 3))
5628 return true;
5629 if (CheckVectorElementCallArgs(S: this, TheCall))
5630 return true;
5631 if (BuiltinElementwiseTernaryMath(TheCall))
5632 return true;
5633 if (CheckFloatOrHalfRepresentations(S: this, TheCall))
5634 return true;
5635 break;
5636 }
5637 case Builtin::BI__builtin_hlsl_mad: {
5638 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 3))
5639 return true;
5640 if (CheckVectorElementCallArgs(S: this, TheCall))
5641 return true;
5642 if (BuiltinElementwiseTernaryMath(
5643 TheCall, /*CheckForFloatArgs*/
5644 TheCall->getArg(Arg: 0)->getType()->hasFloatingRepresentation()))
5645 return true;
5646 break;
5647 }
5648 // Note: these are LLVM builtins for which we want to catch invalid
5649 // intrinsic generation. Normal handling of these builtins will occur elsewhere.
5650 case Builtin::BI__builtin_elementwise_bitreverse: {
5651 if (CheckUnsignedIntRepresentation(S: this, TheCall))
5652 return true;
5653 break;
5654 }
5655 case Builtin::BI__builtin_elementwise_ceil:
5656 case Builtin::BI__builtin_elementwise_cos:
5657 case Builtin::BI__builtin_elementwise_exp:
5658 case Builtin::BI__builtin_elementwise_exp2:
5659 case Builtin::BI__builtin_elementwise_floor:
5660 case Builtin::BI__builtin_elementwise_log:
5661 case Builtin::BI__builtin_elementwise_log2:
5662 case Builtin::BI__builtin_elementwise_log10:
5663 case Builtin::BI__builtin_elementwise_pow:
5664 case Builtin::BI__builtin_elementwise_roundeven:
5665 case Builtin::BI__builtin_elementwise_sin:
5666 case Builtin::BI__builtin_elementwise_sqrt:
5667 case Builtin::BI__builtin_elementwise_trunc: {
5668 if (CheckFloatOrHalfRepresentations(S: this, TheCall))
5669 return true;
5670 break;
5671 }
5672 }
5673 return false;
5674}
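// For illustration, __builtin_hlsl_dot is validated in stages: argument count,
// vector element compatibility, the vector-to-scalar math checks, and finally
// the no-double-vector restriction; the first failing stage emits its
// diagnostic and the remaining stages are skipped.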
5675
5676bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
5677 CallExpr *TheCall) {
5678 // Position of the memory order and scope arguments in the builtin.
5679 unsigned OrderIndex, ScopeIndex;
5680 switch (BuiltinID) {
5681 case AMDGPU::BI__builtin_amdgcn_get_fpenv:
5682 case AMDGPU::BI__builtin_amdgcn_set_fpenv:
5683 return false;
5684 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
5685 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
5686 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
5687 case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
5688 OrderIndex = 2;
5689 ScopeIndex = 3;
5690 break;
5691 case AMDGPU::BI__builtin_amdgcn_fence:
5692 OrderIndex = 0;
5693 ScopeIndex = 1;
5694 break;
5695 default:
5696 return false;
5697 }
5698
5699 ExprResult Arg = TheCall->getArg(Arg: OrderIndex);
5700 auto ArgExpr = Arg.get();
5701 Expr::EvalResult ArgResult;
5702
5703 if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
5704 return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
5705 << ArgExpr->getType();
5706 auto Ord = ArgResult.Val.getInt().getZExtValue();
5707
5708 // Check validity of the memory ordering as per the C11 / C++11 memory model.
5709 // Only fence needs this check. Atomic dec/inc allow all memory orders.
5710 if (!llvm::isValidAtomicOrderingCABI(Ord))
5711 return Diag(ArgExpr->getBeginLoc(),
5712 diag::warn_atomic_op_has_invalid_memory_order)
5713 << 0 << ArgExpr->getSourceRange();
5714 switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
5715 case llvm::AtomicOrderingCABI::relaxed:
5716 case llvm::AtomicOrderingCABI::consume:
5717 if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
5718 return Diag(ArgExpr->getBeginLoc(),
5719 diag::warn_atomic_op_has_invalid_memory_order)
5720 << 0 << ArgExpr->getSourceRange();
5721 break;
5722 case llvm::AtomicOrderingCABI::acquire:
5723 case llvm::AtomicOrderingCABI::release:
5724 case llvm::AtomicOrderingCABI::acq_rel:
5725 case llvm::AtomicOrderingCABI::seq_cst:
5726 break;
5727 }
5728
5729 Arg = TheCall->getArg(Arg: ScopeIndex);
5730 ArgExpr = Arg.get();
5731 Expr::EvalResult ArgResult1;
5732 // Check that sync scope is a constant literal
5733 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context))
5734 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
5735 << ArgExpr->getType();
5736
5737 return false;
5738}
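// For illustration (assuming the usual __ATOMIC_* macro values for the order
// argument):
//   __builtin_amdgcn_fence(__ATOMIC_RELAXED, "workgroup")  // diagnosed:
//       relaxed/consume are rejected for the fence builtin
//   __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "agent")      // accepted
// The scope argument only has to be a constant expression at this point.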
5739
5740bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
5741 llvm::APSInt Result;
5742
5743 // We can't check the value of a dependent argument.
5744 Expr *Arg = TheCall->getArg(Arg: ArgNum);
5745 if (Arg->isTypeDependent() || Arg->isValueDependent())
5746 return false;
5747
5748 // Check constant-ness first.
5749 if (BuiltinConstantArg(TheCall, ArgNum, Result))
5750 return true;
5751
5752 int64_t Val = Result.getSExtValue();
5753 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
5754 return false;
5755
5756 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
5757 << Arg->getSourceRange();
5758}
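// For illustration: the accepted values correspond to the vtype.vlmul
// encoding, 0..3 for LMUL = 1, 2, 4, 8 and 5..7 for LMUL = 1/8, 1/4, 1/2;
// the reserved value 4 (and anything outside 0..7) is rejected with
// err_riscv_builtin_invalid_lmul.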
5759
5760static bool CheckInvalidVLENandLMUL(const TargetInfo &TI, CallExpr *TheCall,
5761 Sema &S, QualType Type, int EGW) {
5762 assert((EGW == 128 || EGW == 256) && "EGW can only be 128 or 256 bits");
5763
5764 // LMUL * VLEN >= EGW
5765 ASTContext::BuiltinVectorTypeInfo Info =
5766 S.Context.getBuiltinVectorTypeInfo(VecTy: Type->castAs<BuiltinType>());
5767 unsigned ElemSize = S.Context.getTypeSize(Info.ElementType);
5768 unsigned MinElemCount = Info.EC.getKnownMinValue();
5769
5770 unsigned EGS = EGW / ElemSize;
5771 // If EGS is less than or equal to the minimum number of elements, then the
5772 // type is valid.
5773 if (EGS <= MinElemCount)
5774 return false;
5775
5776 // Otherwise, we need vscale to be at least EGS / MinElemCount.
5777 assert(EGS % MinElemCount == 0);
5778 unsigned VScaleFactor = EGS / MinElemCount;
5779 // Vscale is VLEN/RVVBitsPerBlock.
5780 unsigned MinRequiredVLEN = VScaleFactor * llvm::RISCV::RVVBitsPerBlock;
5781 std::string RequiredExt = "zvl" + std::to_string(val: MinRequiredVLEN) + "b";
5782 if (!TI.hasFeature(RequiredExt))
5783 return S.Diag(TheCall->getBeginLoc(),
5784 diag::err_riscv_type_requires_extension) << Type << RequiredExt;
5785
5786 return false;
5787}
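// Worked example (illustrative): for a builtin requiring EGW = 128 with a
// vuint32mf2_t operand, ElemSize = 32 and MinElemCount = 1, so EGS = 4 and
// VScaleFactor = 4; with RVVBitsPerBlock = 64 this gives MinRequiredVLEN =
// 256, i.e. the target must provide the zvl256b feature.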
5788
5789bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
5790 unsigned BuiltinID,
5791 CallExpr *TheCall) {
5792 // vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx,
5793 // vsmul.vv, vsmul.vx are not included for EEW=64 in Zve64*.
5794 switch (BuiltinID) {
5795 default:
5796 break;
5797 case RISCVVector::BI__builtin_rvv_vmulhsu_vv:
5798 case RISCVVector::BI__builtin_rvv_vmulhsu_vx:
5799 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tu:
5800 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tu:
5801 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_m:
5802 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_m:
5803 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_mu:
5804 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_mu:
5805 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tum:
5806 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tum:
5807 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tumu:
5808 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tumu:
5809 case RISCVVector::BI__builtin_rvv_vmulhu_vv:
5810 case RISCVVector::BI__builtin_rvv_vmulhu_vx:
5811 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tu:
5812 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tu:
5813 case RISCVVector::BI__builtin_rvv_vmulhu_vv_m:
5814 case RISCVVector::BI__builtin_rvv_vmulhu_vx_m:
5815 case RISCVVector::BI__builtin_rvv_vmulhu_vv_mu:
5816 case RISCVVector::BI__builtin_rvv_vmulhu_vx_mu:
5817 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tum:
5818 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tum:
5819 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tumu:
5820 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tumu:
5821 case RISCVVector::BI__builtin_rvv_vmulh_vv:
5822 case RISCVVector::BI__builtin_rvv_vmulh_vx:
5823 case RISCVVector::BI__builtin_rvv_vmulh_vv_tu:
5824 case RISCVVector::BI__builtin_rvv_vmulh_vx_tu:
5825 case RISCVVector::BI__builtin_rvv_vmulh_vv_m:
5826 case RISCVVector::BI__builtin_rvv_vmulh_vx_m:
5827 case RISCVVector::BI__builtin_rvv_vmulh_vv_mu:
5828 case RISCVVector::BI__builtin_rvv_vmulh_vx_mu:
5829 case RISCVVector::BI__builtin_rvv_vmulh_vv_tum:
5830 case RISCVVector::BI__builtin_rvv_vmulh_vx_tum:
5831 case RISCVVector::BI__builtin_rvv_vmulh_vv_tumu:
5832 case RISCVVector::BI__builtin_rvv_vmulh_vx_tumu:
5833 case RISCVVector::BI__builtin_rvv_vsmul_vv:
5834 case RISCVVector::BI__builtin_rvv_vsmul_vx:
5835 case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
5836 case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
5837 case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
5838 case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
5839 case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
5840 case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
5841 case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
5842 case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
5843 case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
5844 case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: {
5845 ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(
5846 VecTy: TheCall->getType()->castAs<BuiltinType>());
5847
5848 if (Context.getTypeSize(Info.ElementType) == 64 && !TI.hasFeature("v"))
5849 return Diag(TheCall->getBeginLoc(),
5850 diag::err_riscv_builtin_requires_extension)
5851 << /* IsExtension */ true << TheCall->getSourceRange() << "v";
5852
5853 break;
5854 }
5855 }
5856
5857 switch (BuiltinID) {
5858 case RISCVVector::BI__builtin_rvv_vsetvli:
5859 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 3) ||
5860 CheckRISCVLMUL(TheCall, ArgNum: 2);
5861 case RISCVVector::BI__builtin_rvv_vsetvlimax:
5862 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 3) ||
5863 CheckRISCVLMUL(TheCall, ArgNum: 1);
5864 case RISCVVector::BI__builtin_rvv_vget_v: {
5865 ASTContext::BuiltinVectorTypeInfo ResVecInfo =
5866 Context.getBuiltinVectorTypeInfo(VecTy: cast<BuiltinType>(
5867 TheCall->getType().getCanonicalType().getTypePtr()));
5868 ASTContext::BuiltinVectorTypeInfo VecInfo =
5869 Context.getBuiltinVectorTypeInfo(VecTy: cast<BuiltinType>(
5870 Val: TheCall->getArg(Arg: 0)->getType().getCanonicalType().getTypePtr()));
5871 unsigned MaxIndex;
5872 if (VecInfo.NumVectors != 1) // vget for tuple type
5873 MaxIndex = VecInfo.NumVectors;
5874 else // vget for non-tuple type
5875 MaxIndex = (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) /
5876 (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors);
5877 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: MaxIndex - 1);
5878 }
5879 case RISCVVector::BI__builtin_rvv_vset_v: {
5880 ASTContext::BuiltinVectorTypeInfo ResVecInfo =
5881 Context.getBuiltinVectorTypeInfo(VecTy: cast<BuiltinType>(
5882 TheCall->getType().getCanonicalType().getTypePtr()));
5883 ASTContext::BuiltinVectorTypeInfo VecInfo =
5884 Context.getBuiltinVectorTypeInfo(VecTy: cast<BuiltinType>(
5885 Val: TheCall->getArg(Arg: 2)->getType().getCanonicalType().getTypePtr()));
5886 unsigned MaxIndex;
5887 if (ResVecInfo.NumVectors != 1) // vset for tuple type
5888 MaxIndex = ResVecInfo.NumVectors;
5889 else // vset for non-tuple type
5890 MaxIndex = (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) /
5891 (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors);
5892 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: MaxIndex - 1);
5893 }
5894 // Vector Crypto
5895 case RISCVVector::BI__builtin_rvv_vaeskf1_vi_tu:
5896 case RISCVVector::BI__builtin_rvv_vaeskf2_vi_tu:
5897 case RISCVVector::BI__builtin_rvv_vaeskf2_vi:
5898 case RISCVVector::BI__builtin_rvv_vsm4k_vi_tu: {
5899 QualType Op1Type = TheCall->getArg(Arg: 0)->getType();
5900 QualType Op2Type = TheCall->getArg(Arg: 1)->getType();
5901 return CheckInvalidVLENandLMUL(TI, TheCall, S&: *this, Type: Op1Type, EGW: 128) ||
5902 CheckInvalidVLENandLMUL(TI, TheCall, S&: *this, Type: Op2Type, EGW: 128) ||
5903 BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 31);
5904 }
5905 case RISCVVector::BI__builtin_rvv_vsm3c_vi_tu:
5906 case RISCVVector::BI__builtin_rvv_vsm3c_vi: {
5907 QualType Op1Type = TheCall->getArg(Arg: 0)->getType();
5908 return CheckInvalidVLENandLMUL(TI, TheCall, S&: *this, Type: Op1Type, EGW: 256) ||
5909 BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 31);
5910 }
5911 case RISCVVector::BI__builtin_rvv_vaeskf1_vi:
5912 case RISCVVector::BI__builtin_rvv_vsm4k_vi: {
5913 QualType Op1Type = TheCall->getArg(Arg: 0)->getType();
5914 return CheckInvalidVLENandLMUL(TI, TheCall, S&: *this, Type: Op1Type, EGW: 128) ||
5915 BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 31);
5916 }
5917 case RISCVVector::BI__builtin_rvv_vaesdf_vv:
5918 case RISCVVector::BI__builtin_rvv_vaesdf_vs:
5919 case RISCVVector::BI__builtin_rvv_vaesdm_vv:
5920 case RISCVVector::BI__builtin_rvv_vaesdm_vs:
5921 case RISCVVector::BI__builtin_rvv_vaesef_vv:
5922 case RISCVVector::BI__builtin_rvv_vaesef_vs:
5923 case RISCVVector::BI__builtin_rvv_vaesem_vv:
5924 case RISCVVector::BI__builtin_rvv_vaesem_vs:
5925 case RISCVVector::BI__builtin_rvv_vaesz_vs:
5926 case RISCVVector::BI__builtin_rvv_vsm4r_vv:
5927 case RISCVVector::BI__builtin_rvv_vsm4r_vs:
5928 case RISCVVector::BI__builtin_rvv_vaesdf_vv_tu:
5929 case RISCVVector::BI__builtin_rvv_vaesdf_vs_tu:
5930 case RISCVVector::BI__builtin_rvv_vaesdm_vv_tu:
5931 case RISCVVector::BI__builtin_rvv_vaesdm_vs_tu:
5932 case RISCVVector::BI__builtin_rvv_vaesef_vv_tu:
5933 case RISCVVector::BI__builtin_rvv_vaesef_vs_tu:
5934 case RISCVVector::BI__builtin_rvv_vaesem_vv_tu:
5935 case RISCVVector::BI__builtin_rvv_vaesem_vs_tu:
5936 case RISCVVector::BI__builtin_rvv_vaesz_vs_tu:
5937 case RISCVVector::BI__builtin_rvv_vsm4r_vv_tu:
5938 case RISCVVector::BI__builtin_rvv_vsm4r_vs_tu: {
5939 QualType Op1Type = TheCall->getArg(Arg: 0)->getType();
5940 QualType Op2Type = TheCall->getArg(Arg: 1)->getType();
5941 return CheckInvalidVLENandLMUL(TI, TheCall, S&: *this, Type: Op1Type, EGW: 128) ||
5942 CheckInvalidVLENandLMUL(TI, TheCall, S&: *this, Type: Op2Type, EGW: 128);
5943 }
5944 case RISCVVector::BI__builtin_rvv_vsha2ch_vv:
5945 case RISCVVector::BI__builtin_rvv_vsha2cl_vv:
5946 case RISCVVector::BI__builtin_rvv_vsha2ms_vv:
5947 case RISCVVector::BI__builtin_rvv_vsha2ch_vv_tu:
5948 case RISCVVector::BI__builtin_rvv_vsha2cl_vv_tu:
5949 case RISCVVector::BI__builtin_rvv_vsha2ms_vv_tu: {
5950 QualType Op1Type = TheCall->getArg(Arg: 0)->getType();
5951 QualType Op2Type = TheCall->getArg(Arg: 1)->getType();
5952 QualType Op3Type = TheCall->getArg(Arg: 2)->getType();
5953 ASTContext::BuiltinVectorTypeInfo Info =
5954 Context.getBuiltinVectorTypeInfo(VecTy: Op1Type->castAs<BuiltinType>());
5955 uint64_t ElemSize = Context.getTypeSize(Info.ElementType);
5956 if (ElemSize == 64 && !TI.hasFeature("zvknhb"))
5957 return Diag(TheCall->getBeginLoc(),
5958 diag::err_riscv_builtin_requires_extension)
5959 << /* IsExtension */ true << TheCall->getSourceRange() << "zvknhb";
5960
5961 return CheckInvalidVLENandLMUL(TI, TheCall, S&: *this, Type: Op1Type, EGW: ElemSize * 4) ||
5962 CheckInvalidVLENandLMUL(TI, TheCall, S&: *this, Type: Op2Type, EGW: ElemSize * 4) ||
5963 CheckInvalidVLENandLMUL(TI, TheCall, S&: *this, Type: Op3Type, EGW: ElemSize * 4);
5964 }
5965
5966 case RISCVVector::BI__builtin_rvv_sf_vc_i_se:
5967 // bit_27_26, bit_24_20, bit_11_7, simm5, sew, log2lmul
5968 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 3) ||
5969 BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 31) ||
5970 BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 31) ||
5971 BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: -16, High: 15) ||
5972 CheckRISCVLMUL(TheCall, ArgNum: 5);
5973 case RISCVVector::BI__builtin_rvv_sf_vc_iv_se:
5974 // bit_27_26, bit_11_7, vs2, simm5
5975 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 3) ||
5976 BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 31) ||
5977 BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: -16, High: 15);
5978 case RISCVVector::BI__builtin_rvv_sf_vc_v_i:
5979 case RISCVVector::BI__builtin_rvv_sf_vc_v_i_se:
5980 // bit_27_26, bit_24_20, simm5
5981 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 3) ||
5982 BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 31) ||
5983 BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -16, High: 15);
5984 case RISCVVector::BI__builtin_rvv_sf_vc_v_iv:
5985 case RISCVVector::BI__builtin_rvv_sf_vc_v_iv_se:
5986 // bit_27_26, vs2, simm5
5987 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 3) ||
5988 BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -16, High: 15);
5989 case RISCVVector::BI__builtin_rvv_sf_vc_ivv_se:
5990 case RISCVVector::BI__builtin_rvv_sf_vc_ivw_se:
5991 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv:
5992 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw:
5993 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv_se:
5994 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw_se:
5995 // bit_27_26, vd, vs2, simm5
5996 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 3) ||
5997 BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: -16, High: 15);
5998 case RISCVVector::BI__builtin_rvv_sf_vc_x_se:
5999 // bit_27_26, bit_24_20, bit_11_7, xs1, sew, log2lmul
6000 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 3) ||
6001 BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 31) ||
6002 BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 31) ||
6003 CheckRISCVLMUL(TheCall, ArgNum: 5);
6004 case RISCVVector::BI__builtin_rvv_sf_vc_xv_se:
6005 case RISCVVector::BI__builtin_rvv_sf_vc_vv_se:
6006 // bit_27_26, bit_11_7, vs2, xs1/vs1
6007 case RISCVVector::BI__builtin_rvv_sf_vc_v_x:
6008 case RISCVVector::BI__builtin_rvv_sf_vc_v_x_se:
6009 // bit_27_26, bit_24_20, xs1
6010 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 3) ||
6011 BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 31);
6012 case RISCVVector::BI__builtin_rvv_sf_vc_vvv_se:
6013 case RISCVVector::BI__builtin_rvv_sf_vc_xvv_se:
6014 case RISCVVector::BI__builtin_rvv_sf_vc_vvw_se:
6015 case RISCVVector::BI__builtin_rvv_sf_vc_xvw_se:
6016 // bit_27_26, vd, vs2, xs1
6017 case RISCVVector::BI__builtin_rvv_sf_vc_v_xv:
6018 case RISCVVector::BI__builtin_rvv_sf_vc_v_vv:
6019 case RISCVVector::BI__builtin_rvv_sf_vc_v_xv_se:
6020 case RISCVVector::BI__builtin_rvv_sf_vc_v_vv_se:
6021 // bit_27_26, vs2, xs1/vs1
6022 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv:
6023 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv:
6024 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw:
6025 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw:
6026 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv_se:
6027 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv_se:
6028 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw_se:
6029 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw_se:
6030 // bit_27_26, vd, vs2, xs1/vs1
6031 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 3);
6032 case RISCVVector::BI__builtin_rvv_sf_vc_fv_se:
6033 // bit_26, bit_11_7, vs2, fs1
6034 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 1) ||
6035 BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 31);
6036 case RISCVVector::BI__builtin_rvv_sf_vc_fvv_se:
6037 case RISCVVector::BI__builtin_rvv_sf_vc_fvw_se:
6038 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv:
6039 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw:
6040 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv_se:
6041 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw_se:
6042 // bit_26, vd, vs2, fs1
6043 case RISCVVector::BI__builtin_rvv_sf_vc_v_fv:
6044 case RISCVVector::BI__builtin_rvv_sf_vc_v_fv_se:
6045 // bit_26, vs2, fs1
6046 return BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 1);
6047 // Check if byteselect is in [0, 3]
6048 case RISCV::BI__builtin_riscv_aes32dsi:
6049 case RISCV::BI__builtin_riscv_aes32dsmi:
6050 case RISCV::BI__builtin_riscv_aes32esi:
6051 case RISCV::BI__builtin_riscv_aes32esmi:
6052 case RISCV::BI__builtin_riscv_sm4ks:
6053 case RISCV::BI__builtin_riscv_sm4ed:
6054 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 3);
6055 // Check if rnum is in [0, 10]
6056 case RISCV::BI__builtin_riscv_aes64ks1i:
6057 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 10);
6058 // Check if value range for vxrm is in [0, 3]
6059 case RISCVVector::BI__builtin_rvv_vaaddu_vv:
6060 case RISCVVector::BI__builtin_rvv_vaaddu_vx:
6061 case RISCVVector::BI__builtin_rvv_vaadd_vv:
6062 case RISCVVector::BI__builtin_rvv_vaadd_vx:
6063 case RISCVVector::BI__builtin_rvv_vasubu_vv:
6064 case RISCVVector::BI__builtin_rvv_vasubu_vx:
6065 case RISCVVector::BI__builtin_rvv_vasub_vv:
6066 case RISCVVector::BI__builtin_rvv_vasub_vx:
6067 case RISCVVector::BI__builtin_rvv_vsmul_vv:
6068 case RISCVVector::BI__builtin_rvv_vsmul_vx:
6069 case RISCVVector::BI__builtin_rvv_vssra_vv:
6070 case RISCVVector::BI__builtin_rvv_vssra_vx:
6071 case RISCVVector::BI__builtin_rvv_vssrl_vv:
6072 case RISCVVector::BI__builtin_rvv_vssrl_vx:
6073 case RISCVVector::BI__builtin_rvv_vnclip_wv:
6074 case RISCVVector::BI__builtin_rvv_vnclip_wx:
6075 case RISCVVector::BI__builtin_rvv_vnclipu_wv:
6076 case RISCVVector::BI__builtin_rvv_vnclipu_wx:
6077 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 3);
6078 case RISCVVector::BI__builtin_rvv_vaaddu_vv_tu:
6079 case RISCVVector::BI__builtin_rvv_vaaddu_vx_tu:
6080 case RISCVVector::BI__builtin_rvv_vaadd_vv_tu:
6081 case RISCVVector::BI__builtin_rvv_vaadd_vx_tu:
6082 case RISCVVector::BI__builtin_rvv_vasubu_vv_tu:
6083 case RISCVVector::BI__builtin_rvv_vasubu_vx_tu:
6084 case RISCVVector::BI__builtin_rvv_vasub_vv_tu:
6085 case RISCVVector::BI__builtin_rvv_vasub_vx_tu:
6086 case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
6087 case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
6088 case RISCVVector::BI__builtin_rvv_vssra_vv_tu:
6089 case RISCVVector::BI__builtin_rvv_vssra_vx_tu:
6090 case RISCVVector::BI__builtin_rvv_vssrl_vv_tu:
6091 case RISCVVector::BI__builtin_rvv_vssrl_vx_tu:
6092 case RISCVVector::BI__builtin_rvv_vnclip_wv_tu:
6093 case RISCVVector::BI__builtin_rvv_vnclip_wx_tu:
6094 case RISCVVector::BI__builtin_rvv_vnclipu_wv_tu:
6095 case RISCVVector::BI__builtin_rvv_vnclipu_wx_tu:
6096 case RISCVVector::BI__builtin_rvv_vaaddu_vv_m:
6097 case RISCVVector::BI__builtin_rvv_vaaddu_vx_m:
6098 case RISCVVector::BI__builtin_rvv_vaadd_vv_m:
6099 case RISCVVector::BI__builtin_rvv_vaadd_vx_m:
6100 case RISCVVector::BI__builtin_rvv_vasubu_vv_m:
6101 case RISCVVector::BI__builtin_rvv_vasubu_vx_m:
6102 case RISCVVector::BI__builtin_rvv_vasub_vv_m:
6103 case RISCVVector::BI__builtin_rvv_vasub_vx_m:
6104 case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
6105 case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
6106 case RISCVVector::BI__builtin_rvv_vssra_vv_m:
6107 case RISCVVector::BI__builtin_rvv_vssra_vx_m:
6108 case RISCVVector::BI__builtin_rvv_vssrl_vv_m:
6109 case RISCVVector::BI__builtin_rvv_vssrl_vx_m:
6110 case RISCVVector::BI__builtin_rvv_vnclip_wv_m:
6111 case RISCVVector::BI__builtin_rvv_vnclip_wx_m:
6112 case RISCVVector::BI__builtin_rvv_vnclipu_wv_m:
6113 case RISCVVector::BI__builtin_rvv_vnclipu_wx_m:
6114 return BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 3);
6115 case RISCVVector::BI__builtin_rvv_vaaddu_vv_tum:
6116 case RISCVVector::BI__builtin_rvv_vaaddu_vv_tumu:
6117 case RISCVVector::BI__builtin_rvv_vaaddu_vv_mu:
6118 case RISCVVector::BI__builtin_rvv_vaaddu_vx_tum:
6119 case RISCVVector::BI__builtin_rvv_vaaddu_vx_tumu:
6120 case RISCVVector::BI__builtin_rvv_vaaddu_vx_mu:
6121 case RISCVVector::BI__builtin_rvv_vaadd_vv_tum:
6122 case RISCVVector::BI__builtin_rvv_vaadd_vv_tumu:
6123 case RISCVVector::BI__builtin_rvv_vaadd_vv_mu:
6124 case RISCVVector::BI__builtin_rvv_vaadd_vx_tum:
6125 case RISCVVector::BI__builtin_rvv_vaadd_vx_tumu:
6126 case RISCVVector::BI__builtin_rvv_vaadd_vx_mu:
6127 case RISCVVector::BI__builtin_rvv_vasubu_vv_tum:
6128 case RISCVVector::BI__builtin_rvv_vasubu_vv_tumu:
6129 case RISCVVector::BI__builtin_rvv_vasubu_vv_mu:
6130 case RISCVVector::BI__builtin_rvv_vasubu_vx_tum:
6131 case RISCVVector::BI__builtin_rvv_vasubu_vx_tumu:
6132 case RISCVVector::BI__builtin_rvv_vasubu_vx_mu:
6133 case RISCVVector::BI__builtin_rvv_vasub_vv_tum:
6134 case RISCVVector::BI__builtin_rvv_vasub_vv_tumu:
6135 case RISCVVector::BI__builtin_rvv_vasub_vv_mu:
6136 case RISCVVector::BI__builtin_rvv_vasub_vx_tum:
6137 case RISCVVector::BI__builtin_rvv_vasub_vx_tumu:
6138 case RISCVVector::BI__builtin_rvv_vasub_vx_mu:
6139 case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
6140 case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
6141 case RISCVVector::BI__builtin_rvv_vssra_vv_mu:
6142 case RISCVVector::BI__builtin_rvv_vssra_vx_mu:
6143 case RISCVVector::BI__builtin_rvv_vssrl_vv_mu:
6144 case RISCVVector::BI__builtin_rvv_vssrl_vx_mu:
6145 case RISCVVector::BI__builtin_rvv_vnclip_wv_mu:
6146 case RISCVVector::BI__builtin_rvv_vnclip_wx_mu:
6147 case RISCVVector::BI__builtin_rvv_vnclipu_wv_mu:
6148 case RISCVVector::BI__builtin_rvv_vnclipu_wx_mu:
6149 case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
6150 case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
6151 case RISCVVector::BI__builtin_rvv_vssra_vv_tum:
6152 case RISCVVector::BI__builtin_rvv_vssra_vx_tum:
6153 case RISCVVector::BI__builtin_rvv_vssrl_vv_tum:
6154 case RISCVVector::BI__builtin_rvv_vssrl_vx_tum:
6155 case RISCVVector::BI__builtin_rvv_vnclip_wv_tum:
6156 case RISCVVector::BI__builtin_rvv_vnclip_wx_tum:
6157 case RISCVVector::BI__builtin_rvv_vnclipu_wv_tum:
6158 case RISCVVector::BI__builtin_rvv_vnclipu_wx_tum:
6159 case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
6160 case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu:
6161 case RISCVVector::BI__builtin_rvv_vssra_vv_tumu:
6162 case RISCVVector::BI__builtin_rvv_vssra_vx_tumu:
6163 case RISCVVector::BI__builtin_rvv_vssrl_vv_tumu:
6164 case RISCVVector::BI__builtin_rvv_vssrl_vx_tumu:
6165 case RISCVVector::BI__builtin_rvv_vnclip_wv_tumu:
6166 case RISCVVector::BI__builtin_rvv_vnclip_wx_tumu:
6167 case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu:
6168 case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu:
6169 return BuiltinConstantArgRange(TheCall, ArgNum: 4, Low: 0, High: 3);
6170 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm:
6171 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm:
6172 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm:
6173 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm:
6174 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm:
6175 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm:
6176 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm:
6177 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm:
6178 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm:
6179 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm:
6180 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm:
6181 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm:
6182 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm:
6183 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 4);
6184 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm:
6185 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm:
6186 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm:
6187 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm:
6188 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm:
6189 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm:
6190 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm:
6191 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm:
6192 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm:
6193 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm:
6194 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm:
6195 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm:
6196 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm:
6197 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm:
6198 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm:
6199 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm:
6200 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm:
6201 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm:
6202 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm:
6203 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm:
6204 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm:
6205 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm:
6206 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm:
6207 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm:
6208 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tu:
6209 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tu:
6210 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tu:
6211 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tu:
6212 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tu:
6213 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tu:
6214 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tu:
6215 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tu:
6216 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tu:
6217 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tu:
6218 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tu:
6219 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tu:
6220 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tu:
6221 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_m:
6222 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_m:
6223 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_m:
6224 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_m:
6225 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_m:
6226 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_m:
6227 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_m:
6228 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_m:
6229 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_m:
6230 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_m:
6231 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_m:
6232 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_m:
6233 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_m:
6234 return BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 4);
6235 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tu:
6236 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tu:
6237 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tu:
6238 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tu:
6239 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tu:
6240 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tu:
6241 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tu:
6242 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tu:
6243 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tu:
6244 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tu:
6245 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tu:
6246 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tu:
6247 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tu:
6248 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tu:
6249 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tu:
6250 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tu:
6251 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tu:
6252 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tu:
6253 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tu:
6254 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tu:
6255 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tu:
6256 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tu:
6257 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tu:
6258 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tu:
6259 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm:
6260 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm:
6261 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm:
6262 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm:
6263 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm:
6264 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm:
6265 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm:
6266 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm:
6267 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm:
6268 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm:
6269 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm:
6270 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm:
6271 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm:
6272 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm:
6273 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm:
6274 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm:
6275 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm:
6276 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm:
6277 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm:
6278 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm:
6279 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm:
6280 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm:
6281 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm:
6282 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm:
6283 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tu:
6284 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tu:
6285 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tu:
6286 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tu:
6287 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tu:
6288 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tu:
6289 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tu:
6290 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tu:
6291 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tu:
6292 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tu:
6293 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tu:
6294 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tu:
6295 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tu:
6296 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tu:
6297 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tu:
6298 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tu:
6299 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tu:
6300 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tu:
6301 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tu:
6302 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tu:
6303 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tu:
6304 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tu:
6305 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tu:
6306 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tu:
6307 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_m:
6308 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_m:
6309 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_m:
6310 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_m:
6311 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_m:
6312 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_m:
6313 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_m:
6314 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_m:
6315 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_m:
6316 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_m:
6317 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_m:
6318 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_m:
6319 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_m:
6320 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_m:
6321 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_m:
6322 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_m:
6323 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_m:
6324 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_m:
6325 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_m:
6326 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_m:
6327 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_m:
6328 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_m:
6329 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_m:
6330 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_m:
6331 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tum:
6332 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tum:
6333 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tum:
6334 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tum:
6335 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tum:
6336 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tum:
6337 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tum:
6338 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tum:
6339 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tum:
6340 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tum:
6341 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tum:
6342 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tum:
6343 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tum:
6344 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tumu:
6345 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tumu:
6346 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tumu:
6347 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tumu:
6348 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tumu:
6349 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tumu:
6350 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tumu:
6351 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tumu:
6352 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tumu:
6353 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tumu:
6354 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tumu:
6355 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tumu:
6356 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tumu:
6357 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_mu:
6358 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_mu:
6359 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_mu:
6360 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_mu:
6361 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_mu:
6362 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_mu:
6363 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_mu:
6364 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_mu:
6365 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_mu:
6366 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_mu:
6367 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_mu:
6368 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_mu:
6369 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_mu:
6370 return BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 4);
6371 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_m:
6372 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_m:
6373 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_m:
6374 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_m:
6375 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_m:
6376 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_m:
6377 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_m:
6378 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_m:
6379 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_m:
6380 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_m:
6381 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_m:
6382 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_m:
6383 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_m:
6384 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_m:
6385 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_m:
6386 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_m:
6387 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_m:
6388 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_m:
6389 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_m:
6390 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_m:
6391 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_m:
6392 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_m:
6393 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_m:
6394 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_m:
6395 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tum:
6396 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tum:
6397 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tum:
6398 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tum:
6399 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tum:
6400 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tum:
6401 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tum:
6402 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tum:
6403 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tum:
6404 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tum:
6405 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tum:
6406 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tum:
6407 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tum:
6408 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tum:
6409 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tum:
6410 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tum:
6411 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tum:
6412 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tum:
6413 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tum:
6414 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tum:
6415 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tum:
6416 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tum:
6417 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tum:
6418 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tum:
6419 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tum:
6420 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tum:
6421 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tum:
6422 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tum:
6423 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tum:
6424 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tum:
6425 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tum:
6426 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tum:
6427 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tum:
6428 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tum:
6429 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tum:
6430 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tum:
6431 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tum:
6432 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tum:
6433 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tum:
6434 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tum:
6435 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tum:
6436 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tum:
6437 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tum:
6438 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tum:
6439 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tum:
6440 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tum:
6441 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tum:
6442 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tum:
6443 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tumu:
6444 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tumu:
6445 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tumu:
6446 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tumu:
6447 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tumu:
6448 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tumu:
6449 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tumu:
6450 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tumu:
6451 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tumu:
6452 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tumu:
6453 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tumu:
6454 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tumu:
6455 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tumu:
6456 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tumu:
6457 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tumu:
6458 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tumu:
6459 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tumu:
6460 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tumu:
6461 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tumu:
6462 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tumu:
6463 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tumu:
6464 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tumu:
6465 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tumu:
6466 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tumu:
6467 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tumu:
6468 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tumu:
6469 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tumu:
6470 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tumu:
6471 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tumu:
6472 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tumu:
6473 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tumu:
6474 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tumu:
6475 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tumu:
6476 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tumu:
6477 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tumu:
6478 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tumu:
6479 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tumu:
6480 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tumu:
6481 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tumu:
6482 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tumu:
6483 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tumu:
6484 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tumu:
6485 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tumu:
6486 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tumu:
6487 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_mu:
6488 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_mu:
6489 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_mu:
6490 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_mu:
6491 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_mu:
6492 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_mu:
6493 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_mu:
6494 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_mu:
6495 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_mu:
6496 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_mu:
6497 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_mu:
6498 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_mu:
6499 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_mu:
6500 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_mu:
6501 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_mu:
6502 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_mu:
6503 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_mu:
6504 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_mu:
6505 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_mu:
6506 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_mu:
6507 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_mu:
6508 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_mu:
6509 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_mu:
6510 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_mu:
6511 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_mu:
6512 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_mu:
6513 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_mu:
6514 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_mu:
6515 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_mu:
6516 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_mu:
6517 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_mu:
6518 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_mu:
6519 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_mu:
6520 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_mu:
6521 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_mu:
6522 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_mu:
6523 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_mu:
6524 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_mu:
6525 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_mu:
6526 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_mu:
6527 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_mu:
6528 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_mu:
6529 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_mu:
6530 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_mu:
6531    return BuiltinConstantArgRange(TheCall, 4, 0, 4);
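  // In the masked/policy cases above, operand 4 is the floating-point
  // rounding-mode (frm) immediate, and the range check just performed accepts
  // only the explicit modes 0-4. Hedged illustration (the frm encoding
  // 0=RNE, 1=RTZ, 2=RDN, 3=RUP, 4=RMM and the call shape are assumed from the
  // RISC-V spec, not defined in this file):
  //   __builtin_rvv_vfadd_vv_rm_tum(mask, vd, a, b, /*frm=*/4, vl);  // OK
  //   __builtin_rvv_vfadd_vv_rm_tum(mask, vd, a, b, /*frm=*/7, vl);  // rejected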
6532 case RISCV::BI__builtin_riscv_ntl_load:
6533 case RISCV::BI__builtin_riscv_ntl_store:
6534 DeclRefExpr *DRE =
6535        cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
6536 assert((BuiltinID == RISCV::BI__builtin_riscv_ntl_store ||
6537 BuiltinID == RISCV::BI__builtin_riscv_ntl_load) &&
6538 "Unexpected RISC-V nontemporal load/store builtin!");
6539 bool IsStore = BuiltinID == RISCV::BI__builtin_riscv_ntl_store;
6540 unsigned NumArgs = IsStore ? 3 : 2;
6541
6542    if (checkArgCountAtLeast(*this, TheCall, NumArgs - 1))
6543 return true;
6544
6545    if (checkArgCountAtMost(*this, TheCall, NumArgs))
6546 return true;
6547
6548 // Domain value should be compile-time constant.
6549 // 2 <= domain <= 5
6550 if (TheCall->getNumArgs() == NumArgs &&
6551        BuiltinConstantArgRange(TheCall, NumArgs - 1, 2, 5))
6552 return true;
6553
6554    Expr *PointerArg = TheCall->getArg(0);
6555 ExprResult PointerArgResult =
6556        DefaultFunctionArrayLvalueConversion(PointerArg);
6557
6558 if (PointerArgResult.isInvalid())
6559 return true;
6560 PointerArg = PointerArgResult.get();
6561
6562 const PointerType *PtrType = PointerArg->getType()->getAs<PointerType>();
6563 if (!PtrType) {
6564 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
6565 << PointerArg->getType() << PointerArg->getSourceRange();
6566 return true;
6567 }
6568
6569 QualType ValType = PtrType->getPointeeType();
6570 ValType = ValType.getUnqualifiedType();
6571 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
6572 !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
6573 !ValType->isVectorType() && !ValType->isRVVSizelessBuiltinType()) {
6574 Diag(DRE->getBeginLoc(),
6575 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
6576 << PointerArg->getType() << PointerArg->getSourceRange();
6577 return true;
6578 }
6579
6580 if (!IsStore) {
6581 TheCall->setType(ValType);
6582 return false;
6583 }
6584
6585    ExprResult ValArg = TheCall->getArg(1);
6586    InitializedEntity Entity = InitializedEntity::InitializeParameter(
6587        Context, ValType, /*consume*/ false);
6588    ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
6589 if (ValArg.isInvalid())
6590 return true;
6591
6592    TheCall->setArg(1, ValArg.get());
6593 TheCall->setType(Context.VoidTy);
6594 return false;
6595 }
6596
6597 return false;
6598}
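
// Illustrative uses of the nontemporal load/store checking above (a sketch,
// not from the original source): the pointer operand must point to an integer,
// pointer, floating-point, vector or RVV type, and the optional trailing
// domain operand must be a constant in [2, 5].
//   int v = __builtin_riscv_ntl_load(p);        // OK, result typed as *p
//   int w = __builtin_riscv_ntl_load(p, 2);     // OK, explicit domain
//   __builtin_riscv_ntl_store(p, 1);            // OK, returns void
//   __builtin_riscv_ntl_store(p, 1, 6);         // error: domain not in [2, 5]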
6599
6600bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
6601 CallExpr *TheCall) {
6602 if (BuiltinID == SystemZ::BI__builtin_tabort) {
6603    Expr *Arg = TheCall->getArg(0);
6604 if (std::optional<llvm::APSInt> AbortCode =
6605 Arg->getIntegerConstantExpr(Context))
6606 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
6607 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
6608 << Arg->getSourceRange();
6609 }
6610
6611 // For intrinsics which take an immediate value as part of the instruction,
6612 // range check them here.
6613 unsigned i = 0, l = 0, u = 0;
6614 switch (BuiltinID) {
6615 default: return false;
6616 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
6617 case SystemZ::BI__builtin_s390_verimb:
6618 case SystemZ::BI__builtin_s390_verimh:
6619 case SystemZ::BI__builtin_s390_verimf:
6620 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
6621 case SystemZ::BI__builtin_s390_vfaeb:
6622 case SystemZ::BI__builtin_s390_vfaeh:
6623 case SystemZ::BI__builtin_s390_vfaef:
6624 case SystemZ::BI__builtin_s390_vfaebs:
6625 case SystemZ::BI__builtin_s390_vfaehs:
6626 case SystemZ::BI__builtin_s390_vfaefs:
6627 case SystemZ::BI__builtin_s390_vfaezb:
6628 case SystemZ::BI__builtin_s390_vfaezh:
6629 case SystemZ::BI__builtin_s390_vfaezf:
6630 case SystemZ::BI__builtin_s390_vfaezbs:
6631 case SystemZ::BI__builtin_s390_vfaezhs:
6632 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
6633 case SystemZ::BI__builtin_s390_vfisb:
6634 case SystemZ::BI__builtin_s390_vfidb:
6635    return BuiltinConstantArgRange(TheCall, 1, 0, 15) ||
6636           BuiltinConstantArgRange(TheCall, 2, 0, 15);
6637 case SystemZ::BI__builtin_s390_vftcisb:
6638 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
6639 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
6640 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
6641 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
6642 case SystemZ::BI__builtin_s390_vstrcb:
6643 case SystemZ::BI__builtin_s390_vstrch:
6644 case SystemZ::BI__builtin_s390_vstrcf:
6645 case SystemZ::BI__builtin_s390_vstrczb:
6646 case SystemZ::BI__builtin_s390_vstrczh:
6647 case SystemZ::BI__builtin_s390_vstrczf:
6648 case SystemZ::BI__builtin_s390_vstrcbs:
6649 case SystemZ::BI__builtin_s390_vstrchs:
6650 case SystemZ::BI__builtin_s390_vstrcfs:
6651 case SystemZ::BI__builtin_s390_vstrczbs:
6652 case SystemZ::BI__builtin_s390_vstrczhs:
6653 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
6654 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
6655 case SystemZ::BI__builtin_s390_vfminsb:
6656 case SystemZ::BI__builtin_s390_vfmaxsb:
6657 case SystemZ::BI__builtin_s390_vfmindb:
6658 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
6659 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
6660 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
6661 case SystemZ::BI__builtin_s390_vclfnhs:
6662 case SystemZ::BI__builtin_s390_vclfnls:
6663 case SystemZ::BI__builtin_s390_vcfn:
6664 case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break;
6665 case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break;
6666 }
6667  return BuiltinConstantArgRange(TheCall, i, l, u);
6668}
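
// For example (sketch, not part of the original file), the checks above mean:
//   __builtin_s390_lcbb(ptr, 16);   // error: operand 1 must be a constant in [0, 15]
//   __builtin_tabort(42);           // error: constant abort codes below 256 are rejected
//   __builtin_tabort(256);          // OK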
6669
6670bool Sema::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
6671 unsigned BuiltinID,
6672 CallExpr *TheCall) {
6673 switch (BuiltinID) {
6674 case WebAssembly::BI__builtin_wasm_ref_null_extern:
6675 return BuiltinWasmRefNullExtern(TheCall);
6676 case WebAssembly::BI__builtin_wasm_ref_null_func:
6677 return BuiltinWasmRefNullFunc(TheCall);
6678 case WebAssembly::BI__builtin_wasm_table_get:
6679 return BuiltinWasmTableGet(TheCall);
6680 case WebAssembly::BI__builtin_wasm_table_set:
6681 return BuiltinWasmTableSet(TheCall);
6682 case WebAssembly::BI__builtin_wasm_table_size:
6683 return BuiltinWasmTableSize(TheCall);
6684 case WebAssembly::BI__builtin_wasm_table_grow:
6685 return BuiltinWasmTableGrow(TheCall);
6686 case WebAssembly::BI__builtin_wasm_table_fill:
6687 return BuiltinWasmTableFill(TheCall);
6688 case WebAssembly::BI__builtin_wasm_table_copy:
6689 return BuiltinWasmTableCopy(TheCall);
6690 }
6691
6692 return false;
6693}
6694
6695void Sema::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
6696 const llvm::StringMap<bool> &FeatureMap) {
6697 ASTContext::BuiltinVectorTypeInfo Info =
6698      Context.getBuiltinVectorTypeInfo(Ty->castAs<BuiltinType>());
6699 unsigned EltSize = Context.getTypeSize(Info.ElementType);
6700 unsigned MinElts = Info.EC.getKnownMinValue();
6701
6702 if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Double) &&
6703 !FeatureMap.lookup("zve64d"))
6704 Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64d";
6705  // (ELEN, LMUL) pairs of (8, mf8), (16, mf4), (32, mf2), (64, m1) require at
6706  // least zve64x
6707 else if (((EltSize == 64 && Info.ElementType->isIntegerType()) ||
6708 MinElts == 1) &&
6709 !FeatureMap.lookup("zve64x"))
6710 Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64x";
6711 else if (Info.ElementType->isFloat16Type() && !FeatureMap.lookup("zvfh") &&
6712 !FeatureMap.lookup("zvfhmin"))
6713 Diag(Loc, diag::err_riscv_type_requires_extension, D)
6714 << Ty << "zvfh or zvfhmin";
6715 else if (Info.ElementType->isBFloat16Type() &&
6716 !FeatureMap.lookup("experimental-zvfbfmin"))
6717 Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zvfbfmin";
6718 else if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Float) &&
6719 !FeatureMap.lookup("zve32f"))
6720 Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32f";
6721  // The caller has already checked isRVVType() before calling this function,
6722  // so if at least zve32x is not supported we need to emit an error.
6723 else if (!FeatureMap.lookup("zve32x"))
6724 Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32x";
6725}
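
// As an illustration (the type names below come from riscv_vector.h and are
// shown only as an example), the checks above require:
//   vint32m1_t   a;   // needs at least zve32x
//   vfloat32m1_t b;   // additionally needs zve32f
//   vint64m1_t   c;   // additionally needs zve64x
//   vfloat16m1_t d;   // additionally needs zvfh or zvfhmin
//   vfloat64m1_t e;   // additionally needs zve64d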
6726
6727bool Sema::CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI,
6728 unsigned BuiltinID,
6729 CallExpr *TheCall) {
6730 switch (BuiltinID) {
6731 case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
6732 case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
6733 case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
6734 case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
6735    return checkArgCountAtMost(*this, TheCall, 3);
6736 }
6737
6738 return false;
6739}
6740
6741// Check if the rounding mode is legal.
6742bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
6743 // Indicates if this instruction has rounding control or just SAE.
6744 bool HasRC = false;
6745
6746 unsigned ArgNum = 0;
6747 switch (BuiltinID) {
6748 default:
6749 return false;
6750 case X86::BI__builtin_ia32_vcvttsd2si32:
6751 case X86::BI__builtin_ia32_vcvttsd2si64:
6752 case X86::BI__builtin_ia32_vcvttsd2usi32:
6753 case X86::BI__builtin_ia32_vcvttsd2usi64:
6754 case X86::BI__builtin_ia32_vcvttss2si32:
6755 case X86::BI__builtin_ia32_vcvttss2si64:
6756 case X86::BI__builtin_ia32_vcvttss2usi32:
6757 case X86::BI__builtin_ia32_vcvttss2usi64:
6758 case X86::BI__builtin_ia32_vcvttsh2si32:
6759 case X86::BI__builtin_ia32_vcvttsh2si64:
6760 case X86::BI__builtin_ia32_vcvttsh2usi32:
6761 case X86::BI__builtin_ia32_vcvttsh2usi64:
6762 ArgNum = 1;
6763 break;
6764 case X86::BI__builtin_ia32_maxpd512:
6765 case X86::BI__builtin_ia32_maxps512:
6766 case X86::BI__builtin_ia32_minpd512:
6767 case X86::BI__builtin_ia32_minps512:
6768 case X86::BI__builtin_ia32_maxph512:
6769 case X86::BI__builtin_ia32_minph512:
6770 ArgNum = 2;
6771 break;
6772 case X86::BI__builtin_ia32_vcvtph2pd512_mask:
6773 case X86::BI__builtin_ia32_vcvtph2psx512_mask:
6774 case X86::BI__builtin_ia32_cvtps2pd512_mask:
6775 case X86::BI__builtin_ia32_cvttpd2dq512_mask:
6776 case X86::BI__builtin_ia32_cvttpd2qq512_mask:
6777 case X86::BI__builtin_ia32_cvttpd2udq512_mask:
6778 case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
6779 case X86::BI__builtin_ia32_cvttps2dq512_mask:
6780 case X86::BI__builtin_ia32_cvttps2qq512_mask:
6781 case X86::BI__builtin_ia32_cvttps2udq512_mask:
6782 case X86::BI__builtin_ia32_cvttps2uqq512_mask:
6783 case X86::BI__builtin_ia32_vcvttph2w512_mask:
6784 case X86::BI__builtin_ia32_vcvttph2uw512_mask:
6785 case X86::BI__builtin_ia32_vcvttph2dq512_mask:
6786 case X86::BI__builtin_ia32_vcvttph2udq512_mask:
6787 case X86::BI__builtin_ia32_vcvttph2qq512_mask:
6788 case X86::BI__builtin_ia32_vcvttph2uqq512_mask:
6789 case X86::BI__builtin_ia32_exp2pd_mask:
6790 case X86::BI__builtin_ia32_exp2ps_mask:
6791 case X86::BI__builtin_ia32_getexppd512_mask:
6792 case X86::BI__builtin_ia32_getexpps512_mask:
6793 case X86::BI__builtin_ia32_getexpph512_mask:
6794 case X86::BI__builtin_ia32_rcp28pd_mask:
6795 case X86::BI__builtin_ia32_rcp28ps_mask:
6796 case X86::BI__builtin_ia32_rsqrt28pd_mask:
6797 case X86::BI__builtin_ia32_rsqrt28ps_mask:
6798 case X86::BI__builtin_ia32_vcomisd:
6799 case X86::BI__builtin_ia32_vcomiss:
6800 case X86::BI__builtin_ia32_vcomish:
6801 case X86::BI__builtin_ia32_vcvtph2ps512_mask:
6802 ArgNum = 3;
6803 break;
6804 case X86::BI__builtin_ia32_cmppd512_mask:
6805 case X86::BI__builtin_ia32_cmpps512_mask:
6806 case X86::BI__builtin_ia32_cmpsd_mask:
6807 case X86::BI__builtin_ia32_cmpss_mask:
6808 case X86::BI__builtin_ia32_cmpsh_mask:
6809 case X86::BI__builtin_ia32_vcvtsh2sd_round_mask:
6810 case X86::BI__builtin_ia32_vcvtsh2ss_round_mask:
6811 case X86::BI__builtin_ia32_cvtss2sd_round_mask:
6812 case X86::BI__builtin_ia32_getexpsd128_round_mask:
6813 case X86::BI__builtin_ia32_getexpss128_round_mask:
6814 case X86::BI__builtin_ia32_getexpsh128_round_mask:
6815 case X86::BI__builtin_ia32_getmantpd512_mask:
6816 case X86::BI__builtin_ia32_getmantps512_mask:
6817 case X86::BI__builtin_ia32_getmantph512_mask:
6818 case X86::BI__builtin_ia32_maxsd_round_mask:
6819 case X86::BI__builtin_ia32_maxss_round_mask:
6820 case X86::BI__builtin_ia32_maxsh_round_mask:
6821 case X86::BI__builtin_ia32_minsd_round_mask:
6822 case X86::BI__builtin_ia32_minss_round_mask:
6823 case X86::BI__builtin_ia32_minsh_round_mask:
6824 case X86::BI__builtin_ia32_rcp28sd_round_mask:
6825 case X86::BI__builtin_ia32_rcp28ss_round_mask:
6826 case X86::BI__builtin_ia32_reducepd512_mask:
6827 case X86::BI__builtin_ia32_reduceps512_mask:
6828 case X86::BI__builtin_ia32_reduceph512_mask:
6829 case X86::BI__builtin_ia32_rndscalepd_mask:
6830 case X86::BI__builtin_ia32_rndscaleps_mask:
6831 case X86::BI__builtin_ia32_rndscaleph_mask:
6832 case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
6833 case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
6834 ArgNum = 4;
6835 break;
6836 case X86::BI__builtin_ia32_fixupimmpd512_mask:
6837 case X86::BI__builtin_ia32_fixupimmpd512_maskz:
6838 case X86::BI__builtin_ia32_fixupimmps512_mask:
6839 case X86::BI__builtin_ia32_fixupimmps512_maskz:
6840 case X86::BI__builtin_ia32_fixupimmsd_mask:
6841 case X86::BI__builtin_ia32_fixupimmsd_maskz:
6842 case X86::BI__builtin_ia32_fixupimmss_mask:
6843 case X86::BI__builtin_ia32_fixupimmss_maskz:
6844 case X86::BI__builtin_ia32_getmantsd_round_mask:
6845 case X86::BI__builtin_ia32_getmantss_round_mask:
6846 case X86::BI__builtin_ia32_getmantsh_round_mask:
6847 case X86::BI__builtin_ia32_rangepd512_mask:
6848 case X86::BI__builtin_ia32_rangeps512_mask:
6849 case X86::BI__builtin_ia32_rangesd128_round_mask:
6850 case X86::BI__builtin_ia32_rangess128_round_mask:
6851 case X86::BI__builtin_ia32_reducesd_mask:
6852 case X86::BI__builtin_ia32_reducess_mask:
6853 case X86::BI__builtin_ia32_reducesh_mask:
6854 case X86::BI__builtin_ia32_rndscalesd_round_mask:
6855 case X86::BI__builtin_ia32_rndscaless_round_mask:
6856 case X86::BI__builtin_ia32_rndscalesh_round_mask:
6857 ArgNum = 5;
6858 break;
6859 case X86::BI__builtin_ia32_vcvtsd2si64:
6860 case X86::BI__builtin_ia32_vcvtsd2si32:
6861 case X86::BI__builtin_ia32_vcvtsd2usi32:
6862 case X86::BI__builtin_ia32_vcvtsd2usi64:
6863 case X86::BI__builtin_ia32_vcvtss2si32:
6864 case X86::BI__builtin_ia32_vcvtss2si64:
6865 case X86::BI__builtin_ia32_vcvtss2usi32:
6866 case X86::BI__builtin_ia32_vcvtss2usi64:
6867 case X86::BI__builtin_ia32_vcvtsh2si32:
6868 case X86::BI__builtin_ia32_vcvtsh2si64:
6869 case X86::BI__builtin_ia32_vcvtsh2usi32:
6870 case X86::BI__builtin_ia32_vcvtsh2usi64:
6871 case X86::BI__builtin_ia32_sqrtpd512:
6872 case X86::BI__builtin_ia32_sqrtps512:
6873 case X86::BI__builtin_ia32_sqrtph512:
6874 ArgNum = 1;
6875 HasRC = true;
6876 break;
6877 case X86::BI__builtin_ia32_addph512:
6878 case X86::BI__builtin_ia32_divph512:
6879 case X86::BI__builtin_ia32_mulph512:
6880 case X86::BI__builtin_ia32_subph512:
6881 case X86::BI__builtin_ia32_addpd512:
6882 case X86::BI__builtin_ia32_addps512:
6883 case X86::BI__builtin_ia32_divpd512:
6884 case X86::BI__builtin_ia32_divps512:
6885 case X86::BI__builtin_ia32_mulpd512:
6886 case X86::BI__builtin_ia32_mulps512:
6887 case X86::BI__builtin_ia32_subpd512:
6888 case X86::BI__builtin_ia32_subps512:
6889 case X86::BI__builtin_ia32_cvtsi2sd64:
6890 case X86::BI__builtin_ia32_cvtsi2ss32:
6891 case X86::BI__builtin_ia32_cvtsi2ss64:
6892 case X86::BI__builtin_ia32_cvtusi2sd64:
6893 case X86::BI__builtin_ia32_cvtusi2ss32:
6894 case X86::BI__builtin_ia32_cvtusi2ss64:
6895 case X86::BI__builtin_ia32_vcvtusi2sh:
6896 case X86::BI__builtin_ia32_vcvtusi642sh:
6897 case X86::BI__builtin_ia32_vcvtsi2sh:
6898 case X86::BI__builtin_ia32_vcvtsi642sh:
6899 ArgNum = 2;
6900 HasRC = true;
6901 break;
6902 case X86::BI__builtin_ia32_cvtdq2ps512_mask:
6903 case X86::BI__builtin_ia32_cvtudq2ps512_mask:
6904 case X86::BI__builtin_ia32_vcvtpd2ph512_mask:
6905 case X86::BI__builtin_ia32_vcvtps2phx512_mask:
6906 case X86::BI__builtin_ia32_cvtpd2ps512_mask:
6907 case X86::BI__builtin_ia32_cvtpd2dq512_mask:
6908 case X86::BI__builtin_ia32_cvtpd2qq512_mask:
6909 case X86::BI__builtin_ia32_cvtpd2udq512_mask:
6910 case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
6911 case X86::BI__builtin_ia32_cvtps2dq512_mask:
6912 case X86::BI__builtin_ia32_cvtps2qq512_mask:
6913 case X86::BI__builtin_ia32_cvtps2udq512_mask:
6914 case X86::BI__builtin_ia32_cvtps2uqq512_mask:
6915 case X86::BI__builtin_ia32_cvtqq2pd512_mask:
6916 case X86::BI__builtin_ia32_cvtqq2ps512_mask:
6917 case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
6918 case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
6919 case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
6920 case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
6921 case X86::BI__builtin_ia32_vcvtw2ph512_mask:
6922 case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
6923 case X86::BI__builtin_ia32_vcvtph2w512_mask:
6924 case X86::BI__builtin_ia32_vcvtph2uw512_mask:
6925 case X86::BI__builtin_ia32_vcvtph2dq512_mask:
6926 case X86::BI__builtin_ia32_vcvtph2udq512_mask:
6927 case X86::BI__builtin_ia32_vcvtph2qq512_mask:
6928 case X86::BI__builtin_ia32_vcvtph2uqq512_mask:
6929 case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
6930 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
6931 ArgNum = 3;
6932 HasRC = true;
6933 break;
6934 case X86::BI__builtin_ia32_addsh_round_mask:
6935 case X86::BI__builtin_ia32_addss_round_mask:
6936 case X86::BI__builtin_ia32_addsd_round_mask:
6937 case X86::BI__builtin_ia32_divsh_round_mask:
6938 case X86::BI__builtin_ia32_divss_round_mask:
6939 case X86::BI__builtin_ia32_divsd_round_mask:
6940 case X86::BI__builtin_ia32_mulsh_round_mask:
6941 case X86::BI__builtin_ia32_mulss_round_mask:
6942 case X86::BI__builtin_ia32_mulsd_round_mask:
6943 case X86::BI__builtin_ia32_subsh_round_mask:
6944 case X86::BI__builtin_ia32_subss_round_mask:
6945 case X86::BI__builtin_ia32_subsd_round_mask:
6946 case X86::BI__builtin_ia32_scalefph512_mask:
6947 case X86::BI__builtin_ia32_scalefpd512_mask:
6948 case X86::BI__builtin_ia32_scalefps512_mask:
6949 case X86::BI__builtin_ia32_scalefsd_round_mask:
6950 case X86::BI__builtin_ia32_scalefss_round_mask:
6951 case X86::BI__builtin_ia32_scalefsh_round_mask:
6952 case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
6953 case X86::BI__builtin_ia32_vcvtss2sh_round_mask:
6954 case X86::BI__builtin_ia32_vcvtsd2sh_round_mask:
6955 case X86::BI__builtin_ia32_sqrtsd_round_mask:
6956 case X86::BI__builtin_ia32_sqrtss_round_mask:
6957 case X86::BI__builtin_ia32_sqrtsh_round_mask:
6958 case X86::BI__builtin_ia32_vfmaddsd3_mask:
6959 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
6960 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
6961 case X86::BI__builtin_ia32_vfmaddss3_mask:
6962 case X86::BI__builtin_ia32_vfmaddss3_maskz:
6963 case X86::BI__builtin_ia32_vfmaddss3_mask3:
6964 case X86::BI__builtin_ia32_vfmaddsh3_mask:
6965 case X86::BI__builtin_ia32_vfmaddsh3_maskz:
6966 case X86::BI__builtin_ia32_vfmaddsh3_mask3:
6967 case X86::BI__builtin_ia32_vfmaddpd512_mask:
6968 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
6969 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
6970 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
6971 case X86::BI__builtin_ia32_vfmaddps512_mask:
6972 case X86::BI__builtin_ia32_vfmaddps512_maskz:
6973 case X86::BI__builtin_ia32_vfmaddps512_mask3:
6974 case X86::BI__builtin_ia32_vfmsubps512_mask3:
6975 case X86::BI__builtin_ia32_vfmaddph512_mask:
6976 case X86::BI__builtin_ia32_vfmaddph512_maskz:
6977 case X86::BI__builtin_ia32_vfmaddph512_mask3:
6978 case X86::BI__builtin_ia32_vfmsubph512_mask3:
6979 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
6980 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
6981 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
6982 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
6983 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
6984 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
6985 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
6986 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
6987 case X86::BI__builtin_ia32_vfmaddsubph512_mask:
6988 case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
6989 case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
6990 case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
6991 case X86::BI__builtin_ia32_vfmaddcsh_mask:
6992 case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
6993 case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
6994 case X86::BI__builtin_ia32_vfmaddcph512_mask:
6995 case X86::BI__builtin_ia32_vfmaddcph512_maskz:
6996 case X86::BI__builtin_ia32_vfmaddcph512_mask3:
6997 case X86::BI__builtin_ia32_vfcmaddcsh_mask:
6998 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
6999 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
7000 case X86::BI__builtin_ia32_vfcmaddcph512_mask:
7001 case X86::BI__builtin_ia32_vfcmaddcph512_maskz:
7002 case X86::BI__builtin_ia32_vfcmaddcph512_mask3:
7003 case X86::BI__builtin_ia32_vfmulcsh_mask:
7004 case X86::BI__builtin_ia32_vfmulcph512_mask:
7005 case X86::BI__builtin_ia32_vfcmulcsh_mask:
7006 case X86::BI__builtin_ia32_vfcmulcph512_mask:
7007 ArgNum = 4;
7008 HasRC = true;
7009 break;
7010 }
7011
7012 llvm::APSInt Result;
7013
7014 // We can't check the value of a dependent argument.
7015  Expr *Arg = TheCall->getArg(ArgNum);
7016 if (Arg->isTypeDependent() || Arg->isValueDependent())
7017 return false;
7018
7019 // Check constant-ness first.
7020 if (BuiltinConstantArg(TheCall, ArgNum, Result))
7021 return true;
7022
7023  // Make sure the rounding mode is either ROUND_CUR_DIRECTION or has the
7024  // ROUND_NO_EXC bit set. If the intrinsic has rounding control (bits 1:0),
7025  // make sure it is only combined with ROUND_NO_EXC. If the intrinsic does not
7026  // have rounding control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
7027 if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
7028 Result == 8/*ROUND_NO_EXC*/ ||
7029 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
7030 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
7031 return false;
7032
7033 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
7034 << Arg->getSourceRange();
7035}
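
// A minimal usage sketch, assuming the usual <immintrin.h> rounding macros
// (_MM_FROUND_CUR_DIRECTION == 4, _MM_FROUND_NO_EXC == 8,
// _MM_FROUND_TO_NEAREST_INT == 0); the intrinsic shown is only an example:
//   _mm512_add_round_ps(a, b, _MM_FROUND_CUR_DIRECTION);                      // OK
//   _mm512_add_round_ps(a, b, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC); // OK
//   _mm512_add_round_ps(a, b, _MM_FROUND_TO_NEAREST_INT); // rejected: a rounding
//                                                         // mode without NO_EXC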
7036
7037// Check if the gather/scatter scale is legal.
7038bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
7039 CallExpr *TheCall) {
7040 unsigned ArgNum = 0;
7041 switch (BuiltinID) {
7042 default:
7043 return false;
7044 case X86::BI__builtin_ia32_gatherpfdpd:
7045 case X86::BI__builtin_ia32_gatherpfdps:
7046 case X86::BI__builtin_ia32_gatherpfqpd:
7047 case X86::BI__builtin_ia32_gatherpfqps:
7048 case X86::BI__builtin_ia32_scatterpfdpd:
7049 case X86::BI__builtin_ia32_scatterpfdps:
7050 case X86::BI__builtin_ia32_scatterpfqpd:
7051 case X86::BI__builtin_ia32_scatterpfqps:
7052 ArgNum = 3;
7053 break;
7054 case X86::BI__builtin_ia32_gatherd_pd:
7055 case X86::BI__builtin_ia32_gatherd_pd256:
7056 case X86::BI__builtin_ia32_gatherq_pd:
7057 case X86::BI__builtin_ia32_gatherq_pd256:
7058 case X86::BI__builtin_ia32_gatherd_ps:
7059 case X86::BI__builtin_ia32_gatherd_ps256:
7060 case X86::BI__builtin_ia32_gatherq_ps:
7061 case X86::BI__builtin_ia32_gatherq_ps256:
7062 case X86::BI__builtin_ia32_gatherd_q:
7063 case X86::BI__builtin_ia32_gatherd_q256:
7064 case X86::BI__builtin_ia32_gatherq_q:
7065 case X86::BI__builtin_ia32_gatherq_q256:
7066 case X86::BI__builtin_ia32_gatherd_d:
7067 case X86::BI__builtin_ia32_gatherd_d256:
7068 case X86::BI__builtin_ia32_gatherq_d:
7069 case X86::BI__builtin_ia32_gatherq_d256:
7070 case X86::BI__builtin_ia32_gather3div2df:
7071 case X86::BI__builtin_ia32_gather3div2di:
7072 case X86::BI__builtin_ia32_gather3div4df:
7073 case X86::BI__builtin_ia32_gather3div4di:
7074 case X86::BI__builtin_ia32_gather3div4sf:
7075 case X86::BI__builtin_ia32_gather3div4si:
7076 case X86::BI__builtin_ia32_gather3div8sf:
7077 case X86::BI__builtin_ia32_gather3div8si:
7078 case X86::BI__builtin_ia32_gather3siv2df:
7079 case X86::BI__builtin_ia32_gather3siv2di:
7080 case X86::BI__builtin_ia32_gather3siv4df:
7081 case X86::BI__builtin_ia32_gather3siv4di:
7082 case X86::BI__builtin_ia32_gather3siv4sf:
7083 case X86::BI__builtin_ia32_gather3siv4si:
7084 case X86::BI__builtin_ia32_gather3siv8sf:
7085 case X86::BI__builtin_ia32_gather3siv8si:
7086 case X86::BI__builtin_ia32_gathersiv8df:
7087 case X86::BI__builtin_ia32_gathersiv16sf:
7088 case X86::BI__builtin_ia32_gatherdiv8df:
7089 case X86::BI__builtin_ia32_gatherdiv16sf:
7090 case X86::BI__builtin_ia32_gathersiv8di:
7091 case X86::BI__builtin_ia32_gathersiv16si:
7092 case X86::BI__builtin_ia32_gatherdiv8di:
7093 case X86::BI__builtin_ia32_gatherdiv16si:
7094 case X86::BI__builtin_ia32_scatterdiv2df:
7095 case X86::BI__builtin_ia32_scatterdiv2di:
7096 case X86::BI__builtin_ia32_scatterdiv4df:
7097 case X86::BI__builtin_ia32_scatterdiv4di:
7098 case X86::BI__builtin_ia32_scatterdiv4sf:
7099 case X86::BI__builtin_ia32_scatterdiv4si:
7100 case X86::BI__builtin_ia32_scatterdiv8sf:
7101 case X86::BI__builtin_ia32_scatterdiv8si:
7102 case X86::BI__builtin_ia32_scattersiv2df:
7103 case X86::BI__builtin_ia32_scattersiv2di:
7104 case X86::BI__builtin_ia32_scattersiv4df:
7105 case X86::BI__builtin_ia32_scattersiv4di:
7106 case X86::BI__builtin_ia32_scattersiv4sf:
7107 case X86::BI__builtin_ia32_scattersiv4si:
7108 case X86::BI__builtin_ia32_scattersiv8sf:
7109 case X86::BI__builtin_ia32_scattersiv8si:
7110 case X86::BI__builtin_ia32_scattersiv8df:
7111 case X86::BI__builtin_ia32_scattersiv16sf:
7112 case X86::BI__builtin_ia32_scatterdiv8df:
7113 case X86::BI__builtin_ia32_scatterdiv16sf:
7114 case X86::BI__builtin_ia32_scattersiv8di:
7115 case X86::BI__builtin_ia32_scattersiv16si:
7116 case X86::BI__builtin_ia32_scatterdiv8di:
7117 case X86::BI__builtin_ia32_scatterdiv16si:
7118 ArgNum = 4;
7119 break;
7120 }
7121
7122 llvm::APSInt Result;
7123
7124 // We can't check the value of a dependent argument.
7125  Expr *Arg = TheCall->getArg(ArgNum);
7126 if (Arg->isTypeDependent() || Arg->isValueDependent())
7127 return false;
7128
7129 // Check constant-ness first.
7130 if (BuiltinConstantArg(TheCall, ArgNum, Result))
7131 return true;
7132
7133 if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
7134 return false;
7135
7136 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
7137 << Arg->getSourceRange();
7138}
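
// For illustration (the intrinsic below is just one example of a builtin in
// the list above), the scale immediate of a gather/scatter must be 1, 2, 4 or 8:
//   __m512 v = _mm512_i32gather_ps(idx, base, 4);   // OK
//   __m512 w = _mm512_i32gather_ps(idx, base, 3);   // err_x86_builtin_invalid_scale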
7139
7140enum { TileRegLow = 0, TileRegHigh = 7 };
7141
7142bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
7143 ArrayRef<int> ArgNums) {
7144 for (int ArgNum : ArgNums) {
7145    if (BuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
7146 return true;
7147 }
7148 return false;
7149}
7150
7151bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
7152 ArrayRef<int> ArgNums) {
7153  // The maximum number of tile registers is TileRegHigh + 1, so use one bit
7154  // per register in the bitset to track which registers are used.
7155 std::bitset<TileRegHigh + 1> ArgValues;
7156 for (int ArgNum : ArgNums) {
7157    Expr *Arg = TheCall->getArg(ArgNum);
7158 if (Arg->isTypeDependent() || Arg->isValueDependent())
7159 continue;
7160
7161 llvm::APSInt Result;
7162 if (BuiltinConstantArg(TheCall, ArgNum, Result))
7163 return true;
7164 int ArgExtValue = Result.getExtValue();
7165 assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
7166 "Incorrect tile register num.");
7167 if (ArgValues.test(ArgExtValue))
7168 return Diag(TheCall->getBeginLoc(),
7169 diag::err_x86_builtin_tile_arg_duplicate)
7170 << TheCall->getArg(ArgNum)->getSourceRange();
7171    ArgValues.set(ArgExtValue);
7172 }
7173 return false;
7174}
7175
7176bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
7177 ArrayRef<int> ArgNums) {
7178 return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
7179 CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
7180}
7181
7182bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
7183 switch (BuiltinID) {
7184 default:
7185 return false;
7186 case X86::BI__builtin_ia32_tileloadd64:
7187 case X86::BI__builtin_ia32_tileloaddt164:
7188 case X86::BI__builtin_ia32_tilestored64:
7189 case X86::BI__builtin_ia32_tilezero:
7190    return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
7191 case X86::BI__builtin_ia32_tdpbssd:
7192 case X86::BI__builtin_ia32_tdpbsud:
7193 case X86::BI__builtin_ia32_tdpbusd:
7194 case X86::BI__builtin_ia32_tdpbuud:
7195 case X86::BI__builtin_ia32_tdpbf16ps:
7196 case X86::BI__builtin_ia32_tdpfp16ps:
7197 case X86::BI__builtin_ia32_tcmmimfp16ps:
7198 case X86::BI__builtin_ia32_tcmmrlfp16ps:
7199    return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
7200 }
7201}
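
// Illustrative examples (AMX intrinsic names assumed from <immintrin.h>) of
// the tile rules enforced above: tile register operands must be constants in
// [TileRegLow, TileRegHigh] and the tiles of a dot-product must be distinct:
//   _tile_zero(3);            // OK
//   _tile_zero(8);            // error: tile register out of range
//   _tile_dpbssd(0, 1, 2);    // OK
//   _tile_dpbssd(0, 1, 1);    // err_x86_builtin_tile_arg_duplicate
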
7202static bool isX86_32Builtin(unsigned BuiltinID) {
7203 // These builtins only work on x86-32 targets.
7204 switch (BuiltinID) {
7205 case X86::BI__builtin_ia32_readeflags_u32:
7206 case X86::BI__builtin_ia32_writeeflags_u32:
7207 return true;
7208 }
7209
7210 return false;
7211}
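
// For example, when targeting x86-64 the check in CheckX86BuiltinFunctionCall
// below rejects a call such as
//   __builtin_ia32_readeflags_u32();
// with err_32_bit_builtin_64_bit_tgt; only the builtins listed above are
// affected.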
7212
7213bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
7214 CallExpr *TheCall) {
7215 // Check for 32-bit only builtins on a 64-bit target.
7216 const llvm::Triple &TT = TI.getTriple();
7217 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
7218 return Diag(TheCall->getCallee()->getBeginLoc(),
7219 diag::err_32_bit_builtin_64_bit_tgt);
7220
7221  // If the intrinsic has rounding or SAE, make sure it's valid.
7222 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
7223 return true;
7224
7225  // If the intrinsic has a gather/scatter scale immediate, make sure it's valid.
7226 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
7227 return true;
7228
7229  // If the intrinsic has tile arguments, make sure they are valid.
7230 if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
7231 return true;
7232
7233 // For intrinsics which take an immediate value as part of the instruction,
7234 // range check them here.
7235 int i = 0, l = 0, u = 0;
7236 switch (BuiltinID) {
7237 default:
7238 return false;
7239 case X86::BI__builtin_ia32_vec_ext_v2si:
7240 case X86::BI__builtin_ia32_vec_ext_v2di:
7241 case X86::BI__builtin_ia32_vextractf128_pd256:
7242 case X86::BI__builtin_ia32_vextractf128_ps256:
7243 case X86::BI__builtin_ia32_vextractf128_si256:
7244 case X86::BI__builtin_ia32_extract128i256:
7245 case X86::BI__builtin_ia32_extractf64x4_mask:
7246 case X86::BI__builtin_ia32_extracti64x4_mask:
7247 case X86::BI__builtin_ia32_extractf32x8_mask:
7248 case X86::BI__builtin_ia32_extracti32x8_mask:
7249 case X86::BI__builtin_ia32_extractf64x2_256_mask:
7250 case X86::BI__builtin_ia32_extracti64x2_256_mask:
7251 case X86::BI__builtin_ia32_extractf32x4_256_mask:
7252 case X86::BI__builtin_ia32_extracti32x4_256_mask:
7253 i = 1; l = 0; u = 1;
7254 break;
7255 case X86::BI__builtin_ia32_vec_set_v2di:
7256 case X86::BI__builtin_ia32_vinsertf128_pd256:
7257 case X86::BI__builtin_ia32_vinsertf128_ps256:
7258 case X86::BI__builtin_ia32_vinsertf128_si256:
7259 case X86::BI__builtin_ia32_insert128i256:
7260 case X86::BI__builtin_ia32_insertf32x8:
7261 case X86::BI__builtin_ia32_inserti32x8:
7262 case X86::BI__builtin_ia32_insertf64x4:
7263 case X86::BI__builtin_ia32_inserti64x4:
7264 case X86::BI__builtin_ia32_insertf64x2_256:
7265 case X86::BI__builtin_ia32_inserti64x2_256:
7266 case X86::BI__builtin_ia32_insertf32x4_256:
7267 case X86::BI__builtin_ia32_inserti32x4_256:
7268 i = 2; l = 0; u = 1;
7269 break;
7270 case X86::BI__builtin_ia32_vpermilpd:
7271 case X86::BI__builtin_ia32_vec_ext_v4hi:
7272 case X86::BI__builtin_ia32_vec_ext_v4si:
7273 case X86::BI__builtin_ia32_vec_ext_v4sf:
7274 case X86::BI__builtin_ia32_vec_ext_v4di:
7275 case X86::BI__builtin_ia32_extractf32x4_mask:
7276 case X86::BI__builtin_ia32_extracti32x4_mask:
7277 case X86::BI__builtin_ia32_extractf64x2_512_mask:
7278 case X86::BI__builtin_ia32_extracti64x2_512_mask:
7279 i = 1; l = 0; u = 3;
7280 break;
7281 case X86::BI_mm_prefetch:
7282 case X86::BI__builtin_ia32_vec_ext_v8hi:
7283 case X86::BI__builtin_ia32_vec_ext_v8si:
7284 i = 1; l = 0; u = 7;
7285 break;
7286 case X86::BI__builtin_ia32_sha1rnds4:
7287 case X86::BI__builtin_ia32_blendpd:
7288 case X86::BI__builtin_ia32_shufpd:
7289 case X86::BI__builtin_ia32_vec_set_v4hi:
7290 case X86::BI__builtin_ia32_vec_set_v4si:
7291 case X86::BI__builtin_ia32_vec_set_v4di:
7292 case X86::BI__builtin_ia32_shuf_f32x4_256:
7293 case X86::BI__builtin_ia32_shuf_f64x2_256:
7294 case X86::BI__builtin_ia32_shuf_i32x4_256:
7295 case X86::BI__builtin_ia32_shuf_i64x2_256:
7296 case X86::BI__builtin_ia32_insertf64x2_512:
7297 case X86::BI__builtin_ia32_inserti64x2_512:
7298 case X86::BI__builtin_ia32_insertf32x4:
7299 case X86::BI__builtin_ia32_inserti32x4:
7300 i = 2; l = 0; u = 3;
7301 break;
7302 case X86::BI__builtin_ia32_vpermil2pd:
7303 case X86::BI__builtin_ia32_vpermil2pd256:
7304 case X86::BI__builtin_ia32_vpermil2ps:
7305 case X86::BI__builtin_ia32_vpermil2ps256:
7306 i = 3; l = 0; u = 3;
7307 break;
7308 case X86::BI__builtin_ia32_cmpb128_mask:
7309 case X86::BI__builtin_ia32_cmpw128_mask:
7310 case X86::BI__builtin_ia32_cmpd128_mask:
7311 case X86::BI__builtin_ia32_cmpq128_mask:
7312 case X86::BI__builtin_ia32_cmpb256_mask:
7313 case X86::BI__builtin_ia32_cmpw256_mask:
7314 case X86::BI__builtin_ia32_cmpd256_mask:
7315 case X86::BI__builtin_ia32_cmpq256_mask:
7316 case X86::BI__builtin_ia32_cmpb512_mask:
7317 case X86::BI__builtin_ia32_cmpw512_mask:
7318 case X86::BI__builtin_ia32_cmpd512_mask:
7319 case X86::BI__builtin_ia32_cmpq512_mask:
7320 case X86::BI__builtin_ia32_ucmpb128_mask:
7321 case X86::BI__builtin_ia32_ucmpw128_mask:
7322 case X86::BI__builtin_ia32_ucmpd128_mask:
7323 case X86::BI__builtin_ia32_ucmpq128_mask:
7324 case X86::BI__builtin_ia32_ucmpb256_mask:
7325 case X86::BI__builtin_ia32_ucmpw256_mask:
7326 case X86::BI__builtin_ia32_ucmpd256_mask:
7327 case X86::BI__builtin_ia32_ucmpq256_mask:
7328 case X86::BI__builtin_ia32_ucmpb512_mask:
7329 case X86::BI__builtin_ia32_ucmpw512_mask:
7330 case X86::BI__builtin_ia32_ucmpd512_mask:
7331 case X86::BI__builtin_ia32_ucmpq512_mask:
7332 case X86::BI__builtin_ia32_vpcomub:
7333 case X86::BI__builtin_ia32_vpcomuw:
7334 case X86::BI__builtin_ia32_vpcomud:
7335 case X86::BI__builtin_ia32_vpcomuq:
7336 case X86::BI__builtin_ia32_vpcomb:
7337 case X86::BI__builtin_ia32_vpcomw:
7338 case X86::BI__builtin_ia32_vpcomd:
7339 case X86::BI__builtin_ia32_vpcomq:
7340 case X86::BI__builtin_ia32_vec_set_v8hi:
7341 case X86::BI__builtin_ia32_vec_set_v8si:
7342 i = 2; l = 0; u = 7;
7343 break;
7344 case X86::BI__builtin_ia32_vpermilpd256:
7345 case X86::BI__builtin_ia32_roundps:
7346 case X86::BI__builtin_ia32_roundpd:
7347 case X86::BI__builtin_ia32_roundps256:
7348 case X86::BI__builtin_ia32_roundpd256:
7349 case X86::BI__builtin_ia32_getmantpd128_mask:
7350 case X86::BI__builtin_ia32_getmantpd256_mask:
7351 case X86::BI__builtin_ia32_getmantps128_mask:
7352 case X86::BI__builtin_ia32_getmantps256_mask:
7353 case X86::BI__builtin_ia32_getmantpd512_mask:
7354 case X86::BI__builtin_ia32_getmantps512_mask:
7355 case X86::BI__builtin_ia32_getmantph128_mask:
7356 case X86::BI__builtin_ia32_getmantph256_mask:
7357 case X86::BI__builtin_ia32_getmantph512_mask:
7358 case X86::BI__builtin_ia32_vec_ext_v16qi:
7359 case X86::BI__builtin_ia32_vec_ext_v16hi:
7360 i = 1; l = 0; u = 15;
7361 break;
7362 case X86::BI__builtin_ia32_pblendd128:
7363 case X86::BI__builtin_ia32_blendps:
7364 case X86::BI__builtin_ia32_blendpd256:
7365 case X86::BI__builtin_ia32_shufpd256:
7366 case X86::BI__builtin_ia32_roundss:
7367 case X86::BI__builtin_ia32_roundsd:
7368 case X86::BI__builtin_ia32_rangepd128_mask:
7369 case X86::BI__builtin_ia32_rangepd256_mask:
7370 case X86::BI__builtin_ia32_rangepd512_mask:
7371 case X86::BI__builtin_ia32_rangeps128_mask:
7372 case X86::BI__builtin_ia32_rangeps256_mask:
7373 case X86::BI__builtin_ia32_rangeps512_mask:
7374 case X86::BI__builtin_ia32_getmantsd_round_mask:
7375 case X86::BI__builtin_ia32_getmantss_round_mask:
7376 case X86::BI__builtin_ia32_getmantsh_round_mask:
7377 case X86::BI__builtin_ia32_vec_set_v16qi:
7378 case X86::BI__builtin_ia32_vec_set_v16hi:
7379 i = 2; l = 0; u = 15;
7380 break;
7381 case X86::BI__builtin_ia32_vec_ext_v32qi:
7382 i = 1; l = 0; u = 31;
7383 break;
7384 case X86::BI__builtin_ia32_cmpps:
7385 case X86::BI__builtin_ia32_cmpss:
7386 case X86::BI__builtin_ia32_cmppd:
7387 case X86::BI__builtin_ia32_cmpsd:
7388 case X86::BI__builtin_ia32_cmpps256:
7389 case X86::BI__builtin_ia32_cmppd256:
7390 case X86::BI__builtin_ia32_cmpps128_mask:
7391 case X86::BI__builtin_ia32_cmppd128_mask:
7392 case X86::BI__builtin_ia32_cmpps256_mask:
7393 case X86::BI__builtin_ia32_cmppd256_mask:
7394 case X86::BI__builtin_ia32_cmpps512_mask:
7395 case X86::BI__builtin_ia32_cmppd512_mask:
7396 case X86::BI__builtin_ia32_cmpsd_mask:
7397 case X86::BI__builtin_ia32_cmpss_mask:
7398 case X86::BI__builtin_ia32_vec_set_v32qi:
7399 i = 2; l = 0; u = 31;
7400 break;
7401 case X86::BI__builtin_ia32_permdf256:
7402 case X86::BI__builtin_ia32_permdi256:
7403 case X86::BI__builtin_ia32_permdf512:
7404 case X86::BI__builtin_ia32_permdi512:
7405 case X86::BI__builtin_ia32_vpermilps:
7406 case X86::BI__builtin_ia32_vpermilps256:
7407 case X86::BI__builtin_ia32_vpermilpd512:
7408 case X86::BI__builtin_ia32_vpermilps512:
7409 case X86::BI__builtin_ia32_pshufd:
7410 case X86::BI__builtin_ia32_pshufd256:
7411 case X86::BI__builtin_ia32_pshufd512:
7412 case X86::BI__builtin_ia32_pshufhw:
7413 case X86::BI__builtin_ia32_pshufhw256:
7414 case X86::BI__builtin_ia32_pshufhw512:
7415 case X86::BI__builtin_ia32_pshuflw:
7416 case X86::BI__builtin_ia32_pshuflw256:
7417 case X86::BI__builtin_ia32_pshuflw512:
7418 case X86::BI__builtin_ia32_vcvtps2ph:
7419 case X86::BI__builtin_ia32_vcvtps2ph_mask:
7420 case X86::BI__builtin_ia32_vcvtps2ph256:
7421 case X86::BI__builtin_ia32_vcvtps2ph256_mask:
7422 case X86::BI__builtin_ia32_vcvtps2ph512_mask:
7423 case X86::BI__builtin_ia32_rndscaleps_128_mask:
7424 case X86::BI__builtin_ia32_rndscalepd_128_mask:
7425 case X86::BI__builtin_ia32_rndscaleps_256_mask:
7426 case X86::BI__builtin_ia32_rndscalepd_256_mask:
7427 case X86::BI__builtin_ia32_rndscaleps_mask:
7428 case X86::BI__builtin_ia32_rndscalepd_mask:
7429 case X86::BI__builtin_ia32_rndscaleph_mask:
7430 case X86::BI__builtin_ia32_reducepd128_mask:
7431 case X86::BI__builtin_ia32_reducepd256_mask:
7432 case X86::BI__builtin_ia32_reducepd512_mask:
7433 case X86::BI__builtin_ia32_reduceps128_mask:
7434 case X86::BI__builtin_ia32_reduceps256_mask:
7435 case X86::BI__builtin_ia32_reduceps512_mask:
7436 case X86::BI__builtin_ia32_reduceph128_mask:
7437 case X86::BI__builtin_ia32_reduceph256_mask:
7438 case X86::BI__builtin_ia32_reduceph512_mask:
7439 case X86::BI__builtin_ia32_prold512:
7440 case X86::BI__builtin_ia32_prolq512:
7441 case X86::BI__builtin_ia32_prold128:
7442 case X86::BI__builtin_ia32_prold256:
7443 case X86::BI__builtin_ia32_prolq128:
7444 case X86::BI__builtin_ia32_prolq256:
7445 case X86::BI__builtin_ia32_prord512:
7446 case X86::BI__builtin_ia32_prorq512:
7447 case X86::BI__builtin_ia32_prord128:
7448 case X86::BI__builtin_ia32_prord256:
7449 case X86::BI__builtin_ia32_prorq128:
7450 case X86::BI__builtin_ia32_prorq256:
7451 case X86::BI__builtin_ia32_fpclasspd128_mask:
7452 case X86::BI__builtin_ia32_fpclasspd256_mask:
7453 case X86::BI__builtin_ia32_fpclassps128_mask:
7454 case X86::BI__builtin_ia32_fpclassps256_mask:
7455 case X86::BI__builtin_ia32_fpclassps512_mask:
7456 case X86::BI__builtin_ia32_fpclasspd512_mask:
7457 case X86::BI__builtin_ia32_fpclassph128_mask:
7458 case X86::BI__builtin_ia32_fpclassph256_mask:
7459 case X86::BI__builtin_ia32_fpclassph512_mask:
7460 case X86::BI__builtin_ia32_fpclasssd_mask:
7461 case X86::BI__builtin_ia32_fpclassss_mask:
7462 case X86::BI__builtin_ia32_fpclasssh_mask:
7463 case X86::BI__builtin_ia32_pslldqi128_byteshift:
7464 case X86::BI__builtin_ia32_pslldqi256_byteshift:
7465 case X86::BI__builtin_ia32_pslldqi512_byteshift:
7466 case X86::BI__builtin_ia32_psrldqi128_byteshift:
7467 case X86::BI__builtin_ia32_psrldqi256_byteshift:
7468 case X86::BI__builtin_ia32_psrldqi512_byteshift:
7469 case X86::BI__builtin_ia32_kshiftliqi:
7470 case X86::BI__builtin_ia32_kshiftlihi:
7471 case X86::BI__builtin_ia32_kshiftlisi:
7472 case X86::BI__builtin_ia32_kshiftlidi:
7473 case X86::BI__builtin_ia32_kshiftriqi:
7474 case X86::BI__builtin_ia32_kshiftrihi:
7475 case X86::BI__builtin_ia32_kshiftrisi:
7476 case X86::BI__builtin_ia32_kshiftridi:
7477 i = 1; l = 0; u = 255;
7478 break;
7479 case X86::BI__builtin_ia32_vperm2f128_pd256:
7480 case X86::BI__builtin_ia32_vperm2f128_ps256:
7481 case X86::BI__builtin_ia32_vperm2f128_si256:
7482 case X86::BI__builtin_ia32_permti256:
7483 case X86::BI__builtin_ia32_pblendw128:
7484 case X86::BI__builtin_ia32_pblendw256:
7485 case X86::BI__builtin_ia32_blendps256:
7486 case X86::BI__builtin_ia32_pblendd256:
7487 case X86::BI__builtin_ia32_palignr128:
7488 case X86::BI__builtin_ia32_palignr256:
7489 case X86::BI__builtin_ia32_palignr512:
7490 case X86::BI__builtin_ia32_alignq512:
7491 case X86::BI__builtin_ia32_alignd512:
7492 case X86::BI__builtin_ia32_alignd128:
7493 case X86::BI__builtin_ia32_alignd256:
7494 case X86::BI__builtin_ia32_alignq128:
7495 case X86::BI__builtin_ia32_alignq256:
7496 case X86::BI__builtin_ia32_vcomisd:
7497 case X86::BI__builtin_ia32_vcomiss:
7498 case X86::BI__builtin_ia32_shuf_f32x4:
7499 case X86::BI__builtin_ia32_shuf_f64x2:
7500 case X86::BI__builtin_ia32_shuf_i32x4:
7501 case X86::BI__builtin_ia32_shuf_i64x2:
7502 case X86::BI__builtin_ia32_shufpd512:
7503 case X86::BI__builtin_ia32_shufps:
7504 case X86::BI__builtin_ia32_shufps256:
7505 case X86::BI__builtin_ia32_shufps512:
7506 case X86::BI__builtin_ia32_dbpsadbw128:
7507 case X86::BI__builtin_ia32_dbpsadbw256:
7508 case X86::BI__builtin_ia32_dbpsadbw512:
7509 case X86::BI__builtin_ia32_vpshldd128:
7510 case X86::BI__builtin_ia32_vpshldd256:
7511 case X86::BI__builtin_ia32_vpshldd512:
7512 case X86::BI__builtin_ia32_vpshldq128:
7513 case X86::BI__builtin_ia32_vpshldq256:
7514 case X86::BI__builtin_ia32_vpshldq512:
7515 case X86::BI__builtin_ia32_vpshldw128:
7516 case X86::BI__builtin_ia32_vpshldw256:
7517 case X86::BI__builtin_ia32_vpshldw512:
7518 case X86::BI__builtin_ia32_vpshrdd128:
7519 case X86::BI__builtin_ia32_vpshrdd256:
7520 case X86::BI__builtin_ia32_vpshrdd512:
7521 case X86::BI__builtin_ia32_vpshrdq128:
7522 case X86::BI__builtin_ia32_vpshrdq256:
7523 case X86::BI__builtin_ia32_vpshrdq512:
7524 case X86::BI__builtin_ia32_vpshrdw128:
7525 case X86::BI__builtin_ia32_vpshrdw256:
7526 case X86::BI__builtin_ia32_vpshrdw512:
7527 i = 2; l = 0; u = 255;
7528 break;
7529 case X86::BI__builtin_ia32_fixupimmpd512_mask:
7530 case X86::BI__builtin_ia32_fixupimmpd512_maskz:
7531 case X86::BI__builtin_ia32_fixupimmps512_mask:
7532 case X86::BI__builtin_ia32_fixupimmps512_maskz:
7533 case X86::BI__builtin_ia32_fixupimmsd_mask:
7534 case X86::BI__builtin_ia32_fixupimmsd_maskz:
7535 case X86::BI__builtin_ia32_fixupimmss_mask:
7536 case X86::BI__builtin_ia32_fixupimmss_maskz:
7537 case X86::BI__builtin_ia32_fixupimmpd128_mask:
7538 case X86::BI__builtin_ia32_fixupimmpd128_maskz:
7539 case X86::BI__builtin_ia32_fixupimmpd256_mask:
7540 case X86::BI__builtin_ia32_fixupimmpd256_maskz:
7541 case X86::BI__builtin_ia32_fixupimmps128_mask:
7542 case X86::BI__builtin_ia32_fixupimmps128_maskz:
7543 case X86::BI__builtin_ia32_fixupimmps256_mask:
7544 case X86::BI__builtin_ia32_fixupimmps256_maskz:
7545 case X86::BI__builtin_ia32_pternlogd512_mask:
7546 case X86::BI__builtin_ia32_pternlogd512_maskz:
7547 case X86::BI__builtin_ia32_pternlogq512_mask:
7548 case X86::BI__builtin_ia32_pternlogq512_maskz:
7549 case X86::BI__builtin_ia32_pternlogd128_mask:
7550 case X86::BI__builtin_ia32_pternlogd128_maskz:
7551 case X86::BI__builtin_ia32_pternlogd256_mask:
7552 case X86::BI__builtin_ia32_pternlogd256_maskz:
7553 case X86::BI__builtin_ia32_pternlogq128_mask:
7554 case X86::BI__builtin_ia32_pternlogq128_maskz:
7555 case X86::BI__builtin_ia32_pternlogq256_mask:
7556 case X86::BI__builtin_ia32_pternlogq256_maskz:
7557 case X86::BI__builtin_ia32_vsm3rnds2:
7558 i = 3; l = 0; u = 255;
7559 break;
7560 case X86::BI__builtin_ia32_gatherpfdpd:
7561 case X86::BI__builtin_ia32_gatherpfdps:
7562 case X86::BI__builtin_ia32_gatherpfqpd:
7563 case X86::BI__builtin_ia32_gatherpfqps:
7564 case X86::BI__builtin_ia32_scatterpfdpd:
7565 case X86::BI__builtin_ia32_scatterpfdps:
7566 case X86::BI__builtin_ia32_scatterpfqpd:
7567 case X86::BI__builtin_ia32_scatterpfqps:
7568 i = 4; l = 2; u = 3;
7569 break;
7570 case X86::BI__builtin_ia32_reducesd_mask:
7571 case X86::BI__builtin_ia32_reducess_mask:
7572 case X86::BI__builtin_ia32_rndscalesd_round_mask:
7573 case X86::BI__builtin_ia32_rndscaless_round_mask:
7574 case X86::BI__builtin_ia32_rndscalesh_round_mask:
7575 case X86::BI__builtin_ia32_reducesh_mask:
7576 i = 4; l = 0; u = 255;
7577 break;
7578 case X86::BI__builtin_ia32_cmpccxadd32:
7579 case X86::BI__builtin_ia32_cmpccxadd64:
7580 i = 3; l = 0; u = 15;
7581 break;
7582 }
7583
7584 // Note that we don't force a hard error on the range check here, allowing
7585 // template-generated or macro-generated dead code to potentially have out-of-
7586 // range values. These need to code generate, but don't need to necessarily
7587 // make any sense. We use a warning that defaults to an error.
7588  return BuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
7589}
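
// Usage sketch for the immediate-range table above (the call shape is only an
// example; operand 1 of _mm_prefetch, X86::BI_mm_prefetch above, is limited to
// [0, 7]):
//   _mm_prefetch(p, 3);   // OK
//   _mm_prefetch(p, 9);   // out-of-range immediate, diagnosed (warning that
//                         // defaults to an error)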
7590
7591/// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
7592/// parameter with the FormatAttr's correct format_idx and firstDataArg.
7593/// Returns true when the format fits the function and the FormatStringInfo has
7594/// been populated.
7595bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
7596 bool IsVariadic, FormatStringInfo *FSI) {
7597 if (Format->getFirstArg() == 0)
7598 FSI->ArgPassingKind = FAPK_VAList;
7599 else if (IsVariadic)
7600 FSI->ArgPassingKind = FAPK_Variadic;
7601 else
7602 FSI->ArgPassingKind = FAPK_Fixed;
7603 FSI->FormatIdx = Format->getFormatIdx() - 1;
7604 FSI->FirstDataArg =
7605 FSI->ArgPassingKind == FAPK_VAList ? 0 : Format->getFirstArg() - 1;
7606
7607 // The way the format attribute works in GCC, the implicit this argument
7608 // of member functions is counted. However, it doesn't appear in our own
7609 // lists, so decrement format_idx in that case.
7610 if (IsCXXMember) {
7611    if (FSI->FormatIdx == 0)
7612 return false;
7613 --FSI->FormatIdx;
7614 if (FSI->FirstDataArg != 0)
7615 --FSI->FirstDataArg;
7616 }
7617 return true;
7618}
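
// For instance, with a member function annotated in the GCC style, where the
// implicit 'this' counts as argument 1 (the declaration is illustrative only):
//   struct Logger {
//     void log(const char *fmt, ...) __attribute__((format(printf, 2, 3)));
//   };
// the attribute carries format_idx 2 / first-data-arg 3; after the adjustments
// above, FSI->FormatIdx is 0 and FSI->FirstDataArg is 1 relative to the
// explicit parameter list.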
7619
7620/// Checks if the given expression evaluates to null.
7621///
7622/// Returns true if the value evaluates to null.
7623static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
7624 // Treat (smart) pointers constructed from nullptr as null, whether we can
7625 // const-evaluate them or not.
7626 // This must happen first: the smart pointer expr might have _Nonnull type!
7627 if (isa<CXXNullPtrLiteralExpr>(
7628          IgnoreExprNodes(Expr, IgnoreImplicitAsWrittenSingleStep,
7629                          IgnoreElidableImplicitConstructorSingleStep)))
7630 return true;
7631
7632 // If the expression has non-null type, it doesn't evaluate to null.
7633 if (auto nullability = Expr->IgnoreImplicit()->getType()->getNullability()) {
7634 if (*nullability == NullabilityKind::NonNull)
7635 return false;
7636 }
7637
7638 // As a special case, transparent unions initialized with zero are
7639 // considered null for the purposes of the nonnull attribute.
7640 if (const RecordType *UT = Expr->getType()->getAsUnionType();
7641 UT && UT->getDecl()->hasAttr<TransparentUnionAttr>()) {
7642    if (const auto *CLE = dyn_cast<CompoundLiteralExpr>(Expr))
7643      if (const auto *ILE = dyn_cast<InitListExpr>(CLE->getInitializer()))
7644        Expr = ILE->getInit(0);
7645 }
7646
7647 bool Result;
7648 return (!Expr->isValueDependent() &&
7649          Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
7650 !Result);
7651}
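
// Sketch of expressions the helper above treats as evaluating to null (the
// calls are illustrative):
//   f(nullptr);   // null: CXXNullPtrLiteralExpr, even when wrapped in an
//                 // elided smart-pointer constructor
//   f(0);         // null: constant-evaluates to false
//   f(q);         // not null when 'q' has a _Nonnull-annotated type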
7652
7653static void CheckNonNullArgument(Sema &S,
7654 const Expr *ArgExpr,
7655 SourceLocation CallSiteLoc) {
7656 if (CheckNonNullExpr(S, ArgExpr))
7657 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
7658 S.PDiag(diag::warn_null_arg)
7659 << ArgExpr->getSourceRange());
7660}
7661
7662bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
7663 FormatStringInfo FSI;
7664 if ((GetFormatStringType(Format) == FST_NSString) &&
7665      getFormatStringInfo(Format, false, true, &FSI)) {
7666 Idx = FSI.FormatIdx;
7667 return true;
7668 }
7669 return false;
7670}
7671
7672/// Diagnose use of the %s directive in an NSString that is being passed
7673/// as a format string to a formatting method.
7674static void
7675DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
7676 const NamedDecl *FDecl,
7677 Expr **Args,
7678 unsigned NumArgs) {
7679 unsigned Idx = 0;
7680 bool Format = false;
7681 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
7682 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
7683 Idx = 2;
7684 Format = true;
7685 }
7686 else
7687 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
7688 if (S.GetFormatNSStringIdx(I, Idx)) {
7689 Format = true;
7690 break;
7691 }
7692 }
7693 if (!Format || NumArgs <= Idx)
7694 return;
7695 const Expr *FormatExpr = Args[Idx];
7696  if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
7697 FormatExpr = CSCE->getSubExpr();
7698 const StringLiteral *FormatString;
7699 if (const ObjCStringLiteral *OSL =
7700          dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
7701 FormatString = OSL->getString();
7702 else
7703    FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
7704 if (!FormatString)
7705 return;
7706  if (S.FormatStringHasSArg(FormatString)) {
7707 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
7708 << "%s" << 1 << 1;
7709 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
7710 << FDecl->getDeclName();
7711 }
7712}
7713
7714/// Determine whether the given type has a non-null nullability annotation.
7715static bool isNonNullType(QualType type) {
7716 if (auto nullability = type->getNullability())
7717 return *nullability == NullabilityKind::NonNull;
7718
7719 return false;
7720}
7721
7722static void CheckNonNullArguments(Sema &S,
7723 const NamedDecl *FDecl,
7724 const FunctionProtoType *Proto,
7725 ArrayRef<const Expr *> Args,
7726 SourceLocation CallSiteLoc) {
7727 assert((FDecl || Proto) && "Need a function declaration or prototype");
7728
7729 // Already checked by constant evaluator.
7730 if (S.isConstantEvaluatedContext())
7731 return;
7732 // Check the attributes attached to the method/function itself.
7733 llvm::SmallBitVector NonNullArgs;
7734 if (FDecl) {
7735 // Handle the nonnull attribute on the function/method declaration itself.
7736 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
7737 if (!NonNull->args_size()) {
7738 // Easy case: all pointer arguments are nonnull.
7739 for (const auto *Arg : Args)
7740 if (S.isValidPointerAttrType(Arg->getType()))
7741 CheckNonNullArgument(S, Arg, CallSiteLoc);
7742 return;
7743 }
7744
7745 for (const ParamIdx &Idx : NonNull->args()) {
7746 unsigned IdxAST = Idx.getASTIndex();
7747 if (IdxAST >= Args.size())
7748 continue;
7749 if (NonNullArgs.empty())
7750 NonNullArgs.resize(Args.size());
7751 NonNullArgs.set(IdxAST);
7752 }
7753 }
7754 }
7755
7756  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
7757 // Handle the nonnull attribute on the parameters of the
7758 // function/method.
7759 ArrayRef<ParmVarDecl*> parms;
7760    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
7761 parms = FD->parameters();
7762 else
7763      parms = cast<ObjCMethodDecl>(FDecl)->parameters();
7764
7765 unsigned ParamIndex = 0;
7766 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
7767 I != E; ++I, ++ParamIndex) {
7768 const ParmVarDecl *PVD = *I;
7769 if (PVD->hasAttr<NonNullAttr>() || isNonNullType(PVD->getType())) {
7770 if (NonNullArgs.empty())
7771          NonNullArgs.resize(Args.size());
7772
7773 NonNullArgs.set(ParamIndex);
7774 }
7775 }
7776 } else {
7777 // If we have a non-function, non-method declaration but no
7778 // function prototype, try to dig out the function prototype.
7779 if (!Proto) {
7780      if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
7781 QualType type = VD->getType().getNonReferenceType();
7782 if (auto pointerType = type->getAs<PointerType>())
7783 type = pointerType->getPointeeType();
7784 else if (auto blockType = type->getAs<BlockPointerType>())
7785 type = blockType->getPointeeType();
7786 // FIXME: data member pointers?
7787
7788 // Dig out the function prototype, if there is one.
7789 Proto = type->getAs<FunctionProtoType>();
7790 }
7791 }
7792
7793 // Fill in non-null argument information from the nullability
7794 // information on the parameter types (if we have them).
7795 if (Proto) {
7796 unsigned Index = 0;
7797 for (auto paramType : Proto->getParamTypes()) {
7798        if (isNonNullType(paramType)) {
7799 if (NonNullArgs.empty())
7800            NonNullArgs.resize(Args.size());
7801
7802 NonNullArgs.set(Index);
7803 }
7804
7805 ++Index;
7806 }
7807 }
7808 }
7809
7810 // Check for non-null arguments.
7811 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
7812 ArgIndex != ArgIndexEnd; ++ArgIndex) {
7813 if (NonNullArgs[ArgIndex])
7814      CheckNonNullArgument(S, Args[ArgIndex], Args[ArgIndex]->getExprLoc());
7815 }
7816}
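
// A minimal sketch of the attribute forms the function above picks up (the
// declarations are illustrative, not from this file):
//   void f(int *p, int *q) __attribute__((nonnull));     // all pointer args
//   void g(int *p, int *q) __attribute__((nonnull(2)));  // only 'q'
//   void h(int *_Nonnull p);                              // nullability type
// Passing a null constant for any argument marked this way is diagnosed with
// warn_null_arg.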
7817
7818// 16-byte ByVal alignment that is not due to a vector member is not honoured
7819// by XL on AIX. Emit a warning here, to be safe, so that users know they are
7820// generating binary-incompatible code.
7821// Here we try to get information about the alignment of the struct member
7822// from the struct passed to the caller function. We only warn when the struct
7823// is passed byval, hence the series of checks and early returns if we are not
7824// passing a struct byval.
7825void Sema::checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg) {
7826  const auto *ICE = dyn_cast<ImplicitCastExpr>(Arg->IgnoreParens());
7827 if (!ICE)
7828 return;
7829
7830 const auto *DR = dyn_cast<DeclRefExpr>(ICE->getSubExpr());
7831 if (!DR)
7832 return;
7833
7834 const auto *PD = dyn_cast<ParmVarDecl>(DR->getDecl());
7835 if (!PD || !PD->getType()->isRecordType())
7836 return;
7837
7838 QualType ArgType = Arg->getType();
7839 for (const FieldDecl *FD :
7840 ArgType->castAs<RecordType>()->getDecl()->fields()) {
7841 if (const auto *AA = FD->getAttr<AlignedAttr>()) {
7842 CharUnits Alignment =
7843 Context.toCharUnitsFromBits(BitSize: AA->getAlignment(Context));
7844 if (Alignment.getQuantity() == 16) {
7845 Diag(FD->getLocation(), diag::warn_not_xl_compatible) << FD;
7846 Diag(Loc, diag::note_misaligned_member_used_here) << PD;
7847 }
7848 }
7849 }
7850}
7851
7852/// Warn if a pointer or reference argument passed to a function points to an
7853/// object that is less aligned than the parameter. This can happen when
7854/// creating a typedef with a lower alignment than the original type and then
7855/// calling functions defined in terms of the original type.
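///
/// A minimal sketch of code that could trigger the warning (illustrative
/// names; relies on the GCC/Clang rule that 'aligned' on a typedef may lower
/// alignment):
///
///   typedef double Unaligned __attribute__((aligned(2)));
///   void takesDouble(double *D);     // parameter expects 8-byte alignment
///   void f(Unaligned *U) {
///     takesDouble(U);                // warn_param_mismatched_alignment
///   }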
7856void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
7857 StringRef ParamName, QualType ArgTy,
7858 QualType ParamTy) {
7859
7860 // If a function accepts a pointer or reference type
7861 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType())
7862 return;
7863
7864 // If the parameter is a pointer type, get the pointee type for the
7865 // argument too. If the parameter is a reference type, don't try to get
7866 // the pointee type for the argument.
7867 if (ParamTy->isPointerType())
7868 ArgTy = ArgTy->getPointeeType();
7869
7870 // Remove reference or pointer
7871 ParamTy = ParamTy->getPointeeType();
7872
7873 // Find expected alignment, and the actual alignment of the passed object.
7874 // getTypeAlignInChars requires complete types
7875 if (ArgTy.isNull() || ParamTy->isDependentType() ||
7876 ParamTy->isIncompleteType() || ArgTy->isIncompleteType() ||
7877 ParamTy->isUndeducedType() || ArgTy->isUndeducedType())
7878 return;
7879
7880 CharUnits ParamAlign = Context.getTypeAlignInChars(T: ParamTy);
7881 CharUnits ArgAlign = Context.getTypeAlignInChars(T: ArgTy);
7882
7883 // If the argument is less aligned than the parameter, there is a
7884 // potential alignment issue.
7885 if (ArgAlign < ParamAlign)
7886 Diag(Loc, diag::warn_param_mismatched_alignment)
7887 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity()
7888 << ParamName << (FDecl != nullptr) << FDecl;
7889}
7890
7891/// Handles the checks for format strings, non-POD arguments to vararg
7892/// functions, NULL arguments passed to non-NULL parameters, diagnose_if
7893/// attributes and AArch64 SME attributes.
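///
/// For example (hypothetical user code, assuming the usual printf
/// declaration), both calls below are diagnosed through this entry point:
///
///   void copy(void *dst, const void *src) __attribute__((nonnull));
///   void f(void) {
///     copy(0, 0);              // null passed to nonnull parameters
///     printf("%d", "str");     // format/argument type mismatch
///   }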
7894void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
7895 const Expr *ThisArg, ArrayRef<const Expr *> Args,
7896 bool IsMemberFunction, SourceLocation Loc,
7897 SourceRange Range, VariadicCallType CallType) {
7898 // FIXME: We should check as much as we can in the template definition.
7899 if (CurContext->isDependentContext())
7900 return;
7901
7902 // Printf and scanf checking.
7903 llvm::SmallBitVector CheckedVarArgs;
7904 if (FDecl) {
7905 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
7906 // Only create vector if there are format attributes.
7907 CheckedVarArgs.resize(Args.size());
7908
7909 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
7910 CheckedVarArgs);
7911 }
7912 }
7913
7914 // Diagnose non-POD arguments passed through the variadic part of the call
7915 // that weren't already caught by the format string checks above.
7916 auto *FD = dyn_cast_or_null<FunctionDecl>(Val: FDecl);
7917 if (CallType != VariadicDoesNotApply &&
7918 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
7919 unsigned NumParams = Proto ? Proto->getNumParams()
7920 : FDecl && isa<FunctionDecl>(Val: FDecl)
7921 ? cast<FunctionDecl>(Val: FDecl)->getNumParams()
7922 : FDecl && isa<ObjCMethodDecl>(Val: FDecl)
7923 ? cast<ObjCMethodDecl>(Val: FDecl)->param_size()
7924 : 0;
7925
7926 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
7927 // Args[ArgIdx] can be null in malformed code.
7928 if (const Expr *Arg = Args[ArgIdx]) {
7929 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
7930 checkVariadicArgument(E: Arg, CT: CallType);
7931 }
7932 }
7933 }
7934
7935 if (FDecl || Proto) {
7936 CheckNonNullArguments(S&: *this, FDecl, Proto, Args, CallSiteLoc: Loc);
7937
7938 // Type safety checking.
7939 if (FDecl) {
7940 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
7941 CheckArgumentWithTypeTag(I, Args, Loc);
7942 }
7943 }
7944
7945 // Check that passed arguments match the alignment of original arguments.
7946 // Try to get the missing prototype from the declaration.
7947 if (!Proto && FDecl) {
7948 const auto *FT = FDecl->getFunctionType();
7949 if (isa_and_nonnull<FunctionProtoType>(FT))
7950 Proto = cast<FunctionProtoType>(FDecl->getFunctionType());
7951 }
7952 if (Proto) {
7953 // For variadic functions, we may have more args than parameters.
7954 // For some K&R functions, we may have fewer args than parameters.
7955 const auto N = std::min<unsigned>(a: Proto->getNumParams(), b: Args.size());
7956 bool IsScalableRet = Proto->getReturnType()->isSizelessVectorType();
7957 bool IsScalableArg = false;
7958 for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) {
7959 // Args[ArgIdx] can be null in malformed code.
7960 if (const Expr *Arg = Args[ArgIdx]) {
7961 if (Arg->containsErrors())
7962 continue;
7963
7964 if (Context.getTargetInfo().getTriple().isOSAIX() && FDecl && Arg &&
7965 FDecl->hasLinkage() &&
7966 FDecl->getFormalLinkage() != Linkage::Internal &&
7967 CallType == VariadicDoesNotApply)
7968 checkAIXMemberAlignment(Loc: (Arg->getExprLoc()), Arg);
7969
7970 QualType ParamTy = Proto->getParamType(i: ArgIdx);
7971 if (ParamTy->isSizelessVectorType())
7972 IsScalableArg = true;
7973 QualType ArgTy = Arg->getType();
7974 CheckArgAlignment(Loc: Arg->getExprLoc(), FDecl, ParamName: std::to_string(val: ArgIdx + 1),
7975 ArgTy, ParamTy);
7976 }
7977 }
7978
7979 // If the callee has an AArch64 SME attribute to indicate that it is an
7980 // __arm_streaming function, then the caller requires SME to be available.
7981 FunctionProtoType::ExtProtoInfo ExtInfo = Proto->getExtProtoInfo();
7982 if (ExtInfo.AArch64SMEAttributes & FunctionType::SME_PStateSMEnabledMask) {
7983 if (auto *CallerFD = dyn_cast<FunctionDecl>(Val: CurContext)) {
7984 llvm::StringMap<bool> CallerFeatureMap;
7985 Context.getFunctionFeatureMap(FeatureMap&: CallerFeatureMap, CallerFD);
7986 if (!CallerFeatureMap.contains("sme"))
7987 Diag(Loc, diag::err_sme_call_in_non_sme_target);
7988 } else if (!Context.getTargetInfo().hasFeature(Feature: "sme")) {
7989 Diag(Loc, diag::err_sme_call_in_non_sme_target);
7990 }
7991 }
7992
7993 // If the call requires a streaming-mode change and has scalable vector
7994 // arguments or return values, then warn the user that the streaming and
7995 // non-streaming vector lengths may be different.
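//
// A sketch of the diagnosed situation (assumes SVE/SME support; names are
// illustrative):
//
//   svint32_t normal_fn(svint32_t v);            // non-streaming callee
//   svint32_t stream_fn(svint32_t v) __arm_streaming {
//     return normal_fn(v);   // streaming -> non-streaming call passing and
//   }                        // returning scalable (VL-dependent) values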
7996 const auto *CallerFD = dyn_cast<FunctionDecl>(Val: CurContext);
7997 if (CallerFD && (!FD || !FD->getBuiltinID()) &&
7998 (IsScalableArg || IsScalableRet)) {
7999 bool IsCalleeStreaming =
8000 ExtInfo.AArch64SMEAttributes & FunctionType::SME_PStateSMEnabledMask;
8001 bool IsCalleeStreamingCompatible =
8002 ExtInfo.AArch64SMEAttributes &
8003 FunctionType::SME_PStateSMCompatibleMask;
8004 ArmStreamingType CallerFnType = getArmStreamingFnType(FD: CallerFD);
8005 if (!IsCalleeStreamingCompatible &&
8006 (CallerFnType == ArmStreamingCompatible ||
8007 ((CallerFnType == ArmStreaming) ^ IsCalleeStreaming))) {
8008 if (IsScalableArg)
8009 Diag(Loc, diag::warn_sme_streaming_pass_return_vl_to_non_streaming)
8010 << /*IsArg=*/true;
8011 if (IsScalableRet)
8012 Diag(Loc, diag::warn_sme_streaming_pass_return_vl_to_non_streaming)
8013 << /*IsArg=*/false;
8014 }
8015 }
8016
8017 FunctionType::ArmStateValue CalleeArmZAState =
8018 FunctionType::getArmZAState(AttrBits: ExtInfo.AArch64SMEAttributes);
8019 FunctionType::ArmStateValue CalleeArmZT0State =
8020 FunctionType::getArmZT0State(AttrBits: ExtInfo.AArch64SMEAttributes);
8021 if (CalleeArmZAState != FunctionType::ARM_None ||
8022 CalleeArmZT0State != FunctionType::ARM_None) {
8023 bool CallerHasZAState = false;
8024 bool CallerHasZT0State = false;
8025 if (CallerFD) {
8026 auto *Attr = CallerFD->getAttr<ArmNewAttr>();
8027 if (Attr && Attr->isNewZA())
8028 CallerHasZAState = true;
8029 if (Attr && Attr->isNewZT0())
8030 CallerHasZT0State = true;
8031 if (const auto *FPT = CallerFD->getType()->getAs<FunctionProtoType>()) {
8032 CallerHasZAState |=
8033 FunctionType::getArmZAState(
8034 AttrBits: FPT->getExtProtoInfo().AArch64SMEAttributes) !=
8035 FunctionType::ARM_None;
8036 CallerHasZT0State |=
8037 FunctionType::getArmZT0State(
8038 AttrBits: FPT->getExtProtoInfo().AArch64SMEAttributes) !=
8039 FunctionType::ARM_None;
8040 }
8041 }
8042
8043 if (CalleeArmZAState != FunctionType::ARM_None && !CallerHasZAState)
8044 Diag(Loc, diag::err_sme_za_call_no_za_state);
8045
8046 if (CalleeArmZT0State != FunctionType::ARM_None && !CallerHasZT0State)
8047 Diag(Loc, diag::err_sme_zt0_call_no_zt0_state);
8048
8049 if (CallerHasZAState && CalleeArmZAState == FunctionType::ARM_None &&
8050 CalleeArmZT0State != FunctionType::ARM_None) {
8051 Diag(Loc, diag::err_sme_unimplemented_za_save_restore);
8052 Diag(Loc, diag::note_sme_use_preserves_za);
8053 }
8054 }
8055 }
8056
8057 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
8058 auto *AA = FDecl->getAttr<AllocAlignAttr>();
8059 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()];
8060 if (!Arg->isValueDependent()) {
8061 Expr::EvalResult Align;
8062 if (Arg->EvaluateAsInt(Result&: Align, Ctx: Context)) {
8063 const llvm::APSInt &I = Align.Val.getInt();
8064 if (!I.isPowerOf2())
8065 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two)
8066 << Arg->getSourceRange();
8067
8068 if (I > Sema::MaximumAlignment)
8069 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great)
8070 << Arg->getSourceRange() << Sema::MaximumAlignment;
8071 }
8072 }
8073 }
8074
8075 if (FD)
8076 diagnoseArgDependentDiagnoseIfAttrs(Function: FD, ThisArg, Args, Loc);
8077}
8078
8079/// CheckConstructorCall - Check a constructor call for correctness and safety
8080/// properties not enforced by the C type system.
8081void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
8082 ArrayRef<const Expr *> Args,
8083 const FunctionProtoType *Proto,
8084 SourceLocation Loc) {
8085 VariadicCallType CallType =
8086 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;
8087
8088 auto *Ctor = cast<CXXConstructorDecl>(Val: FDecl);
8089 CheckArgAlignment(
8090 Loc, FDecl, ParamName: "'this'", ArgTy: Context.getPointerType(T: ThisType),
8091 ParamTy: Context.getPointerType(Ctor->getFunctionObjectParameterType()));
8092
8093 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
8094 Loc, SourceRange(), CallType);
8095}
8096
8097/// CheckFunctionCall - Check a direct function call for various correctness
8098/// and safety properties not strictly enforced by the C type system.
8099bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
8100 const FunctionProtoType *Proto) {
8101 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(Val: TheCall) &&
8102 isa<CXXMethodDecl>(Val: FDecl);
8103 bool IsMemberFunction = isa<CXXMemberCallExpr>(Val: TheCall) ||
8104 IsMemberOperatorCall;
8105 VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
8106 Fn: TheCall->getCallee());
8107 Expr** Args = TheCall->getArgs();
8108 unsigned NumArgs = TheCall->getNumArgs();
8109
8110 Expr *ImplicitThis = nullptr;
8111 if (IsMemberOperatorCall && !FDecl->hasCXXExplicitFunctionObjectParameter()) {
8112 // If this is a call to a member operator, hide the first
8113 // argument from checkCall.
8114 // FIXME: Our choice of AST representation here is less than ideal.
8115 ImplicitThis = Args[0];
8116 ++Args;
8117 --NumArgs;
8118 } else if (IsMemberFunction && !FDecl->isStatic() &&
8119 !FDecl->hasCXXExplicitFunctionObjectParameter())
8120 ImplicitThis =
8121 cast<CXXMemberCallExpr>(Val: TheCall)->getImplicitObjectArgument();
8122
8123 if (ImplicitThis) {
8124 // ImplicitThis may or may not be a pointer, depending on whether . or -> is
8125 // used.
8126 QualType ThisType = ImplicitThis->getType();
8127 if (!ThisType->isPointerType()) {
8128 assert(!ThisType->isReferenceType());
8129 ThisType = Context.getPointerType(T: ThisType);
8130 }
8131
8132 QualType ThisTypeFromDecl = Context.getPointerType(
8133 T: cast<CXXMethodDecl>(Val: FDecl)->getFunctionObjectParameterType());
8134
8135 CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType,
8136 ThisTypeFromDecl);
8137 }
8138
8139 checkCall(FDecl, Proto, ThisArg: ImplicitThis, Args: llvm::ArrayRef(Args, NumArgs),
8140 IsMemberFunction, Loc: TheCall->getRParenLoc(),
8141 Range: TheCall->getCallee()->getSourceRange(), CallType);
8142
8143 IdentifierInfo *FnInfo = FDecl->getIdentifier();
8144 // None of the checks below are needed for functions that don't have
8145 // simple names (e.g., C++ conversion functions).
8146 if (!FnInfo)
8147 return false;
8148
8149 // Enforce TCB except for builtin calls, which are always allowed.
8150 if (FDecl->getBuiltinID() == 0)
8151 CheckTCBEnforcement(CallExprLoc: TheCall->getExprLoc(), Callee: FDecl);
8152
8153 CheckAbsoluteValueFunction(Call: TheCall, FDecl);
8154 CheckMaxUnsignedZero(Call: TheCall, FDecl);
8155 CheckInfNaNFunction(Call: TheCall, FDecl);
8156
8157 if (getLangOpts().ObjC)
8158 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);
8159
8160 unsigned CMId = FDecl->getMemoryFunctionKind();
8161
8162 // Handle memory setting and copying functions.
8163 switch (CMId) {
8164 case 0:
8165 return false;
8166 case Builtin::BIstrlcpy: // fallthrough
8167 case Builtin::BIstrlcat:
8168 CheckStrlcpycatArguments(Call: TheCall, FnName: FnInfo);
8169 break;
8170 case Builtin::BIstrncat:
8171 CheckStrncatArguments(Call: TheCall, FnName: FnInfo);
8172 break;
8173 case Builtin::BIfree:
8174 CheckFreeArguments(E: TheCall);
8175 break;
8176 default:
8177 CheckMemaccessArguments(Call: TheCall, BId: CMId, FnName: FnInfo);
8178 }
8179
8180 return false;
8181}
8182
8183bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
8184 ArrayRef<const Expr *> Args) {
8185 VariadicCallType CallType =
8186 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;
8187
8188 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args,
8189 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
8190 CallType);
8191
8192 CheckTCBEnforcement(lbrac, Method);
8193
8194 return false;
8195}
8196
8197bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
8198 const FunctionProtoType *Proto) {
8199 QualType Ty;
8200 if (const auto *V = dyn_cast<VarDecl>(Val: NDecl))
8201 Ty = V->getType().getNonReferenceType();
8202 else if (const auto *F = dyn_cast<FieldDecl>(Val: NDecl))
8203 Ty = F->getType().getNonReferenceType();
8204 else
8205 return false;
8206
8207 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
8208 !Ty->isFunctionProtoType())
8209 return false;
8210
8211 VariadicCallType CallType;
8212 if (!Proto || !Proto->isVariadic()) {
8213 CallType = VariadicDoesNotApply;
8214 } else if (Ty->isBlockPointerType()) {
8215 CallType = VariadicBlock;
8216 } else { // Ty->isFunctionPointerType()
8217 CallType = VariadicFunction;
8218 }
8219
8220 checkCall(FDecl: NDecl, Proto, /*ThisArg=*/nullptr,
8221 Args: llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
8222 /*IsMemberFunction=*/false, Loc: TheCall->getRParenLoc(),
8223 Range: TheCall->getCallee()->getSourceRange(), CallType);
8224
8225 return false;
8226}
8227
8228/// Checks function calls when a FunctionDecl or a NamedDecl is not available,
8229/// such as function pointers returned from functions.
8230bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
8231 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
8232 Fn: TheCall->getCallee());
8233 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
8234 Args: llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
8235 /*IsMemberFunction=*/false, Loc: TheCall->getRParenLoc(),
8236 Range: TheCall->getCallee()->getSourceRange(), CallType);
8237
8238 return false;
8239}
8240
8241static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
8242 if (!llvm::isValidAtomicOrderingCABI(I: Ordering))
8243 return false;
8244
8245 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
8246 switch (Op) {
8247 case AtomicExpr::AO__c11_atomic_init:
8248 case AtomicExpr::AO__opencl_atomic_init:
8249 llvm_unreachable("There is no ordering argument for an init");
8250
8251 case AtomicExpr::AO__c11_atomic_load:
8252 case AtomicExpr::AO__opencl_atomic_load:
8253 case AtomicExpr::AO__hip_atomic_load:
8254 case AtomicExpr::AO__atomic_load_n:
8255 case AtomicExpr::AO__atomic_load:
8256 case AtomicExpr::AO__scoped_atomic_load_n:
8257 case AtomicExpr::AO__scoped_atomic_load:
8258 return OrderingCABI != llvm::AtomicOrderingCABI::release &&
8259 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
8260
8261 case AtomicExpr::AO__c11_atomic_store:
8262 case AtomicExpr::AO__opencl_atomic_store:
8263 case AtomicExpr::AO__hip_atomic_store:
8264 case AtomicExpr::AO__atomic_store:
8265 case AtomicExpr::AO__atomic_store_n:
8266 case AtomicExpr::AO__scoped_atomic_store:
8267 case AtomicExpr::AO__scoped_atomic_store_n:
8268 return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
8269 OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
8270 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
8271
8272 default:
8273 return true;
8274 }
8275}
8276
8277ExprResult Sema::AtomicOpsOverloaded(ExprResult TheCallResult,
8278 AtomicExpr::AtomicOp Op) {
8279 CallExpr *TheCall = cast<CallExpr>(Val: TheCallResult.get());
8280 DeclRefExpr *DRE =cast<DeclRefExpr>(Val: TheCall->getCallee()->IgnoreParenCasts());
8281 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()};
8282 return BuildAtomicExpr(CallRange: {TheCall->getBeginLoc(), TheCall->getEndLoc()},
8283 ExprRange: DRE->getSourceRange(), RParenLoc: TheCall->getRParenLoc(), Args,
8284 Op);
8285}
8286
8287ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
8288 SourceLocation RParenLoc, MultiExprArg Args,
8289 AtomicExpr::AtomicOp Op,
8290 AtomicArgumentOrder ArgOrder) {
8291 // All the non-OpenCL operations take one of the following forms.
8292 // The OpenCL operations take the __c11 forms with one extra argument for
8293 // synchronization scope.
8294 enum {
8295 // C __c11_atomic_init(A *, C)
8296 Init,
8297
8298 // C __c11_atomic_load(A *, int)
8299 Load,
8300
8301 // void __atomic_load(A *, CP, int)
8302 LoadCopy,
8303
8304 // void __atomic_store(A *, CP, int)
8305 Copy,
8306
8307 // C __c11_atomic_add(A *, M, int)
8308 Arithmetic,
8309
8310 // C __atomic_exchange_n(A *, CP, int)
8311 Xchg,
8312
8313 // void __atomic_exchange(A *, C *, CP, int)
8314 GNUXchg,
8315
8316 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
8317 C11CmpXchg,
8318
8319 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
8320 GNUCmpXchg
8321 } Form = Init;
8322
8323 const unsigned NumForm = GNUCmpXchg + 1;
8324 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
8325 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
8326 // where:
8327 // C is an appropriate type,
8328 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
8329 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
8330 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and
8331 // the int parameters are for orderings.
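//
// For example, under this classification (illustrative calls only):
//   __c11_atomic_fetch_add(&obj, 1, memory_order_seq_cst)
//     is Form == Arithmetic with NumArgs[Form] == 3, while
//   __atomic_compare_exchange(&obj, &expected, &desired, weak, succ, fail)
//     is Form == GNUCmpXchg with NumArgs[Form] == 6.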
8332
8333 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm
8334 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
8335 "need to update code for modified forms");
8336 static_assert(AtomicExpr::AO__atomic_add_fetch == 0 &&
8337 AtomicExpr::AO__atomic_xor_fetch + 1 ==
8338 AtomicExpr::AO__c11_atomic_compare_exchange_strong,
8339 "need to update code for modified C11 atomics");
8340 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_compare_exchange_strong &&
8341 Op <= AtomicExpr::AO__opencl_atomic_store;
8342 bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_compare_exchange_strong &&
8343 Op <= AtomicExpr::AO__hip_atomic_store;
8344 bool IsScoped = Op >= AtomicExpr::AO__scoped_atomic_add_fetch &&
8345 Op <= AtomicExpr::AO__scoped_atomic_xor_fetch;
8346 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_compare_exchange_strong &&
8347 Op <= AtomicExpr::AO__c11_atomic_store) ||
8348 IsOpenCL;
8349 bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
8350 Op == AtomicExpr::AO__atomic_store_n ||
8351 Op == AtomicExpr::AO__atomic_exchange_n ||
8352 Op == AtomicExpr::AO__atomic_compare_exchange_n ||
8353 Op == AtomicExpr::AO__scoped_atomic_load_n ||
8354 Op == AtomicExpr::AO__scoped_atomic_store_n ||
8355 Op == AtomicExpr::AO__scoped_atomic_exchange_n ||
8356 Op == AtomicExpr::AO__scoped_atomic_compare_exchange_n;
8357 // Bit mask for extra allowed value types other than integers for atomic
8358 // arithmetic operations. Add/sub allow pointer and floating point. Min/max
8359 // allow floating point.
8360 enum ArithOpExtraValueType {
8361 AOEVT_None = 0,
8362 AOEVT_Pointer = 1,
8363 AOEVT_FP = 2,
8364 };
8365 unsigned ArithAllows = AOEVT_None;
8366
8367 switch (Op) {
8368 case AtomicExpr::AO__c11_atomic_init:
8369 case AtomicExpr::AO__opencl_atomic_init:
8370 Form = Init;
8371 break;
8372
8373 case AtomicExpr::AO__c11_atomic_load:
8374 case AtomicExpr::AO__opencl_atomic_load:
8375 case AtomicExpr::AO__hip_atomic_load:
8376 case AtomicExpr::AO__atomic_load_n:
8377 case AtomicExpr::AO__scoped_atomic_load_n:
8378 Form = Load;
8379 break;
8380
8381 case AtomicExpr::AO__atomic_load:
8382 case AtomicExpr::AO__scoped_atomic_load:
8383 Form = LoadCopy;
8384 break;
8385
8386 case AtomicExpr::AO__c11_atomic_store:
8387 case AtomicExpr::AO__opencl_atomic_store:
8388 case AtomicExpr::AO__hip_atomic_store:
8389 case AtomicExpr::AO__atomic_store:
8390 case AtomicExpr::AO__atomic_store_n:
8391 case AtomicExpr::AO__scoped_atomic_store:
8392 case AtomicExpr::AO__scoped_atomic_store_n:
8393 Form = Copy;
8394 break;
8395 case AtomicExpr::AO__atomic_fetch_add:
8396 case AtomicExpr::AO__atomic_fetch_sub:
8397 case AtomicExpr::AO__atomic_add_fetch:
8398 case AtomicExpr::AO__atomic_sub_fetch:
8399 case AtomicExpr::AO__scoped_atomic_fetch_add:
8400 case AtomicExpr::AO__scoped_atomic_fetch_sub:
8401 case AtomicExpr::AO__scoped_atomic_add_fetch:
8402 case AtomicExpr::AO__scoped_atomic_sub_fetch:
8403 case AtomicExpr::AO__c11_atomic_fetch_add:
8404 case AtomicExpr::AO__c11_atomic_fetch_sub:
8405 case AtomicExpr::AO__opencl_atomic_fetch_add:
8406 case AtomicExpr::AO__opencl_atomic_fetch_sub:
8407 case AtomicExpr::AO__hip_atomic_fetch_add:
8408 case AtomicExpr::AO__hip_atomic_fetch_sub:
8409 ArithAllows = AOEVT_Pointer | AOEVT_FP;
8410 Form = Arithmetic;
8411 break;
8412 case AtomicExpr::AO__atomic_fetch_max:
8413 case AtomicExpr::AO__atomic_fetch_min:
8414 case AtomicExpr::AO__atomic_max_fetch:
8415 case AtomicExpr::AO__atomic_min_fetch:
8416 case AtomicExpr::AO__scoped_atomic_fetch_max:
8417 case AtomicExpr::AO__scoped_atomic_fetch_min:
8418 case AtomicExpr::AO__scoped_atomic_max_fetch:
8419 case AtomicExpr::AO__scoped_atomic_min_fetch:
8420 case AtomicExpr::AO__c11_atomic_fetch_max:
8421 case AtomicExpr::AO__c11_atomic_fetch_min:
8422 case AtomicExpr::AO__opencl_atomic_fetch_max:
8423 case AtomicExpr::AO__opencl_atomic_fetch_min:
8424 case AtomicExpr::AO__hip_atomic_fetch_max:
8425 case AtomicExpr::AO__hip_atomic_fetch_min:
8426 ArithAllows = AOEVT_FP;
8427 Form = Arithmetic;
8428 break;
8429 case AtomicExpr::AO__c11_atomic_fetch_and:
8430 case AtomicExpr::AO__c11_atomic_fetch_or:
8431 case AtomicExpr::AO__c11_atomic_fetch_xor:
8432 case AtomicExpr::AO__hip_atomic_fetch_and:
8433 case AtomicExpr::AO__hip_atomic_fetch_or:
8434 case AtomicExpr::AO__hip_atomic_fetch_xor:
8435 case AtomicExpr::AO__c11_atomic_fetch_nand:
8436 case AtomicExpr::AO__opencl_atomic_fetch_and:
8437 case AtomicExpr::AO__opencl_atomic_fetch_or:
8438 case AtomicExpr::AO__opencl_atomic_fetch_xor:
8439 case AtomicExpr::AO__atomic_fetch_and:
8440 case AtomicExpr::AO__atomic_fetch_or:
8441 case AtomicExpr::AO__atomic_fetch_xor:
8442 case AtomicExpr::AO__atomic_fetch_nand:
8443 case AtomicExpr::AO__atomic_and_fetch:
8444 case AtomicExpr::AO__atomic_or_fetch:
8445 case AtomicExpr::AO__atomic_xor_fetch:
8446 case AtomicExpr::AO__atomic_nand_fetch:
8447 case AtomicExpr::AO__scoped_atomic_fetch_and:
8448 case AtomicExpr::AO__scoped_atomic_fetch_or:
8449 case AtomicExpr::AO__scoped_atomic_fetch_xor:
8450 case AtomicExpr::AO__scoped_atomic_fetch_nand:
8451 case AtomicExpr::AO__scoped_atomic_and_fetch:
8452 case AtomicExpr::AO__scoped_atomic_or_fetch:
8453 case AtomicExpr::AO__scoped_atomic_xor_fetch:
8454 case AtomicExpr::AO__scoped_atomic_nand_fetch:
8455 Form = Arithmetic;
8456 break;
8457
8458 case AtomicExpr::AO__c11_atomic_exchange:
8459 case AtomicExpr::AO__hip_atomic_exchange:
8460 case AtomicExpr::AO__opencl_atomic_exchange:
8461 case AtomicExpr::AO__atomic_exchange_n:
8462 case AtomicExpr::AO__scoped_atomic_exchange_n:
8463 Form = Xchg;
8464 break;
8465
8466 case AtomicExpr::AO__atomic_exchange:
8467 case AtomicExpr::AO__scoped_atomic_exchange:
8468 Form = GNUXchg;
8469 break;
8470
8471 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
8472 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
8473 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
8474 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
8475 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
8476 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
8477 Form = C11CmpXchg;
8478 break;
8479
8480 case AtomicExpr::AO__atomic_compare_exchange:
8481 case AtomicExpr::AO__atomic_compare_exchange_n:
8482 case AtomicExpr::AO__scoped_atomic_compare_exchange:
8483 case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
8484 Form = GNUCmpXchg;
8485 break;
8486 }
8487
8488 unsigned AdjustedNumArgs = NumArgs[Form];
8489 if ((IsOpenCL || IsHIP || IsScoped) &&
8490 Op != AtomicExpr::AO__opencl_atomic_init)
8491 ++AdjustedNumArgs;
8492 // Check we have the right number of arguments.
8493 if (Args.size() < AdjustedNumArgs) {
8494 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args)
8495 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
8496 << /*is non object*/ 0 << ExprRange;
8497 return ExprError();
8498 } else if (Args.size() > AdjustedNumArgs) {
8499 Diag(Args[AdjustedNumArgs]->getBeginLoc(),
8500 diag::err_typecheck_call_too_many_args)
8501 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
8502 << /*is non object*/ 0 << ExprRange;
8503 return ExprError();
8504 }
8505
8506 // Inspect the first argument of the atomic operation.
8507 Expr *Ptr = Args[0];
8508 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(E: Ptr);
8509 if (ConvertedPtr.isInvalid())
8510 return ExprError();
8511
8512 Ptr = ConvertedPtr.get();
8513 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
8514 if (!pointerType) {
8515 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
8516 << Ptr->getType() << Ptr->getSourceRange();
8517 return ExprError();
8518 }
8519
8520 // For a __c11 builtin, this should be a pointer to an _Atomic type.
8521 QualType AtomTy = pointerType->getPointeeType(); // 'A'
8522 QualType ValType = AtomTy; // 'C'
8523 if (IsC11) {
8524 if (!AtomTy->isAtomicType()) {
8525 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic)
8526 << Ptr->getType() << Ptr->getSourceRange();
8527 return ExprError();
8528 }
8529 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) ||
8530 AtomTy.getAddressSpace() == LangAS::opencl_constant) {
8531 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic)
8532 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType()
8533 << Ptr->getSourceRange();
8534 return ExprError();
8535 }
8536 ValType = AtomTy->castAs<AtomicType>()->getValueType();
8537 } else if (Form != Load && Form != LoadCopy) {
8538 if (ValType.isConstQualified()) {
8539 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer)
8540 << Ptr->getType() << Ptr->getSourceRange();
8541 return ExprError();
8542 }
8543 }
8544
8545 // For an arithmetic operation, the implied arithmetic must be well-formed.
8546 if (Form == Arithmetic) {
8547 // GCC does not enforce these rules for GNU atomics, but we do to help catch
8548 // trivial type errors.
8549 auto IsAllowedValueType = [&](QualType ValType,
8550 unsigned AllowedType) -> bool {
8551 if (ValType->isIntegerType())
8552 return true;
8553 if (ValType->isPointerType())
8554 return AllowedType & AOEVT_Pointer;
8555 if (!(ValType->isFloatingType() && (AllowedType & AOEVT_FP)))
8556 return false;
8557 // LLVM Parser does not allow atomicrmw with x86_fp80 type.
8558 if (ValType->isSpecificBuiltinType(K: BuiltinType::LongDouble) &&
8559 &Context.getTargetInfo().getLongDoubleFormat() ==
8560 &llvm::APFloat::x87DoubleExtended())
8561 return false;
8562 return true;
8563 };
8564 if (!IsAllowedValueType(ValType, ArithAllows)) {
8565 auto DID = ArithAllows & AOEVT_FP
8566 ? (ArithAllows & AOEVT_Pointer
8567 ? diag::err_atomic_op_needs_atomic_int_ptr_or_fp
8568 : diag::err_atomic_op_needs_atomic_int_or_fp)
8569 : diag::err_atomic_op_needs_atomic_int;
8570 Diag(ExprRange.getBegin(), DID)
8571 << IsC11 << Ptr->getType() << Ptr->getSourceRange();
8572 return ExprError();
8573 }
8574 if (IsC11 && ValType->isPointerType() &&
8575 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(),
8576 diag::err_incomplete_type)) {
8577 return ExprError();
8578 }
8579 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
8580 // For __atomic_*_n operations, the value type must be a scalar integral or
8581 // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
8582 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr)
8583 << IsC11 << Ptr->getType() << Ptr->getSourceRange();
8584 return ExprError();
8585 }
8586
8587 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) &&
8588 !AtomTy->isScalarType()) {
8589 // For GNU atomics, require a trivially-copyable type. This is not part of
8590 // the GNU atomics specification but we enforce it for consistency with
8591 // other atomics which generally all require a trivially-copyable type. This
8592 // is because atomics just copy bits.
8593 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy)
8594 << Ptr->getType() << Ptr->getSourceRange();
8595 return ExprError();
8596 }
8597
8598 switch (ValType.getObjCLifetime()) {
8599 case Qualifiers::OCL_None:
8600 case Qualifiers::OCL_ExplicitNone:
8601 // okay
8602 break;
8603
8604 case Qualifiers::OCL_Weak:
8605 case Qualifiers::OCL_Strong:
8606 case Qualifiers::OCL_Autoreleasing:
8607 // FIXME: Can this happen? By this point, ValType should be known
8608 // to be trivially copyable.
8609 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership)
8610 << ValType << Ptr->getSourceRange();
8611 return ExprError();
8612 }
8613
8614 // All atomic operations have an overload which takes a pointer to a volatile
8615 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself
8616 // into the result or the other operands. Similarly atomic_load takes a
8617 // pointer to a const 'A'.
8618 ValType.removeLocalVolatile();
8619 ValType.removeLocalConst();
8620 QualType ResultType = ValType;
8621 if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
8622 Form == Init)
8623 ResultType = Context.VoidTy;
8624 else if (Form == C11CmpXchg || Form == GNUCmpXchg)
8625 ResultType = Context.BoolTy;
8626
8627 // The type of a parameter passed 'by value'. In the GNU atomics, such
8628 // arguments are actually passed as pointers.
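// For example (illustrative): __atomic_exchange_n(&obj, newval, order) takes
// 'newval' by value, whereas the GNU __atomic_exchange(&obj, &newval, &ret,
// order) takes it through a pointer, so ByValType becomes the pointer type.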
8629 QualType ByValType = ValType; // 'CP'
8630 bool IsPassedByAddress = false;
8631 if (!IsC11 && !IsHIP && !IsN) {
8632 ByValType = Ptr->getType();
8633 IsPassedByAddress = true;
8634 }
8635
8636 SmallVector<Expr *, 5> APIOrderedArgs;
8637 if (ArgOrder == Sema::AtomicArgumentOrder::AST) {
8638 APIOrderedArgs.push_back(Elt: Args[0]);
8639 switch (Form) {
8640 case Init:
8641 case Load:
8642 APIOrderedArgs.push_back(Elt: Args[1]); // Val1/Order
8643 break;
8644 case LoadCopy:
8645 case Copy:
8646 case Arithmetic:
8647 case Xchg:
8648 APIOrderedArgs.push_back(Elt: Args[2]); // Val1
8649 APIOrderedArgs.push_back(Elt: Args[1]); // Order
8650 break;
8651 case GNUXchg:
8652 APIOrderedArgs.push_back(Elt: Args[2]); // Val1
8653 APIOrderedArgs.push_back(Elt: Args[3]); // Val2
8654 APIOrderedArgs.push_back(Elt: Args[1]); // Order
8655 break;
8656 case C11CmpXchg:
8657 APIOrderedArgs.push_back(Elt: Args[2]); // Val1
8658 APIOrderedArgs.push_back(Elt: Args[4]); // Val2
8659 APIOrderedArgs.push_back(Elt: Args[1]); // Order
8660 APIOrderedArgs.push_back(Elt: Args[3]); // OrderFail
8661 break;
8662 case GNUCmpXchg:
8663 APIOrderedArgs.push_back(Elt: Args[2]); // Val1
8664 APIOrderedArgs.push_back(Elt: Args[4]); // Val2
8665 APIOrderedArgs.push_back(Elt: Args[5]); // Weak
8666 APIOrderedArgs.push_back(Elt: Args[1]); // Order
8667 APIOrderedArgs.push_back(Elt: Args[3]); // OrderFail
8668 break;
8669 }
8670 } else
8671 APIOrderedArgs.append(in_start: Args.begin(), in_end: Args.end());
8672
8673 // The first argument's non-CV pointer type is used to deduce the type of
8674 // subsequent arguments, except for:
8675 // - weak flag (always converted to bool)
8676 // - memory order (always converted to int)
8677 // - scope (always converted to int)
8678 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) {
8679 QualType Ty;
8680 if (i < NumVals[Form] + 1) {
8681 switch (i) {
8682 case 0:
8683 // The first argument is always a pointer. It has a fixed type.
8684 // It is always dereferenced; passing a null pointer is undefined behavior.
8685 CheckNonNullArgument(S&: *this, ArgExpr: APIOrderedArgs[i], CallSiteLoc: ExprRange.getBegin());
8686 // Nothing else to do: we already know all we want about this pointer.
8687 continue;
8688 case 1:
8689 // The second argument is the non-atomic operand. For arithmetic, this
8690 // is always passed by value, and for a compare_exchange it is always
8691 // passed by address. For the rest, GNU uses by-address and C11 uses
8692 // by-value.
8693 assert(Form != Load);
8694 if (Form == Arithmetic && ValType->isPointerType())
8695 Ty = Context.getPointerDiffType();
8696 else if (Form == Init || Form == Arithmetic)
8697 Ty = ValType;
8698 else if (Form == Copy || Form == Xchg) {
8699 if (IsPassedByAddress) {
8700 // The value pointer is always dereferenced; a null pointer is undefined.
8701 CheckNonNullArgument(S&: *this, ArgExpr: APIOrderedArgs[i],
8702 CallSiteLoc: ExprRange.getBegin());
8703 }
8704 Ty = ByValType;
8705 } else {
8706 Expr *ValArg = APIOrderedArgs[i];
8707 // The value pointer is always dereferenced; a null pointer is undefined.
8708 CheckNonNullArgument(S&: *this, ArgExpr: ValArg, CallSiteLoc: ExprRange.getBegin());
8709 LangAS AS = LangAS::Default;
8710 // Keep address space of non-atomic pointer type.
8711 if (const PointerType *PtrTy =
8712 ValArg->getType()->getAs<PointerType>()) {
8713 AS = PtrTy->getPointeeType().getAddressSpace();
8714 }
8715 Ty = Context.getPointerType(
8716 T: Context.getAddrSpaceQualType(T: ValType.getUnqualifiedType(), AddressSpace: AS));
8717 }
8718 break;
8719 case 2:
8720 // The third argument to compare_exchange / GNU exchange is the desired
8721 // value, either by-value (for the C11 and *_n variant) or as a pointer.
8722 if (IsPassedByAddress)
8723 CheckNonNullArgument(S&: *this, ArgExpr: APIOrderedArgs[i], CallSiteLoc: ExprRange.getBegin());
8724 Ty = ByValType;
8725 break;
8726 case 3:
8727 // The fourth argument to GNU compare_exchange is a 'weak' flag.
8728 Ty = Context.BoolTy;
8729 break;
8730 }
8731 } else {
8732 // The order(s) and scope are always converted to int.
8733 Ty = Context.IntTy;
8734 }
8735
8736 InitializedEntity Entity =
8737 InitializedEntity::InitializeParameter(Context, Type: Ty, Consumed: false);
8738 ExprResult Arg = APIOrderedArgs[i];
8739 Arg = PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: Arg);
8740 if (Arg.isInvalid())
8741 return true;
8742 APIOrderedArgs[i] = Arg.get();
8743 }
8744
8745 // Permute the arguments into a 'consistent' order.
8746 SmallVector<Expr*, 5> SubExprs;
8747 SubExprs.push_back(Elt: Ptr);
8748 switch (Form) {
8749 case Init:
8750 // Note, AtomicExpr::getVal1() has a special case for this atomic.
8751 SubExprs.push_back(Elt: APIOrderedArgs[1]); // Val1
8752 break;
8753 case Load:
8754 SubExprs.push_back(Elt: APIOrderedArgs[1]); // Order
8755 break;
8756 case LoadCopy:
8757 case Copy:
8758 case Arithmetic:
8759 case Xchg:
8760 SubExprs.push_back(Elt: APIOrderedArgs[2]); // Order
8761 SubExprs.push_back(Elt: APIOrderedArgs[1]); // Val1
8762 break;
8763 case GNUXchg:
8764 // Note, AtomicExpr::getVal2() has a special case for this atomic.
8765 SubExprs.push_back(Elt: APIOrderedArgs[3]); // Order
8766 SubExprs.push_back(Elt: APIOrderedArgs[1]); // Val1
8767 SubExprs.push_back(Elt: APIOrderedArgs[2]); // Val2
8768 break;
8769 case C11CmpXchg:
8770 SubExprs.push_back(Elt: APIOrderedArgs[3]); // Order
8771 SubExprs.push_back(Elt: APIOrderedArgs[1]); // Val1
8772 SubExprs.push_back(Elt: APIOrderedArgs[4]); // OrderFail
8773 SubExprs.push_back(Elt: APIOrderedArgs[2]); // Val2
8774 break;
8775 case GNUCmpXchg:
8776 SubExprs.push_back(Elt: APIOrderedArgs[4]); // Order
8777 SubExprs.push_back(Elt: APIOrderedArgs[1]); // Val1
8778 SubExprs.push_back(Elt: APIOrderedArgs[5]); // OrderFail
8779 SubExprs.push_back(Elt: APIOrderedArgs[2]); // Val2
8780 SubExprs.push_back(Elt: APIOrderedArgs[3]); // Weak
8781 break;
8782 }
8783
8784 // If the memory orders are constants, check they are valid.
8785 if (SubExprs.size() >= 2 && Form != Init) {
8786 std::optional<llvm::APSInt> Success =
8787 SubExprs[1]->getIntegerConstantExpr(Ctx: Context);
8788 if (Success && !isValidOrderingForOp(Ordering: Success->getSExtValue(), Op)) {
8789 Diag(SubExprs[1]->getBeginLoc(),
8790 diag::warn_atomic_op_has_invalid_memory_order)
8791 << /*success=*/(Form == C11CmpXchg || Form == GNUCmpXchg)
8792 << SubExprs[1]->getSourceRange();
8793 }
8794 if (SubExprs.size() >= 5) {
8795 if (std::optional<llvm::APSInt> Failure =
8796 SubExprs[3]->getIntegerConstantExpr(Ctx: Context)) {
8797 if (!llvm::is_contained(
8798 Set: {llvm::AtomicOrderingCABI::relaxed,
8799 llvm::AtomicOrderingCABI::consume,
8800 llvm::AtomicOrderingCABI::acquire,
8801 llvm::AtomicOrderingCABI::seq_cst},
8802 Element: (llvm::AtomicOrderingCABI)Failure->getSExtValue())) {
8803 Diag(SubExprs[3]->getBeginLoc(),
8804 diag::warn_atomic_op_has_invalid_memory_order)
8805 << /*failure=*/2 << SubExprs[3]->getSourceRange();
8806 }
8807 }
8808 }
8809 }
8810
8811 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) {
8812 auto *Scope = Args[Args.size() - 1];
8813 if (std::optional<llvm::APSInt> Result =
8814 Scope->getIntegerConstantExpr(Ctx: Context)) {
8815 if (!ScopeModel->isValid(Result->getZExtValue()))
8816 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope)
8817 << Scope->getSourceRange();
8818 }
8819 SubExprs.push_back(Elt: Scope);
8820 }
8821
8822 AtomicExpr *AE = new (Context)
8823 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc);
8824
8825 if ((Op == AtomicExpr::AO__c11_atomic_load ||
8826 Op == AtomicExpr::AO__c11_atomic_store ||
8827 Op == AtomicExpr::AO__opencl_atomic_load ||
8828 Op == AtomicExpr::AO__hip_atomic_load ||
8829 Op == AtomicExpr::AO__opencl_atomic_store ||
8830 Op == AtomicExpr::AO__hip_atomic_store) &&
8831 Context.AtomicUsesUnsupportedLibcall(AE))
8832 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib)
8833 << ((Op == AtomicExpr::AO__c11_atomic_load ||
8834 Op == AtomicExpr::AO__opencl_atomic_load ||
8835 Op == AtomicExpr::AO__hip_atomic_load)
8836 ? 0
8837 : 1);
8838
8839 if (ValType->isBitIntType()) {
8840 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit);
8841 return ExprError();
8842 }
8843
8844 return AE;
8845}
8846
8847/// checkBuiltinArgument - Given a call to a builtin function, perform
8848/// normal type-checking on the given argument, updating the call in
8849/// place. This is useful when a builtin function requires custom
8850/// type-checking for some of its arguments but not necessarily all of
8851/// them.
8852///
8853/// Returns true on error.
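///
/// A hypothetical use, for a builtin whose first argument needs custom
/// handling while its second argument is an ordinary parameter:
///
///   if (checkBuiltinArgument(S, TheCall, /*ArgIndex=*/1))
///     return ExprError();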
8854static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
8855 FunctionDecl *Fn = E->getDirectCallee();
8856 assert(Fn && "builtin call without direct callee!");
8857
8858 ParmVarDecl *Param = Fn->getParamDecl(i: ArgIndex);
8859 InitializedEntity Entity =
8860 InitializedEntity::InitializeParameter(Context&: S.Context, Parm: Param);
8861
8862 ExprResult Arg = E->getArg(Arg: ArgIndex);
8863 Arg = S.PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: Arg);
8864 if (Arg.isInvalid())
8865 return true;
8866
8867 E->setArg(Arg: ArgIndex, ArgExpr: Arg.get());
8868 return false;
8869}
8870
8871bool Sema::BuiltinWasmRefNullExtern(CallExpr *TheCall) {
8872 if (TheCall->getNumArgs() != 0)
8873 return true;
8874
8875 TheCall->setType(Context.getWebAssemblyExternrefType());
8876
8877 return false;
8878}
8879
8880bool Sema::BuiltinWasmRefNullFunc(CallExpr *TheCall) {
8881 if (TheCall->getNumArgs() != 0) {
8882 Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_many_args)
8883 << 0 /*function call*/ << /*expected*/ 0 << TheCall->getNumArgs()
8884 << /*is non object*/ 0;
8885 return true;
8886 }
8887
8888 // This custom type-checking code ensures that the nodes are as expected
8889 // in order to generate the necessary builtin later on.
8890 QualType Pointee = Context.getFunctionType(ResultTy: Context.VoidTy, Args: {}, EPI: {});
8891 QualType Type = Context.getPointerType(T: Pointee);
8892 Pointee = Context.getAddrSpaceQualType(T: Pointee, AddressSpace: LangAS::wasm_funcref);
8893 Type = Context.getAttributedType(attr::WebAssemblyFuncref, Type,
8894 Context.getPointerType(Pointee));
8895 TheCall->setType(Type);
8896
8897 return false;
8898}
8899
8900/// We have a call to a function like __sync_fetch_and_add, which is an
8901/// overloaded function based on the pointer type of its first argument.
8902/// The main BuildCallExpr routines have already promoted the types of
8903/// arguments because all of these calls are prototyped as void(...).
8904///
8905/// This function goes through and does final semantic checking for these
8906/// builtins, as well as generating any warnings.
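///
/// For example (illustrative user code):
///
///   short Counter = 0;
///   __sync_fetch_and_add(&Counter, 1);  // resolved below to
///                                       // __sync_fetch_and_add_2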
8907ExprResult Sema::BuiltinAtomicOverloaded(ExprResult TheCallResult) {
8908 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get());
8909 Expr *Callee = TheCall->getCallee();
8910 DeclRefExpr *DRE = cast<DeclRefExpr>(Val: Callee->IgnoreParenCasts());
8911 FunctionDecl *FDecl = cast<FunctionDecl>(Val: DRE->getDecl());
8912
8913 // Ensure that we have at least one argument to do type inference from.
8914 if (TheCall->getNumArgs() < 1) {
8915 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
8916 << 0 << 1 << TheCall->getNumArgs() << /*is non object*/ 0
8917 << Callee->getSourceRange();
8918 return ExprError();
8919 }
8920
8921 // Inspect the first argument of the atomic builtin. This should always be
8922 // a pointer type, whose element is an integral scalar or pointer type.
8923 // Because it is a pointer type, we don't have to worry about any implicit
8924 // casts here.
8925 // FIXME: We don't allow floating point scalars as input.
8926 Expr *FirstArg = TheCall->getArg(Arg: 0);
8927 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(E: FirstArg);
8928 if (FirstArgResult.isInvalid())
8929 return ExprError();
8930 FirstArg = FirstArgResult.get();
8931 TheCall->setArg(Arg: 0, ArgExpr: FirstArg);
8932
8933 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
8934 if (!pointerType) {
8935 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
8936 << FirstArg->getType() << FirstArg->getSourceRange();
8937 return ExprError();
8938 }
8939
8940 QualType ValType = pointerType->getPointeeType();
8941 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
8942 !ValType->isBlockPointerType()) {
8943 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
8944 << FirstArg->getType() << FirstArg->getSourceRange();
8945 return ExprError();
8946 }
8947
8948 if (ValType.isConstQualified()) {
8949 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const)
8950 << FirstArg->getType() << FirstArg->getSourceRange();
8951 return ExprError();
8952 }
8953
8954 switch (ValType.getObjCLifetime()) {
8955 case Qualifiers::OCL_None:
8956 case Qualifiers::OCL_ExplicitNone:
8957 // okay
8958 break;
8959
8960 case Qualifiers::OCL_Weak:
8961 case Qualifiers::OCL_Strong:
8962 case Qualifiers::OCL_Autoreleasing:
8963 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
8964 << ValType << FirstArg->getSourceRange();
8965 return ExprError();
8966 }
8967
8968 // Strip any qualifiers off ValType.
8969 ValType = ValType.getUnqualifiedType();
8970
8971 // The majority of builtins return a value, but a few have special return
8972 // types, so allow them to override appropriately below.
8973 QualType ResultType = ValType;
8974
8975 // We need to figure out which concrete builtin this maps onto. For example,
8976 // __sync_fetch_and_add with a 2 byte object turns into
8977 // __sync_fetch_and_add_2.
8978#define BUILTIN_ROW(x) \
8979 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
8980 Builtin::BI##x##_8, Builtin::BI##x##_16 }
8981
8982 static const unsigned BuiltinIndices[][5] = {
8983 BUILTIN_ROW(__sync_fetch_and_add),
8984 BUILTIN_ROW(__sync_fetch_and_sub),
8985 BUILTIN_ROW(__sync_fetch_and_or),
8986 BUILTIN_ROW(__sync_fetch_and_and),
8987 BUILTIN_ROW(__sync_fetch_and_xor),
8988 BUILTIN_ROW(__sync_fetch_and_nand),
8989
8990 BUILTIN_ROW(__sync_add_and_fetch),
8991 BUILTIN_ROW(__sync_sub_and_fetch),
8992 BUILTIN_ROW(__sync_and_and_fetch),
8993 BUILTIN_ROW(__sync_or_and_fetch),
8994 BUILTIN_ROW(__sync_xor_and_fetch),
8995 BUILTIN_ROW(__sync_nand_and_fetch),
8996
8997 BUILTIN_ROW(__sync_val_compare_and_swap),
8998 BUILTIN_ROW(__sync_bool_compare_and_swap),
8999 BUILTIN_ROW(__sync_lock_test_and_set),
9000 BUILTIN_ROW(__sync_lock_release),
9001 BUILTIN_ROW(__sync_swap)
9002 };
9003#undef BUILTIN_ROW
9004
9005 // Determine the index of the size.
9006 unsigned SizeIndex;
9007 switch (Context.getTypeSizeInChars(T: ValType).getQuantity()) {
9008 case 1: SizeIndex = 0; break;
9009 case 2: SizeIndex = 1; break;
9010 case 4: SizeIndex = 2; break;
9011 case 8: SizeIndex = 3; break;
9012 case 16: SizeIndex = 4; break;
9013 default:
9014 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
9015 << FirstArg->getType() << FirstArg->getSourceRange();
9016 return ExprError();
9017 }
9018
9019 // Each of these builtins has one pointer argument, followed by some number of
9020 // values (0, 1 or 2), followed by a potentially empty varargs list of
9021 // arguments that we ignore. Find out which row of BuiltinIndices to read
9022 // from, as well as the number of fixed args.
9023 unsigned BuiltinID = FDecl->getBuiltinID();
9024 unsigned BuiltinIndex, NumFixed = 1;
9025 bool WarnAboutSemanticsChange = false;
9026 switch (BuiltinID) {
9027 default: llvm_unreachable("Unknown overloaded atomic builtin!");
9028 case Builtin::BI__sync_fetch_and_add:
9029 case Builtin::BI__sync_fetch_and_add_1:
9030 case Builtin::BI__sync_fetch_and_add_2:
9031 case Builtin::BI__sync_fetch_and_add_4:
9032 case Builtin::BI__sync_fetch_and_add_8:
9033 case Builtin::BI__sync_fetch_and_add_16:
9034 BuiltinIndex = 0;
9035 break;
9036
9037 case Builtin::BI__sync_fetch_and_sub:
9038 case Builtin::BI__sync_fetch_and_sub_1:
9039 case Builtin::BI__sync_fetch_and_sub_2:
9040 case Builtin::BI__sync_fetch_and_sub_4:
9041 case Builtin::BI__sync_fetch_and_sub_8:
9042 case Builtin::BI__sync_fetch_and_sub_16:
9043 BuiltinIndex = 1;
9044 break;
9045
9046 case Builtin::BI__sync_fetch_and_or:
9047 case Builtin::BI__sync_fetch_and_or_1:
9048 case Builtin::BI__sync_fetch_and_or_2:
9049 case Builtin::BI__sync_fetch_and_or_4:
9050 case Builtin::BI__sync_fetch_and_or_8:
9051 case Builtin::BI__sync_fetch_and_or_16:
9052 BuiltinIndex = 2;
9053 break;
9054
9055 case Builtin::BI__sync_fetch_and_and:
9056 case Builtin::BI__sync_fetch_and_and_1:
9057 case Builtin::BI__sync_fetch_and_and_2:
9058 case Builtin::BI__sync_fetch_and_and_4:
9059 case Builtin::BI__sync_fetch_and_and_8:
9060 case Builtin::BI__sync_fetch_and_and_16:
9061 BuiltinIndex = 3;
9062 break;
9063
9064 case Builtin::BI__sync_fetch_and_xor:
9065 case Builtin::BI__sync_fetch_and_xor_1:
9066 case Builtin::BI__sync_fetch_and_xor_2:
9067 case Builtin::BI__sync_fetch_and_xor_4:
9068 case Builtin::BI__sync_fetch_and_xor_8:
9069 case Builtin::BI__sync_fetch_and_xor_16:
9070 BuiltinIndex = 4;
9071 break;
9072
9073 case Builtin::BI__sync_fetch_and_nand:
9074 case Builtin::BI__sync_fetch_and_nand_1:
9075 case Builtin::BI__sync_fetch_and_nand_2:
9076 case Builtin::BI__sync_fetch_and_nand_4:
9077 case Builtin::BI__sync_fetch_and_nand_8:
9078 case Builtin::BI__sync_fetch_and_nand_16:
9079 BuiltinIndex = 5;
9080 WarnAboutSemanticsChange = true;
9081 break;
9082
9083 case Builtin::BI__sync_add_and_fetch:
9084 case Builtin::BI__sync_add_and_fetch_1:
9085 case Builtin::BI__sync_add_and_fetch_2:
9086 case Builtin::BI__sync_add_and_fetch_4:
9087 case Builtin::BI__sync_add_and_fetch_8:
9088 case Builtin::BI__sync_add_and_fetch_16:
9089 BuiltinIndex = 6;
9090 break;
9091
9092 case Builtin::BI__sync_sub_and_fetch:
9093 case Builtin::BI__sync_sub_and_fetch_1:
9094 case Builtin::BI__sync_sub_and_fetch_2:
9095 case Builtin::BI__sync_sub_and_fetch_4:
9096 case Builtin::BI__sync_sub_and_fetch_8:
9097 case Builtin::BI__sync_sub_and_fetch_16:
9098 BuiltinIndex = 7;
9099 break;
9100
9101 case Builtin::BI__sync_and_and_fetch:
9102 case Builtin::BI__sync_and_and_fetch_1:
9103 case Builtin::BI__sync_and_and_fetch_2:
9104 case Builtin::BI__sync_and_and_fetch_4:
9105 case Builtin::BI__sync_and_and_fetch_8:
9106 case Builtin::BI__sync_and_and_fetch_16:
9107 BuiltinIndex = 8;
9108 break;
9109
9110 case Builtin::BI__sync_or_and_fetch:
9111 case Builtin::BI__sync_or_and_fetch_1:
9112 case Builtin::BI__sync_or_and_fetch_2:
9113 case Builtin::BI__sync_or_and_fetch_4:
9114 case Builtin::BI__sync_or_and_fetch_8:
9115 case Builtin::BI__sync_or_and_fetch_16:
9116 BuiltinIndex = 9;
9117 break;
9118
9119 case Builtin::BI__sync_xor_and_fetch:
9120 case Builtin::BI__sync_xor_and_fetch_1:
9121 case Builtin::BI__sync_xor_and_fetch_2:
9122 case Builtin::BI__sync_xor_and_fetch_4:
9123 case Builtin::BI__sync_xor_and_fetch_8:
9124 case Builtin::BI__sync_xor_and_fetch_16:
9125 BuiltinIndex = 10;
9126 break;
9127
9128 case Builtin::BI__sync_nand_and_fetch:
9129 case Builtin::BI__sync_nand_and_fetch_1:
9130 case Builtin::BI__sync_nand_and_fetch_2:
9131 case Builtin::BI__sync_nand_and_fetch_4:
9132 case Builtin::BI__sync_nand_and_fetch_8:
9133 case Builtin::BI__sync_nand_and_fetch_16:
9134 BuiltinIndex = 11;
9135 WarnAboutSemanticsChange = true;
9136 break;
9137
9138 case Builtin::BI__sync_val_compare_and_swap:
9139 case Builtin::BI__sync_val_compare_and_swap_1:
9140 case Builtin::BI__sync_val_compare_and_swap_2:
9141 case Builtin::BI__sync_val_compare_and_swap_4:
9142 case Builtin::BI__sync_val_compare_and_swap_8:
9143 case Builtin::BI__sync_val_compare_and_swap_16:
9144 BuiltinIndex = 12;
9145 NumFixed = 2;
9146 break;
9147
9148 case Builtin::BI__sync_bool_compare_and_swap:
9149 case Builtin::BI__sync_bool_compare_and_swap_1:
9150 case Builtin::BI__sync_bool_compare_and_swap_2:
9151 case Builtin::BI__sync_bool_compare_and_swap_4:
9152 case Builtin::BI__sync_bool_compare_and_swap_8:
9153 case Builtin::BI__sync_bool_compare_and_swap_16:
9154 BuiltinIndex = 13;
9155 NumFixed = 2;
9156 ResultType = Context.BoolTy;
9157 break;
9158
9159 case Builtin::BI__sync_lock_test_and_set:
9160 case Builtin::BI__sync_lock_test_and_set_1:
9161 case Builtin::BI__sync_lock_test_and_set_2:
9162 case Builtin::BI__sync_lock_test_and_set_4:
9163 case Builtin::BI__sync_lock_test_and_set_8:
9164 case Builtin::BI__sync_lock_test_and_set_16:
9165 BuiltinIndex = 14;
9166 break;
9167
9168 case Builtin::BI__sync_lock_release:
9169 case Builtin::BI__sync_lock_release_1:
9170 case Builtin::BI__sync_lock_release_2:
9171 case Builtin::BI__sync_lock_release_4:
9172 case Builtin::BI__sync_lock_release_8:
9173 case Builtin::BI__sync_lock_release_16:
9174 BuiltinIndex = 15;
9175 NumFixed = 0;
9176 ResultType = Context.VoidTy;
9177 break;
9178
9179 case Builtin::BI__sync_swap:
9180 case Builtin::BI__sync_swap_1:
9181 case Builtin::BI__sync_swap_2:
9182 case Builtin::BI__sync_swap_4:
9183 case Builtin::BI__sync_swap_8:
9184 case Builtin::BI__sync_swap_16:
9185 BuiltinIndex = 16;
9186 break;
9187 }
9188
9189 // Now that we know how many fixed arguments we expect, first check that we
9190 // have at least that many.
9191 if (TheCall->getNumArgs() < 1+NumFixed) {
9192 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
9193 << 0 << 1 + NumFixed << TheCall->getNumArgs() << /*is non object*/ 0
9194 << Callee->getSourceRange();
9195 return ExprError();
9196 }
9197
9198 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst)
9199 << Callee->getSourceRange();
9200
9201 if (WarnAboutSemanticsChange) {
9202 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change)
9203 << Callee->getSourceRange();
9204 }
9205
9206 // Get the decl for the concrete builtin; from it, we can tell which
9207 // concrete integer type we should convert to.
9208 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
9209 StringRef NewBuiltinName = Context.BuiltinInfo.getName(ID: NewBuiltinID);
9210 FunctionDecl *NewBuiltinDecl;
9211 if (NewBuiltinID == BuiltinID)
9212 NewBuiltinDecl = FDecl;
9213 else {
9214 // Perform builtin lookup to avoid redeclaring it.
9215 DeclarationName DN(&Context.Idents.get(Name: NewBuiltinName));
9216 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
9217 LookupName(R&: Res, S: TUScope, /*AllowBuiltinCreation=*/true);
9218 assert(Res.getFoundDecl());
9219 NewBuiltinDecl = dyn_cast<FunctionDecl>(Val: Res.getFoundDecl());
9220 if (!NewBuiltinDecl)
9221 return ExprError();
9222 }
9223
9224 // The first argument --- the pointer --- has a fixed type; we
9225 // deduce the types of the rest of the arguments accordingly. Walk
9226 // the remaining arguments, converting them to the deduced value type.
9227 for (unsigned i = 0; i != NumFixed; ++i) {
9228 ExprResult Arg = TheCall->getArg(Arg: i+1);
9229
9230 // GCC does an implicit conversion to the pointer or integer ValType. This
9231 // can fail in some cases (1i -> int**); check for this error case now.
9232 // Initialize the argument.
9233 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
9234 Type: ValType, /*consume*/ Consumed: false);
9235 Arg = PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: Arg);
9236 if (Arg.isInvalid())
9237 return ExprError();
9238
9239 // Okay, we have something that *can* be converted to the right type. Check
9240 // to see if there is a potentially weird extension going on here. This can
9241 // happen when you do an atomic operation on something like a char* and
9242 // pass in 42. The 42 gets converted to char. This is even more strange
9243 // for things like 45.123 -> char, etc.
9244 // FIXME: Do this check.
9245 TheCall->setArg(Arg: i+1, ArgExpr: Arg.get());
9246 }
9247
9248 // Create a new DeclRefExpr to refer to the new decl.
9249 DeclRefExpr *NewDRE = DeclRefExpr::Create(
9250 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
9251 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
9252 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());
9253
9254 // Set the callee in the CallExpr.
9255 // FIXME: This loses syntactic information.
9256 QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
9257 ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
9258 CK_BuiltinFnToFnPtr);
9259 TheCall->setCallee(PromotedCall.get());
9260
9261 // Change the result type of the call to match the original value type. This
9262 // is arbitrary, but the codegen for these builtins is designed to handle it
9263 // gracefully.
9264 TheCall->setType(ResultType);
9265
9266 // Prohibit problematic uses of bit-precise integer types with atomic
9267 // builtins. The arguments would have already been converted to the first
9268 // argument's type, so we only need to check the first argument.
9269 const auto *BitIntValType = ValType->getAs<BitIntType>();
9270 if (BitIntValType && !llvm::isPowerOf2_64(Value: BitIntValType->getNumBits())) {
9271 Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
9272 return ExprError();
9273 }
9274
9275 return TheCallResult;
9276}
9277
9278/// BuiltinNontemporalOverloaded - We have a call to
9279/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
9280/// overloaded function based on the pointer type of its last argument.
9281///
9282/// This function goes through and does final semantic checking for these
9283/// builtins.
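///
/// A minimal usage sketch (illustrative only; the names below are not from
/// this file):
/// \code
///   void stream_out(float *p, float v) {
///     __builtin_nontemporal_store(v, p);        // pointee type float drives
///     float x = __builtin_nontemporal_load(p);  // the deduced access type
///     (void)x;
///   }
/// \endcode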
9284ExprResult Sema::BuiltinNontemporalOverloaded(ExprResult TheCallResult) {
9285 CallExpr *TheCall = (CallExpr *)TheCallResult.get();
9286 DeclRefExpr *DRE =
9287 cast<DeclRefExpr>(Val: TheCall->getCallee()->IgnoreParenCasts());
9288 FunctionDecl *FDecl = cast<FunctionDecl>(Val: DRE->getDecl());
9289 unsigned BuiltinID = FDecl->getBuiltinID();
9290 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
9291 BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
9292 "Unexpected nontemporal load/store builtin!");
9293 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
9294 unsigned numArgs = isStore ? 2 : 1;
9295
9296 // Ensure that we have the proper number of arguments.
9297 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: numArgs))
9298 return ExprError();
9299
9300 // Inspect the last argument of the nontemporal builtin. This should always
9301 // be a pointer type, from which we imply the type of the memory access.
9302 // Because it is a pointer type, we don't have to worry about any implicit
9303 // casts here.
9304 Expr *PointerArg = TheCall->getArg(Arg: numArgs - 1);
9305 ExprResult PointerArgResult =
9306 DefaultFunctionArrayLvalueConversion(E: PointerArg);
9307
9308 if (PointerArgResult.isInvalid())
9309 return ExprError();
9310 PointerArg = PointerArgResult.get();
9311 TheCall->setArg(Arg: numArgs - 1, ArgExpr: PointerArg);
9312
9313 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
9314 if (!pointerType) {
9315 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
9316 << PointerArg->getType() << PointerArg->getSourceRange();
9317 return ExprError();
9318 }
9319
9320 QualType ValType = pointerType->getPointeeType();
9321
9322 // Strip any qualifiers off ValType.
9323 ValType = ValType.getUnqualifiedType();
9324 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
9325 !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
9326 !ValType->isVectorType()) {
9327 Diag(DRE->getBeginLoc(),
9328 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
9329 << PointerArg->getType() << PointerArg->getSourceRange();
9330 return ExprError();
9331 }
9332
9333 if (!isStore) {
9334 TheCall->setType(ValType);
9335 return TheCallResult;
9336 }
9337
9338 ExprResult ValArg = TheCall->getArg(Arg: 0);
9339 InitializedEntity Entity = InitializedEntity::InitializeParameter(
9340 Context, Type: ValType, /*consume*/ Consumed: false);
9341 ValArg = PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: ValArg);
9342 if (ValArg.isInvalid())
9343 return ExprError();
9344
9345 TheCall->setArg(Arg: 0, ArgExpr: ValArg.get());
9346 TheCall->setType(Context.VoidTy);
9347 return TheCallResult;
9348}
9349
/// CheckObjCString - Checks that the argument to the builtin
/// CFString constructor is correct.
/// Note: It might also make sense to do the UTF-16 conversion here (it would
/// simplify the backend).
9354bool Sema::CheckObjCString(Expr *Arg) {
9355 Arg = Arg->IgnoreParenCasts();
9356 StringLiteral *Literal = dyn_cast<StringLiteral>(Val: Arg);
9357
9358 if (!Literal || !Literal->isOrdinary()) {
9359 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
9360 << Arg->getSourceRange();
9361 return true;
9362 }
9363
9364 if (Literal->containsNonAsciiOrNull()) {
9365 StringRef String = Literal->getString();
9366 unsigned NumBytes = String.size();
9367 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
9368 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
9369 llvm::UTF16 *ToPtr = &ToBuf[0];
9370
9371 llvm::ConversionResult Result =
9372 llvm::ConvertUTF8toUTF16(sourceStart: &FromPtr, sourceEnd: FromPtr + NumBytes, targetStart: &ToPtr,
9373 targetEnd: ToPtr + NumBytes, flags: llvm::strictConversion);
9374 // Check for conversion failure.
9375 if (Result != llvm::conversionOK)
9376 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated)
9377 << Arg->getSourceRange();
9378 }
9379 return false;
9380}
9381
/// CheckOSLogFormatStringArg - Checks that the format string argument to the
/// os_log() and os_trace() functions is correct, and converts it to const
/// char *.
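///
/// A minimal sketch of what is accepted (illustrative; names are made up):
/// \code
///   void log_value(int v) {
///     __builtin_os_log_format_buffer_size("value: %d", v); // literal format: OK
///     // A non-literal format argument is rejected with
///     // err_os_log_format_not_string_constant.
///   }
/// \endcode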
9384ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
9385 Arg = Arg->IgnoreParenCasts();
9386 auto *Literal = dyn_cast<StringLiteral>(Val: Arg);
9387 if (!Literal) {
9388 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Val: Arg)) {
9389 Literal = ObjcLiteral->getString();
9390 }
9391 }
9392
9393 if (!Literal || (!Literal->isOrdinary() && !Literal->isUTF8())) {
9394 return ExprError(
9395 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
9396 << Arg->getSourceRange());
9397 }
9398
9399 ExprResult Result(Literal);
9400 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
9401 InitializedEntity Entity =
9402 InitializedEntity::InitializeParameter(Context, Type: ResultTy, Consumed: false);
9403 Result = PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: Result);
9404 return Result;
9405}
9406
9407/// Check that the user is calling the appropriate va_start builtin for the
9408/// target and calling convention.
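///
/// For example (x86-64 Linux, illustrative; not from this file):
/// \code
///   void f(int last, ...) {
///     __builtin_ms_va_list ap;
///     __builtin_ms_va_start(ap, last); // error: SysV function uses the MS form
///     __builtin_ms_va_end(ap);
///   }
///   __attribute__((ms_abi)) void g(int last, ...) {
///     __builtin_ms_va_list ap;
///     __builtin_ms_va_start(ap, last); // OK: Win64 calling convention
///     __builtin_ms_va_end(ap);
///   }
/// \endcode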
9409static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
9410 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
9411 bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
9412 bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 ||
9413 TT.getArch() == llvm::Triple::aarch64_32);
9414 bool IsWindows = TT.isOSWindows();
9415 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
9416 if (IsX64 || IsAArch64) {
9417 CallingConv CC = CC_C;
9418 if (const FunctionDecl *FD = S.getCurFunctionDecl())
9419 CC = FD->getType()->castAs<FunctionType>()->getCallConv();
9420 if (IsMSVAStart) {
9421 // Don't allow this in System V ABI functions.
9422 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
9423 return S.Diag(Fn->getBeginLoc(),
9424 diag::err_ms_va_start_used_in_sysv_function);
9425 } else {
9426 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
9427 // On x64 Windows, don't allow this in System V ABI functions.
9428 // (Yes, that means there's no corresponding way to support variadic
9429 // System V ABI functions on Windows.)
9430 if ((IsWindows && CC == CC_X86_64SysV) ||
9431 (!IsWindows && CC == CC_Win64))
9432 return S.Diag(Fn->getBeginLoc(),
9433 diag::err_va_start_used_in_wrong_abi_function)
9434 << !IsWindows;
9435 }
9436 return false;
9437 }
9438
9439 if (IsMSVAStart)
9440 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only);
9441 return false;
9442}
9443
9444static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
9445 ParmVarDecl **LastParam = nullptr) {
9446 // Determine whether the current function, block, or obj-c method is variadic
9447 // and get its parameter list.
9448 bool IsVariadic = false;
9449 ArrayRef<ParmVarDecl *> Params;
9450 DeclContext *Caller = S.CurContext;
9451 if (auto *Block = dyn_cast<BlockDecl>(Val: Caller)) {
9452 IsVariadic = Block->isVariadic();
9453 Params = Block->parameters();
9454 } else if (auto *FD = dyn_cast<FunctionDecl>(Val: Caller)) {
9455 IsVariadic = FD->isVariadic();
9456 Params = FD->parameters();
9457 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Val: Caller)) {
9458 IsVariadic = MD->isVariadic();
9459 // FIXME: This isn't correct for methods (results in bogus warning).
9460 Params = MD->parameters();
9461 } else if (isa<CapturedDecl>(Val: Caller)) {
9462 // We don't support va_start in a CapturedDecl.
9463 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt);
9464 return true;
9465 } else {
    // This must be some other DeclContext that parses expressions.
9467 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function);
9468 return true;
9469 }
9470
9471 if (!IsVariadic) {
9472 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function);
9473 return true;
9474 }
9475
9476 if (LastParam)
9477 *LastParam = Params.empty() ? nullptr : Params.back();
9478
9479 return false;
9480}
9481
9482/// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
9483/// for validity. Emit an error and return true on failure; return false
9484/// on success.
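///
/// A minimal sketch of a valid use (illustrative; names are made up):
/// \code
///   int sum(int count, ...) {
///     __builtin_va_list ap;
///     __builtin_va_start(ap, count); // `count` is the last named parameter
///     int total = 0;
///     for (int i = 0; i < count; ++i)
///       total += __builtin_va_arg(ap, int);
///     __builtin_va_end(ap);
///     return total;
///   }
/// \endcode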
9485bool Sema::BuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
9486 Expr *Fn = TheCall->getCallee();
9487
9488 if (checkVAStartABI(S&: *this, BuiltinID, Fn))
9489 return true;
9490
  // In C23 mode, va_start only needs one argument. However, the builtin still
  // requires two arguments (which matches the behavior of the GCC builtin);
  // <stdarg.h> passes `0` as the second argument in C23 mode.
9494 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 2))
9495 return true;
9496
9497 // Type-check the first argument normally.
9498 if (checkBuiltinArgument(S&: *this, E: TheCall, ArgIndex: 0))
9499 return true;
9500
9501 // Check that the current function is variadic, and get its last parameter.
9502 ParmVarDecl *LastParam;
9503 if (checkVAStartIsInVariadicFunction(S&: *this, Fn, LastParam: &LastParam))
9504 return true;
9505
9506 // Verify that the second argument to the builtin is the last argument of the
9507 // current function or method. In C23 mode, if the second argument is an
9508 // integer constant expression with value 0, then we don't bother with this
9509 // check.
9510 bool SecondArgIsLastNamedArgument = false;
9511 const Expr *Arg = TheCall->getArg(Arg: 1)->IgnoreParenCasts();
9512 if (std::optional<llvm::APSInt> Val =
9513 TheCall->getArg(Arg: 1)->getIntegerConstantExpr(Ctx: Context);
9514 Val && LangOpts.C23 && *Val == 0)
9515 return false;
9516
9517 // These are valid if SecondArgIsLastNamedArgument is false after the next
9518 // block.
9519 QualType Type;
9520 SourceLocation ParamLoc;
9521 bool IsCRegister = false;
9522
9523 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Val: Arg)) {
9524 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(Val: DR->getDecl())) {
9525 SecondArgIsLastNamedArgument = PV == LastParam;
9526
9527 Type = PV->getType();
9528 ParamLoc = PV->getLocation();
9529 IsCRegister =
9530 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus;
9531 }
9532 }
9533
9534 if (!SecondArgIsLastNamedArgument)
9535 Diag(TheCall->getArg(1)->getBeginLoc(),
9536 diag::warn_second_arg_of_va_start_not_last_named_param);
9537 else if (IsCRegister || Type->isReferenceType() ||
9538 Type->isSpecificBuiltinType(K: BuiltinType::Float) || [=] {
9539 // Promotable integers are UB, but enumerations need a bit of
9540 // extra checking to see what their promotable type actually is.
9541 if (!Context.isPromotableIntegerType(T: Type))
9542 return false;
9543 if (!Type->isEnumeralType())
9544 return true;
9545 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl();
9546 return !(ED &&
9547 Context.typesAreCompatible(T1: ED->getPromotionType(), T2: Type));
9548 }()) {
9549 unsigned Reason = 0;
9550 if (Type->isReferenceType()) Reason = 1;
9551 else if (IsCRegister) Reason = 2;
9552 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason;
9553 Diag(ParamLoc, diag::note_parameter_type) << Type;
9554 }
9555
9556 return false;
9557}
9558
9559bool Sema::BuiltinVAStartARMMicrosoft(CallExpr *Call) {
9560 auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool {
9561 const LangOptions &LO = getLangOpts();
9562
9563 if (LO.CPlusPlus)
9564 return Arg->getType()
9565 .getCanonicalType()
9566 .getTypePtr()
9567 ->getPointeeType()
9568 .withoutLocalFastQualifiers() == Context.CharTy;
9569
    // In C, allow aliasing through `char *`; this is required at least for
    // AArch64.
9572 return true;
9573 };
9574
9575 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size,
9576 // const char *named_addr);
9577
9578 Expr *Func = Call->getCallee();
9579
9580 if (Call->getNumArgs() < 3)
9581 return Diag(Call->getEndLoc(),
9582 diag::err_typecheck_call_too_few_args_at_least)
9583 << 0 /*function call*/ << 3 << Call->getNumArgs()
9584 << /*is non object*/ 0;
9585
9586 // Type-check the first argument normally.
9587 if (checkBuiltinArgument(S&: *this, E: Call, ArgIndex: 0))
9588 return true;
9589
9590 // Check that the current function is variadic.
9591 if (checkVAStartIsInVariadicFunction(S&: *this, Fn: Func))
9592 return true;
9593
9594 // __va_start on Windows does not validate the parameter qualifiers
9595
9596 const Expr *Arg1 = Call->getArg(Arg: 1)->IgnoreParens();
9597 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr();
9598
9599 const Expr *Arg2 = Call->getArg(Arg: 2)->IgnoreParens();
9600 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr();
9601
9602 const QualType &ConstCharPtrTy =
9603 Context.getPointerType(Context.CharTy.withConst());
9604 if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1))
9605 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible)
9606 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */
9607 << 0 /* qualifier difference */
9608 << 3 /* parameter mismatch */
9609 << 2 << Arg1->getType() << ConstCharPtrTy;
9610
9611 const QualType SizeTy = Context.getSizeType();
9612 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
9613 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible)
9614 << Arg2->getType() << SizeTy << 1 /* different class */
9615 << 0 /* qualifier difference */
9616 << 3 /* parameter mismatch */
9617 << 3 << Arg2->getType() << SizeTy;
9618
9619 return false;
9620}
9621
9622/// BuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
9623/// friends. This is declared to take (...), so we have to check everything.
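///
/// For example (illustrative):
/// \code
///   int cmp(double a, int b) {
///     return __builtin_isgreater(a, b); // int promotes to double; the common
///   }                                   // type is a real floating type, so OK
///   // Two integer operands are rejected: their common type (int) is not a
///   // real floating type.
/// \endcode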
9624bool Sema::BuiltinUnorderedCompare(CallExpr *TheCall, unsigned BuiltinID) {
9625 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 2))
9626 return true;
9627
9628 if (BuiltinID == Builtin::BI__builtin_isunordered &&
9629 TheCall->getFPFeaturesInEffect(getLangOpts()).getNoHonorNaNs())
9630 Diag(TheCall->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
9631 << 1 << 0 << TheCall->getSourceRange();
9632
9633 ExprResult OrigArg0 = TheCall->getArg(Arg: 0);
9634 ExprResult OrigArg1 = TheCall->getArg(Arg: 1);
9635
9636 // Do standard promotions between the two arguments, returning their common
9637 // type.
9638 QualType Res = UsualArithmeticConversions(
9639 LHS&: OrigArg0, RHS&: OrigArg1, Loc: TheCall->getExprLoc(), ACK: ACK_Comparison);
9640 if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
9641 return true;
9642
9643 // Make sure any conversions are pushed back into the call; this is
9644 // type safe since unordered compare builtins are declared as "_Bool
9645 // foo(...)".
9646 TheCall->setArg(Arg: 0, ArgExpr: OrigArg0.get());
9647 TheCall->setArg(Arg: 1, ArgExpr: OrigArg1.get());
9648
9649 if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
9650 return false;
9651
9652 // If the common type isn't a real floating type, then the arguments were
9653 // invalid for this operation.
9654 if (Res.isNull() || !Res->isRealFloatingType())
9655 return Diag(OrigArg0.get()->getBeginLoc(),
9656 diag::err_typecheck_call_invalid_ordered_compare)
9657 << OrigArg0.get()->getType() << OrigArg1.get()->getType()
9658 << SourceRange(OrigArg0.get()->getBeginLoc(),
9659 OrigArg1.get()->getEndLoc());
9660
9661 return false;
9662}
9663
/// BuiltinFPClassification - Handle functions like
/// __builtin_isnan and friends. This is declared to take (...), so we have
/// to check everything.
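///
/// A minimal sketch (illustrative; the mask value below assumes LLVM's
/// FPClassTest encoding, where fcNan == 3):
/// \code
///   int classify(double x) {
///     int a = __builtin_isnan(x);        // single floating-point argument
///     int b = __builtin_isfpclass(x, 3); // second arg: constant test mask in
///     return a | b;                      // [0, fcAllFlags]
///   }
/// \endcode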
9667bool Sema::BuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs,
9668 unsigned BuiltinID) {
9669 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: NumArgs))
9670 return true;
9671
9672 FPOptions FPO = TheCall->getFPFeaturesInEffect(LO: getLangOpts());
9673 if (FPO.getNoHonorInfs() && (BuiltinID == Builtin::BI__builtin_isfinite ||
9674 BuiltinID == Builtin::BI__builtin_isinf ||
9675 BuiltinID == Builtin::BI__builtin_isinf_sign))
9676 Diag(TheCall->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
9677 << 0 << 0 << TheCall->getSourceRange();
9678
9679 if (FPO.getNoHonorNaNs() && (BuiltinID == Builtin::BI__builtin_isnan ||
9680 BuiltinID == Builtin::BI__builtin_isunordered))
9681 Diag(TheCall->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
9682 << 1 << 0 << TheCall->getSourceRange();
9683
9684 bool IsFPClass = NumArgs == 2;
9685
9686 // Find out position of floating-point argument.
9687 unsigned FPArgNo = IsFPClass ? 0 : NumArgs - 1;
9688
  // We can count on all parameters preceding the floating-point one being int.
  // Convert all of those.
9691 for (unsigned i = 0; i < FPArgNo; ++i) {
9692 Expr *Arg = TheCall->getArg(Arg: i);
9693
9694 if (Arg->isTypeDependent())
9695 return false;
9696
9697 ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing);
9698
9699 if (Res.isInvalid())
9700 return true;
9701 TheCall->setArg(Arg: i, ArgExpr: Res.get());
9702 }
9703
9704 Expr *OrigArg = TheCall->getArg(Arg: FPArgNo);
9705
9706 if (OrigArg->isTypeDependent())
9707 return false;
9708
  // Usual Unary Conversions will convert half to float, which we want for
  // machines that use fp16 conversion intrinsics. Otherwise, we want to leave
  // the type as it is, but do the normal L->Rvalue conversions.
9712 if (Context.getTargetInfo().useFP16ConversionIntrinsics())
9713 OrigArg = UsualUnaryConversions(E: OrigArg).get();
9714 else
9715 OrigArg = DefaultFunctionArrayLvalueConversion(E: OrigArg).get();
9716 TheCall->setArg(Arg: FPArgNo, ArgExpr: OrigArg);
9717
9718 QualType VectorResultTy;
9719 QualType ElementTy = OrigArg->getType();
  // TODO: When all classification functions are implemented with is_fpclass,
  // vector arguments can be supported in all of them.
9722 if (ElementTy->isVectorType() && IsFPClass) {
9723 VectorResultTy = GetSignedVectorType(V: ElementTy);
9724 ElementTy = ElementTy->castAs<VectorType>()->getElementType();
9725 }
9726
9727 // This operation requires a non-_Complex floating-point number.
9728 if (!ElementTy->isRealFloatingType())
9729 return Diag(OrigArg->getBeginLoc(),
9730 diag::err_typecheck_call_invalid_unary_fp)
9731 << OrigArg->getType() << OrigArg->getSourceRange();
9732
  // __builtin_isfpclass has an integer parameter that specifies the test mask.
  // It is passed in (...), so it should be analyzed completely here.
9735 if (IsFPClass)
9736 if (BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: llvm::fcAllFlags))
9737 return true;
9738
  // TODO: enable this code for all classification functions.
9740 if (IsFPClass) {
9741 QualType ResultTy;
9742 if (!VectorResultTy.isNull())
9743 ResultTy = VectorResultTy;
9744 else
9745 ResultTy = Context.IntTy;
9746 TheCall->setType(ResultTy);
9747 }
9748
9749 return false;
9750}
9751
9752/// Perform semantic analysis for a call to __builtin_complex.
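///
/// For example (illustrative):
/// \code
///   _Complex double make(void) {
///     return __builtin_complex(1.0, 2.0); // OK: two operands of the same
///   }                                     // real floating type
///   // __builtin_complex(1.0f, 2.0) is rejected: operand types differ.
///   // __builtin_complex(1, 2) is rejected: operands must be real floating.
/// \endcode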
9753bool Sema::BuiltinComplex(CallExpr *TheCall) {
9754 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 2))
9755 return true;
9756
9757 bool Dependent = false;
9758 for (unsigned I = 0; I != 2; ++I) {
9759 Expr *Arg = TheCall->getArg(Arg: I);
9760 QualType T = Arg->getType();
9761 if (T->isDependentType()) {
9762 Dependent = true;
9763 continue;
9764 }
9765
9766 // Despite supporting _Complex int, GCC requires a real floating point type
9767 // for the operands of __builtin_complex.
9768 if (!T->isRealFloatingType()) {
9769 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp)
9770 << Arg->getType() << Arg->getSourceRange();
9771 }
9772
9773 ExprResult Converted = DefaultLvalueConversion(E: Arg);
9774 if (Converted.isInvalid())
9775 return true;
9776 TheCall->setArg(Arg: I, ArgExpr: Converted.get());
9777 }
9778
9779 if (Dependent) {
9780 TheCall->setType(Context.DependentTy);
9781 return false;
9782 }
9783
9784 Expr *Real = TheCall->getArg(Arg: 0);
9785 Expr *Imag = TheCall->getArg(Arg: 1);
9786 if (!Context.hasSameType(T1: Real->getType(), T2: Imag->getType())) {
9787 return Diag(Real->getBeginLoc(),
9788 diag::err_typecheck_call_different_arg_types)
9789 << Real->getType() << Imag->getType()
9790 << Real->getSourceRange() << Imag->getSourceRange();
9791 }
9792
9793 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers;
9794 // don't allow this builtin to form those types either.
9795 // FIXME: Should we allow these types?
9796 if (Real->getType()->isFloat16Type())
9797 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
9798 << "_Float16";
9799 if (Real->getType()->isHalfType())
9800 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
9801 << "half";
9802
9803 TheCall->setType(Context.getComplexType(T: Real->getType()));
9804 return false;
9805}
9806
// Customized Sema checking for VSX builtins that have the following signature:
// vector [...] builtinName(vector [...], vector [...], const int);
// They take the same type of vectors (any legal vector type) for the first
// two arguments and a compile-time constant for the third argument.
// Example builtins are:
// vector double vec_xxpermdi(vector double, vector double, int);
// vector short vec_xxsldwi(vector short, vector short, int);
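//
// A usage sketch (PowerPC with VSX enabled; illustrative, not from this file):
//   vector double a, b; int n;
//   vector double r = vec_xxpermdi(a, b, 2); // OK: same types, constant index
//   vector double s = vec_xxpermdi(a, b, n); // rejected: non-constant 3rd arg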
9814bool Sema::BuiltinVSX(CallExpr *TheCall) {
9815 unsigned ExpectedNumArgs = 3;
9816 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: ExpectedNumArgs))
9817 return true;
9818
9819 // Check the third argument is a compile time constant
9820 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context))
9821 return Diag(TheCall->getBeginLoc(),
9822 diag::err_vsx_builtin_nonconstant_argument)
9823 << 3 /* argument index */ << TheCall->getDirectCallee()
9824 << SourceRange(TheCall->getArg(2)->getBeginLoc(),
9825 TheCall->getArg(2)->getEndLoc());
9826
9827 QualType Arg1Ty = TheCall->getArg(Arg: 0)->getType();
9828 QualType Arg2Ty = TheCall->getArg(Arg: 1)->getType();
9829
9830 // Check the type of argument 1 and argument 2 are vectors.
9831 SourceLocation BuiltinLoc = TheCall->getBeginLoc();
9832 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
9833 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
9834 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
9835 << TheCall->getDirectCallee() << /*isMorethantwoArgs*/ false
9836 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
9837 TheCall->getArg(1)->getEndLoc());
9838 }
9839
9840 // Check the first two arguments are the same type.
9841 if (!Context.hasSameUnqualifiedType(T1: Arg1Ty, T2: Arg2Ty)) {
9842 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
9843 << TheCall->getDirectCallee() << /*isMorethantwoArgs*/ false
9844 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
9845 TheCall->getArg(1)->getEndLoc());
9846 }
9847
  // When default clang type checking is turned off and the customized type
  // checking is used, the return type of the function must be explicitly set.
  // Otherwise it is _Bool by default.
9851 TheCall->setType(Arg1Ty);
9852
9853 return false;
9854}
9855
9856/// BuiltinShuffleVector - Handle __builtin_shufflevector.
9857// This is declared to take (...), so we have to check everything.
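//
// A minimal sketch (illustrative; the ext_vector_type typedef is an
// assumption, not something defined in this file):
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   float4 rev(float4 v) {
//     return __builtin_shufflevector(v, v, 3, 2, 1, 0); // indices in [0, 2*N),
//   }                                                   // or -1 for undef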
9858ExprResult Sema::BuiltinShuffleVector(CallExpr *TheCall) {
9859 if (TheCall->getNumArgs() < 2)
9860 return ExprError(Diag(TheCall->getEndLoc(),
9861 diag::err_typecheck_call_too_few_args_at_least)
9862 << 0 /*function call*/ << 2 << TheCall->getNumArgs()
9863 << /*is non object*/ 0 << TheCall->getSourceRange());
9864
9865 // Determine which of the following types of shufflevector we're checking:
9866 // 1) unary, vector mask: (lhs, mask)
9867 // 2) binary, scalar mask: (lhs, rhs, index, ..., index)
9868 QualType resType = TheCall->getArg(Arg: 0)->getType();
9869 unsigned numElements = 0;
9870
9871 if (!TheCall->getArg(Arg: 0)->isTypeDependent() &&
9872 !TheCall->getArg(Arg: 1)->isTypeDependent()) {
9873 QualType LHSType = TheCall->getArg(Arg: 0)->getType();
9874 QualType RHSType = TheCall->getArg(Arg: 1)->getType();
9875
9876 if (!LHSType->isVectorType() || !RHSType->isVectorType())
9877 return ExprError(
9878 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector)
9879 << TheCall->getDirectCallee() << /*isMorethantwoArgs*/ false
9880 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
9881 TheCall->getArg(1)->getEndLoc()));
9882
9883 numElements = LHSType->castAs<VectorType>()->getNumElements();
9884 unsigned numResElements = TheCall->getNumArgs() - 2;
9885
9886 // Check to see if we have a call with 2 vector arguments, the unary shuffle
9887 // with mask. If so, verify that RHS is an integer vector type with the
9888 // same number of elts as lhs.
9889 if (TheCall->getNumArgs() == 2) {
9890 if (!RHSType->hasIntegerRepresentation() ||
9891 RHSType->castAs<VectorType>()->getNumElements() != numElements)
9892 return ExprError(Diag(TheCall->getBeginLoc(),
9893 diag::err_vec_builtin_incompatible_vector)
9894 << TheCall->getDirectCallee()
9895 << /*isMorethantwoArgs*/ false
9896 << SourceRange(TheCall->getArg(1)->getBeginLoc(),
9897 TheCall->getArg(1)->getEndLoc()));
9898 } else if (!Context.hasSameUnqualifiedType(T1: LHSType, T2: RHSType)) {
9899 return ExprError(Diag(TheCall->getBeginLoc(),
9900 diag::err_vec_builtin_incompatible_vector)
9901 << TheCall->getDirectCallee()
9902 << /*isMorethantwoArgs*/ false
9903 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
9904 TheCall->getArg(1)->getEndLoc()));
9905 } else if (numElements != numResElements) {
9906 QualType eltType = LHSType->castAs<VectorType>()->getElementType();
9907 resType =
9908 Context.getVectorType(VectorType: eltType, NumElts: numResElements, VecKind: VectorKind::Generic);
9909 }
9910 }
9911
9912 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
9913 if (TheCall->getArg(Arg: i)->isTypeDependent() ||
9914 TheCall->getArg(Arg: i)->isValueDependent())
9915 continue;
9916
9917 std::optional<llvm::APSInt> Result;
9918 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context)))
9919 return ExprError(Diag(TheCall->getBeginLoc(),
9920 diag::err_shufflevector_nonconstant_argument)
9921 << TheCall->getArg(i)->getSourceRange());
9922
9923 // Allow -1 which will be translated to undef in the IR.
9924 if (Result->isSigned() && Result->isAllOnes())
9925 continue;
9926
9927 if (Result->getActiveBits() > 64 ||
9928 Result->getZExtValue() >= numElements * 2)
9929 return ExprError(Diag(TheCall->getBeginLoc(),
9930 diag::err_shufflevector_argument_too_large)
9931 << TheCall->getArg(i)->getSourceRange());
9932 }
9933
9934 SmallVector<Expr*, 32> exprs;
9935
9936 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
9937 exprs.push_back(Elt: TheCall->getArg(Arg: i));
9938 TheCall->setArg(Arg: i, ArgExpr: nullptr);
9939 }
9940
9941 return new (Context) ShuffleVectorExpr(Context, exprs, resType,
9942 TheCall->getCallee()->getBeginLoc(),
9943 TheCall->getRParenLoc());
9944}
9945
9946/// ConvertVectorExpr - Handle __builtin_convertvector
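///
/// For example (illustrative; the typedefs are assumptions, not from this
/// file):
/// \code
///   typedef int   int4   __attribute__((ext_vector_type(4)));
///   typedef float float4 __attribute__((ext_vector_type(4)));
///   float4 widen(int4 v) {
///     return __builtin_convertvector(v, float4); // element counts must match
///   }
/// \endcode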
9947ExprResult Sema::ConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
9948 SourceLocation BuiltinLoc,
9949 SourceLocation RParenLoc) {
9950 ExprValueKind VK = VK_PRValue;
9951 ExprObjectKind OK = OK_Ordinary;
9952 QualType DstTy = TInfo->getType();
9953 QualType SrcTy = E->getType();
9954
9955 if (!SrcTy->isVectorType() && !SrcTy->isDependentType())
9956 return ExprError(Diag(BuiltinLoc,
9957 diag::err_convertvector_non_vector)
9958 << E->getSourceRange());
9959 if (!DstTy->isVectorType() && !DstTy->isDependentType())
9960 return ExprError(Diag(BuiltinLoc, diag::err_builtin_non_vector_type)
9961 << "second"
9962 << "__builtin_convertvector");
9963
9964 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) {
9965 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements();
9966 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements();
9967 if (SrcElts != DstElts)
9968 return ExprError(Diag(BuiltinLoc,
9969 diag::err_convertvector_incompatible_vector)
9970 << E->getSourceRange());
9971 }
9972
9973 return new (Context) class ConvertVectorExpr(E, TInfo, DstTy, VK, OK,
9974 BuiltinLoc, RParenLoc);
9975}
9976
9977/// BuiltinPrefetch - Handle __builtin_prefetch.
9978// This is declared to take (const void*, ...) and can take two
9979// optional constant int args.
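//
// For example (illustrative):
//   void warm(const void *p) {
//     __builtin_prefetch(p);       // defaults for rw and locality
//     __builtin_prefetch(p, 1, 3); // rw must be in [0,1], locality in [0,3]
//   }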
9980bool Sema::BuiltinPrefetch(CallExpr *TheCall) {
9981 unsigned NumArgs = TheCall->getNumArgs();
9982
9983 if (NumArgs > 3)
9984 return Diag(TheCall->getEndLoc(),
9985 diag::err_typecheck_call_too_many_args_at_most)
9986 << 0 /*function call*/ << 3 << NumArgs << /*is non object*/ 0
9987 << TheCall->getSourceRange();
9988
9989 // Argument 0 is checked for us and the remaining arguments must be
9990 // constant integers.
9991 for (unsigned i = 1; i != NumArgs; ++i)
9992 if (BuiltinConstantArgRange(TheCall, ArgNum: i, Low: 0, High: i == 1 ? 1 : 3))
9993 return true;
9994
9995 return false;
9996}
9997
9998/// BuiltinArithmeticFence - Handle __arithmetic_fence.
9999bool Sema::BuiltinArithmeticFence(CallExpr *TheCall) {
10000 if (!Context.getTargetInfo().checkArithmeticFenceSupported())
10001 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
10002 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
10003 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 1))
10004 return true;
10005 Expr *Arg = TheCall->getArg(Arg: 0);
10006 if (Arg->isInstantiationDependent())
10007 return false;
10008
10009 QualType ArgTy = Arg->getType();
10010 if (!ArgTy->hasFloatingRepresentation())
10011 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector)
10012 << ArgTy;
10013 if (Arg->isLValue()) {
10014 ExprResult FirstArg = DefaultLvalueConversion(E: Arg);
10015 TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());
10016 }
10017 TheCall->setType(TheCall->getArg(Arg: 0)->getType());
10018 return false;
10019}
10020
10021/// BuiltinAssume - Handle __assume (MS Extension).
// __assume does not evaluate its argument, and should warn if the argument
// has side effects.
10024bool Sema::BuiltinAssume(CallExpr *TheCall) {
10025 Expr *Arg = TheCall->getArg(Arg: 0);
10026 if (Arg->isInstantiationDependent()) return false;
10027
10028 if (Arg->HasSideEffects(Context))
10029 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects)
10030 << Arg->getSourceRange()
10031 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier();
10032
10033 return false;
10034}
10035
/// Handle __builtin_alloca_with_align. This is declared
/// as (size_t, size_t) where the second size_t must be a power of 2 no
/// smaller than 8 (the alignment is expressed in bits).
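///
/// For example (illustrative; assumes the alignment argument is in bits):
/// \code
///   void use(unsigned n) {
///     char *buf = (char *)__builtin_alloca_with_align(n, 128); // 16-byte align
///     buf[0] = 0;
///   }
/// \endcode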
10039bool Sema::BuiltinAllocaWithAlign(CallExpr *TheCall) {
10040 // The alignment must be a constant integer.
10041 Expr *Arg = TheCall->getArg(Arg: 1);
10042
10043 // We can't check the value of a dependent argument.
10044 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
10045 if (const auto *UE =
10046 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts()))
10047 if (UE->getKind() == UETT_AlignOf ||
10048 UE->getKind() == UETT_PreferredAlignOf)
10049 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof)
10050 << Arg->getSourceRange();
10051
10052 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Ctx: Context);
10053
10054 if (!Result.isPowerOf2())
10055 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
10056 << Arg->getSourceRange();
10057
10058 if (Result < Context.getCharWidth())
10059 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small)
10060 << (unsigned)Context.getCharWidth() << Arg->getSourceRange();
10061
10062 if (Result > std::numeric_limits<int32_t>::max())
10063 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big)
10064 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange();
10065 }
10066
10067 return false;
10068}
10069
10070/// Handle __builtin_assume_aligned. This is declared
10071/// as (const void*, size_t, ...) and can take one optional constant int arg.
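///
/// For example (illustrative):
/// \code
///   int first(int *p) {
///     int *q = (int *)__builtin_assume_aligned(p, 64);    // power-of-two constant
///     int *r = (int *)__builtin_assume_aligned(p, 64, 4); // with an offset
///     return *q + *r;
///   }
/// \endcode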
10072bool Sema::BuiltinAssumeAligned(CallExpr *TheCall) {
10073 if (checkArgCountRange(S&: *this, Call: TheCall, MinArgCount: 2, MaxArgCount: 3))
10074 return true;
10075
10076 unsigned NumArgs = TheCall->getNumArgs();
10077 Expr *FirstArg = TheCall->getArg(Arg: 0);
10078
10079 {
10080 ExprResult FirstArgResult =
10081 DefaultFunctionArrayLvalueConversion(E: FirstArg);
10082 if (checkBuiltinArgument(S&: *this, E: TheCall, ArgIndex: 0))
10083 return true;
    // The in-place update of FirstArg by checkBuiltinArgument is ignored.
10085 TheCall->setArg(Arg: 0, ArgExpr: FirstArgResult.get());
10086 }
10087
10088 // The alignment must be a constant integer.
10089 Expr *SecondArg = TheCall->getArg(Arg: 1);
10090
10091 // We can't check the value of a dependent argument.
10092 if (!SecondArg->isValueDependent()) {
10093 llvm::APSInt Result;
10094 if (BuiltinConstantArg(TheCall, ArgNum: 1, Result))
10095 return true;
10096
10097 if (!Result.isPowerOf2())
10098 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
10099 << SecondArg->getSourceRange();
10100
10101 if (Result > Sema::MaximumAlignment)
10102 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great)
10103 << SecondArg->getSourceRange() << Sema::MaximumAlignment;
10104 }
10105
10106 if (NumArgs > 2) {
10107 Expr *ThirdArg = TheCall->getArg(Arg: 2);
10108 if (convertArgumentToType(S&: *this, Value&: ThirdArg, Ty: Context.getSizeType()))
10109 return true;
10110 TheCall->setArg(Arg: 2, ArgExpr: ThirdArg);
10111 }
10112
10113 return false;
10114}
10115
10116bool Sema::BuiltinOSLogFormat(CallExpr *TheCall) {
10117 unsigned BuiltinID =
10118 cast<FunctionDecl>(Val: TheCall->getCalleeDecl())->getBuiltinID();
10119 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size;
10120
10121 unsigned NumArgs = TheCall->getNumArgs();
10122 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2;
10123 if (NumArgs < NumRequiredArgs) {
10124 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
10125 << 0 /* function call */ << NumRequiredArgs << NumArgs
10126 << /*is non object*/ 0 << TheCall->getSourceRange();
10127 }
10128 if (NumArgs >= NumRequiredArgs + 0x100) {
10129 return Diag(TheCall->getEndLoc(),
10130 diag::err_typecheck_call_too_many_args_at_most)
10131 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs
10132 << /*is non object*/ 0 << TheCall->getSourceRange();
10133 }
10134 unsigned i = 0;
10135
10136 // For formatting call, check buffer arg.
10137 if (!IsSizeCall) {
10138 ExprResult Arg(TheCall->getArg(Arg: i));
10139 InitializedEntity Entity = InitializedEntity::InitializeParameter(
10140 Context, Context.VoidPtrTy, false);
10141 Arg = PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: Arg);
10142 if (Arg.isInvalid())
10143 return true;
10144 TheCall->setArg(Arg: i, ArgExpr: Arg.get());
10145 i++;
10146 }
10147
10148 // Check string literal arg.
10149 unsigned FormatIdx = i;
10150 {
10151 ExprResult Arg = CheckOSLogFormatStringArg(Arg: TheCall->getArg(Arg: i));
10152 if (Arg.isInvalid())
10153 return true;
10154 TheCall->setArg(Arg: i, ArgExpr: Arg.get());
10155 i++;
10156 }
10157
10158 // Make sure variadic args are scalar.
10159 unsigned FirstDataArg = i;
10160 while (i < NumArgs) {
10161 ExprResult Arg = DefaultVariadicArgumentPromotion(
10162 E: TheCall->getArg(Arg: i), CT: VariadicFunction, FDecl: nullptr);
10163 if (Arg.isInvalid())
10164 return true;
10165 CharUnits ArgSize = Context.getTypeSizeInChars(T: Arg.get()->getType());
10166 if (ArgSize.getQuantity() >= 0x100) {
10167 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big)
10168 << i << (int)ArgSize.getQuantity() << 0xff
10169 << TheCall->getSourceRange();
10170 }
10171 TheCall->setArg(Arg: i, ArgExpr: Arg.get());
10172 i++;
10173 }
10174
10175 // Check formatting specifiers. NOTE: We're only doing this for the non-size
10176 // call to avoid duplicate diagnostics.
10177 if (!IsSizeCall) {
10178 llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
10179 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
10180 bool Success = CheckFormatArguments(
10181 Args, FAPK: FAPK_Variadic, format_idx: FormatIdx, firstDataArg: FirstDataArg, Type: FST_OSLog,
10182 CallType: VariadicFunction, Loc: TheCall->getBeginLoc(), range: SourceRange(),
10183 CheckedVarArgs);
10184 if (!Success)
10185 return true;
10186 }
10187
10188 if (IsSizeCall) {
10189 TheCall->setType(Context.getSizeType());
10190 } else {
10191 TheCall->setType(Context.VoidPtrTy);
10192 }
10193 return false;
10194}
10195
10196/// BuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
10197/// TheCall is a constant expression.
10198bool Sema::BuiltinConstantArg(CallExpr *TheCall, int ArgNum,
10199 llvm::APSInt &Result) {
10200 Expr *Arg = TheCall->getArg(Arg: ArgNum);
10201 DeclRefExpr *DRE =cast<DeclRefExpr>(Val: TheCall->getCallee()->IgnoreParenCasts());
10202 FunctionDecl *FDecl = cast<FunctionDecl>(Val: DRE->getDecl());
10203
10204 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;
10205
10206 std::optional<llvm::APSInt> R;
10207 if (!(R = Arg->getIntegerConstantExpr(Context)))
10208 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type)
10209 << FDecl->getDeclName() << Arg->getSourceRange();
10210 Result = *R;
10211 return false;
10212}
10213
10214/// BuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
10215/// TheCall is a constant expression in the range [Low, High].
10216bool Sema::BuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
10217 int High, bool RangeIsError) {
10218 if (isConstantEvaluatedContext())
10219 return false;
10220 llvm::APSInt Result;
10221
10222 // We can't check the value of a dependent argument.
10223 Expr *Arg = TheCall->getArg(Arg: ArgNum);
10224 if (Arg->isTypeDependent() || Arg->isValueDependent())
10225 return false;
10226
10227 // Check constant-ness first.
10228 if (BuiltinConstantArg(TheCall, ArgNum, Result))
10229 return true;
10230
10231 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
10232 if (RangeIsError)
10233 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
10234 << toString(Result, 10) << Low << High << Arg->getSourceRange();
10235 else
10236 // Defer the warning until we know if the code will be emitted so that
10237 // dead code can ignore this.
10238 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
10239 PDiag(diag::warn_argument_invalid_range)
10240 << toString(Result, 10) << Low << High
10241 << Arg->getSourceRange());
10242 }
10243
10244 return false;
10245}
10246
/// BuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression that is a multiple of Num.
10249bool Sema::BuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
10250 unsigned Num) {
10251 llvm::APSInt Result;
10252
10253 // We can't check the value of a dependent argument.
10254 Expr *Arg = TheCall->getArg(Arg: ArgNum);
10255 if (Arg->isTypeDependent() || Arg->isValueDependent())
10256 return false;
10257
10258 // Check constant-ness first.
10259 if (BuiltinConstantArg(TheCall, ArgNum, Result))
10260 return true;
10261
10262 if (Result.getSExtValue() % Num != 0)
10263 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
10264 << Num << Arg->getSourceRange();
10265
10266 return false;
10267}
10268
10269/// BuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
10270/// constant expression representing a power of 2.
10271bool Sema::BuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) {
10272 llvm::APSInt Result;
10273
10274 // We can't check the value of a dependent argument.
10275 Expr *Arg = TheCall->getArg(Arg: ArgNum);
10276 if (Arg->isTypeDependent() || Arg->isValueDependent())
10277 return false;
10278
10279 // Check constant-ness first.
10280 if (BuiltinConstantArg(TheCall, ArgNum, Result))
10281 return true;
10282
10283 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if
10284 // and only if x is a power of 2.
10285 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0)
10286 return false;
10287
10288 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2)
10289 << Arg->getSourceRange();
10290}
10291
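/// Returns true if Value is a non-negative byte value shifted left by a
/// multiple of 8 bits, e.g. 0x00, 0xAB, 0xAB00, or 0xAB0000, and false for
/// values such as 0x101 or any negative value.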
10292static bool IsShiftedByte(llvm::APSInt Value) {
10293 if (Value.isNegative())
10294 return false;
10295
10296 // Check if it's a shifted byte, by shifting it down
10297 while (true) {
10298 // If the value fits in the bottom byte, the check passes.
10299 if (Value < 0x100)
10300 return true;
10301
10302 // Otherwise, if the value has _any_ bits in the bottom byte, the check
10303 // fails.
10304 if ((Value & 0xFF) != 0)
10305 return false;
10306
10307 // If the bottom 8 bits are all 0, but something above that is nonzero,
10308 // then shifting the value right by 8 bits won't affect whether it's a
10309 // shifted byte or not. So do that, and go round again.
10310 Value >>= 8;
10311 }
10312}
10313
10314/// BuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is
10315/// a constant expression representing an arbitrary byte value shifted left by
10316/// a multiple of 8 bits.
10317bool Sema::BuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
10318 unsigned ArgBits) {
10319 llvm::APSInt Result;
10320
10321 // We can't check the value of a dependent argument.
10322 Expr *Arg = TheCall->getArg(Arg: ArgNum);
10323 if (Arg->isTypeDependent() || Arg->isValueDependent())
10324 return false;
10325
10326 // Check constant-ness first.
10327 if (BuiltinConstantArg(TheCall, ArgNum, Result))
10328 return true;
10329
10330 // Truncate to the given size.
10331 Result = Result.getLoBits(numBits: ArgBits);
10332 Result.setIsUnsigned(true);
10333
10334 if (IsShiftedByte(Value: Result))
10335 return false;
10336
10337 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte)
10338 << Arg->getSourceRange();
10339}
10340
10341/// BuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of
10342/// TheCall is a constant expression representing either a shifted byte value,
10343/// or a value of the form 0x??FF (i.e. a member of the arithmetic progression
10344/// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some
10345/// Arm MVE intrinsics.
10346bool Sema::BuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
10347 unsigned ArgBits) {
10348 llvm::APSInt Result;
10349
10350 // We can't check the value of a dependent argument.
10351 Expr *Arg = TheCall->getArg(Arg: ArgNum);
10352 if (Arg->isTypeDependent() || Arg->isValueDependent())
10353 return false;
10354
10355 // Check constant-ness first.
10356 if (BuiltinConstantArg(TheCall, ArgNum, Result))
10357 return true;
10358
10359 // Truncate to the given size.
10360 Result = Result.getLoBits(numBits: ArgBits);
10361 Result.setIsUnsigned(true);
10362
10363 // Check to see if it's in either of the required forms.
10364 if (IsShiftedByte(Value: Result) ||
10365 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF))
10366 return false;
10367
10368 return Diag(TheCall->getBeginLoc(),
10369 diag::err_argument_not_shifted_byte_or_xxff)
10370 << Arg->getSourceRange();
10371}
10372
10373/// BuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
10374bool Sema::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
10375 if (BuiltinID == AArch64::BI__builtin_arm_irg) {
10376 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 2))
10377 return true;
10378 Expr *Arg0 = TheCall->getArg(Arg: 0);
10379 Expr *Arg1 = TheCall->getArg(Arg: 1);
10380
10381 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(E: Arg0);
10382 if (FirstArg.isInvalid())
10383 return true;
10384 QualType FirstArgType = FirstArg.get()->getType();
10385 if (!FirstArgType->isAnyPointerType())
10386 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
10387 << "first" << FirstArgType << Arg0->getSourceRange();
10388 TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());
10389
10390 ExprResult SecArg = DefaultLvalueConversion(E: Arg1);
10391 if (SecArg.isInvalid())
10392 return true;
10393 QualType SecArgType = SecArg.get()->getType();
10394 if (!SecArgType->isIntegerType())
10395 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
10396 << "second" << SecArgType << Arg1->getSourceRange();
10397
10398 // Derive the return type from the pointer argument.
10399 TheCall->setType(FirstArgType);
10400 return false;
10401 }
10402
10403 if (BuiltinID == AArch64::BI__builtin_arm_addg) {
10404 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 2))
10405 return true;
10406
10407 Expr *Arg0 = TheCall->getArg(Arg: 0);
10408 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(E: Arg0);
10409 if (FirstArg.isInvalid())
10410 return true;
10411 QualType FirstArgType = FirstArg.get()->getType();
10412 if (!FirstArgType->isAnyPointerType())
10413 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
10414 << "first" << FirstArgType << Arg0->getSourceRange();
10415 TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());
10416
10417 // Derive the return type from the pointer argument.
10418 TheCall->setType(FirstArgType);
10419
    // Second arg must be a constant in the range [0,15].
10421 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
10422 }
10423
10424 if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
10425 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 2))
10426 return true;
10427 Expr *Arg0 = TheCall->getArg(Arg: 0);
10428 Expr *Arg1 = TheCall->getArg(Arg: 1);
10429
10430 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(E: Arg0);
10431 if (FirstArg.isInvalid())
10432 return true;
10433 QualType FirstArgType = FirstArg.get()->getType();
10434 if (!FirstArgType->isAnyPointerType())
10435 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
10436 << "first" << FirstArgType << Arg0->getSourceRange();
10437
10438 QualType SecArgType = Arg1->getType();
10439 if (!SecArgType->isIntegerType())
10440 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
10441 << "second" << SecArgType << Arg1->getSourceRange();
10442 TheCall->setType(Context.IntTy);
10443 return false;
10444 }
10445
10446 if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
10447 BuiltinID == AArch64::BI__builtin_arm_stg) {
10448 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 1))
10449 return true;
10450 Expr *Arg0 = TheCall->getArg(Arg: 0);
10451 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(E: Arg0);
10452 if (FirstArg.isInvalid())
10453 return true;
10454
10455 QualType FirstArgType = FirstArg.get()->getType();
10456 if (!FirstArgType->isAnyPointerType())
10457 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
10458 << "first" << FirstArgType << Arg0->getSourceRange();
10459 TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());
10460
10461 // Derive the return type from the pointer argument.
10462 if (BuiltinID == AArch64::BI__builtin_arm_ldg)
10463 TheCall->setType(FirstArgType);
10464 return false;
10465 }
10466
10467 if (BuiltinID == AArch64::BI__builtin_arm_subp) {
10468 Expr *ArgA = TheCall->getArg(Arg: 0);
10469 Expr *ArgB = TheCall->getArg(Arg: 1);
10470
10471 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(E: ArgA);
10472 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(E: ArgB);
10473
10474 if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
10475 return true;
10476
10477 QualType ArgTypeA = ArgExprA.get()->getType();
10478 QualType ArgTypeB = ArgExprB.get()->getType();
10479
10480 auto isNull = [&] (Expr *E) -> bool {
10481 return E->isNullPointerConstant(
10482 Ctx&: Context, NPC: Expr::NPC_ValueDependentIsNotNull); };
10483
10484 // argument should be either a pointer or null
10485 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
10486 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
10487 << "first" << ArgTypeA << ArgA->getSourceRange();
10488
10489 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
10490 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
10491 << "second" << ArgTypeB << ArgB->getSourceRange();
10492
10493 // Ensure Pointee types are compatible
10494 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
10495 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
10496 QualType pointeeA = ArgTypeA->getPointeeType();
10497 QualType pointeeB = ArgTypeB->getPointeeType();
10498 if (!Context.typesAreCompatible(
10499 T1: Context.getCanonicalType(T: pointeeA).getUnqualifiedType(),
10500 T2: Context.getCanonicalType(T: pointeeB).getUnqualifiedType())) {
10501 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible)
10502 << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
10503 << ArgB->getSourceRange();
10504 }
10505 }
10506
    // At least one argument should be a pointer type.
10508 if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
10509 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
10510 << ArgTypeA << ArgTypeB << ArgA->getSourceRange();
10511
10512 if (isNull(ArgA)) // adopt type of the other pointer
10513 ArgExprA = ImpCastExprToType(E: ArgExprA.get(), Type: ArgTypeB, CK: CK_NullToPointer);
10514
10515 if (isNull(ArgB))
10516 ArgExprB = ImpCastExprToType(E: ArgExprB.get(), Type: ArgTypeA, CK: CK_NullToPointer);
10517
10518 TheCall->setArg(Arg: 0, ArgExpr: ArgExprA.get());
10519 TheCall->setArg(Arg: 1, ArgExpr: ArgExprB.get());
10520 TheCall->setType(Context.LongLongTy);
10521 return false;
10522 }
10523 assert(false && "Unhandled ARM MTE intrinsic");
10524 return true;
10525}
10526
10527/// BuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
10528/// TheCall is an ARM/AArch64 special register string literal.
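///
/// For example (AArch64, illustrative; not from this file):
/// \code
///   unsigned long long read_tls(void) {
///     return __builtin_arm_rsr64("tpidr_el0"); // named register string
///   }
///   void set_spsel(void) {
///     __builtin_arm_wsr("spsel", 1);           // named PSTATE: value must be
///   }                                          // a constant within its limit
/// \endcode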
10529bool Sema::BuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
10530 int ArgNum, unsigned ExpectedFieldNum,
10531 bool AllowName) {
10532 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
10533 BuiltinID == ARM::BI__builtin_arm_wsr64 ||
10534 BuiltinID == ARM::BI__builtin_arm_rsr ||
10535 BuiltinID == ARM::BI__builtin_arm_rsrp ||
10536 BuiltinID == ARM::BI__builtin_arm_wsr ||
10537 BuiltinID == ARM::BI__builtin_arm_wsrp;
10538 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
10539 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
10540 BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
10541 BuiltinID == AArch64::BI__builtin_arm_wsr128 ||
10542 BuiltinID == AArch64::BI__builtin_arm_rsr ||
10543 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
10544 BuiltinID == AArch64::BI__builtin_arm_wsr ||
10545 BuiltinID == AArch64::BI__builtin_arm_wsrp;
10546 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");
10547
10548 // We can't check the value of a dependent argument.
10549 Expr *Arg = TheCall->getArg(Arg: ArgNum);
10550 if (Arg->isTypeDependent() || Arg->isValueDependent())
10551 return false;
10552
10553 // Check if the argument is a string literal.
10554 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
10555 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
10556 << Arg->getSourceRange();
10557
10558 // Check the type of special register given.
10559 StringRef Reg = cast<StringLiteral>(Val: Arg->IgnoreParenImpCasts())->getString();
10560 SmallVector<StringRef, 6> Fields;
10561 Reg.split(A&: Fields, Separator: ":");
10562
10563 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
10564 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
10565 << Arg->getSourceRange();
10566
  // If the string is the name of a register then we cannot check that it is
  // valid here, but if the string is of one of the forms described in the ACLE
  // then we can check that the supplied fields are integers and within the
  // valid ranges.
10571 if (Fields.size() > 1) {
10572 bool FiveFields = Fields.size() == 5;
10573
10574 bool ValidString = true;
10575 if (IsARMBuiltin) {
10576 ValidString &= Fields[0].starts_with_insensitive(Prefix: "cp") ||
10577 Fields[0].starts_with_insensitive(Prefix: "p");
10578 if (ValidString)
10579 Fields[0] = Fields[0].drop_front(
10580 N: Fields[0].starts_with_insensitive(Prefix: "cp") ? 2 : 1);
10581
10582 ValidString &= Fields[2].starts_with_insensitive(Prefix: "c");
10583 if (ValidString)
10584 Fields[2] = Fields[2].drop_front(N: 1);
10585
10586 if (FiveFields) {
10587 ValidString &= Fields[3].starts_with_insensitive(Prefix: "c");
10588 if (ValidString)
10589 Fields[3] = Fields[3].drop_front(N: 1);
10590 }
10591 }
10592
10593 SmallVector<int, 5> Ranges;
10594 if (FiveFields)
10595 Ranges.append(IL: {IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
10596 else
10597 Ranges.append(IL: {15, 7, 15});
10598
10599 for (unsigned i=0; i<Fields.size(); ++i) {
10600 int IntField;
10601 ValidString &= !Fields[i].getAsInteger(Radix: 10, Result&: IntField);
10602 ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
10603 }
10604
10605 if (!ValidString)
10606 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
10607 << Arg->getSourceRange();
10608 } else if (IsAArch64Builtin && Fields.size() == 1) {
10609 // This code validates writes to PSTATE registers.
10610
10611 // Not a write.
10612 if (TheCall->getNumArgs() != 2)
10613 return false;
10614
10615 // The 128-bit system register accesses do not touch PSTATE.
10616 if (BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
10617 BuiltinID == AArch64::BI__builtin_arm_wsr128)
10618 return false;
10619
10620 // These are the named PSTATE accesses using "MSR (immediate)" instructions,
10621 // along with the upper limit on the immediates allowed.
10622 auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
10623 .CaseLower(S: "spsel", Value: 15)
10624 .CaseLower(S: "daifclr", Value: 15)
10625 .CaseLower(S: "daifset", Value: 15)
10626 .CaseLower(S: "pan", Value: 15)
10627 .CaseLower(S: "uao", Value: 15)
10628 .CaseLower(S: "dit", Value: 15)
10629 .CaseLower(S: "ssbs", Value: 15)
10630 .CaseLower(S: "tco", Value: 15)
10631 .CaseLower(S: "allint", Value: 1)
10632 .CaseLower(S: "pm", Value: 1)
10633 .Default(Value: std::nullopt);
10634
10635 // If this is not a named PSTATE, just continue without validating, as this
10636 // will be lowered to an "MSR (register)" instruction directly
10637 if (!MaxLimit)
10638 return false;
10639
10640 // Here we only allow constants in the range for that pstate, as required by
10641 // the ACLE.
10642 //
10643 // While clang also accepts the names of system registers in its ACLE
10644 // intrinsics, we prevent this with the PSTATE names used in MSR (immediate)
10645 // as the value written via a register is different to the value used as an
10646 // immediate to have the same effect. e.g., for the instruction `msr tco,
10647 // x0`, it is bit 25 of register x0 that is written into PSTATE.TCO, but
10648 // with `msr tco, #imm`, it is bit 0 of xN that is written into PSTATE.TCO.
10649 //
10650 // If a programmer wants to codegen the MSR (register) form of `msr tco,
10651 // xN`, they can still do so by specifying the register using five
10652 // colon-separated numbers in a string.
10653 return BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: *MaxLimit);
10654 }
10655
10656 return false;
10657}
10658
10659/// BuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity.
10660/// Emit an error and return true on failure; return false on success.
10661/// TypeStr is a string containing the type descriptor of the value returned by
10662/// the builtin and the descriptors of the expected type of the arguments.
10663bool Sema::BuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
10664 const char *TypeStr) {
10665
10666 assert((TypeStr[0] != '\0') &&
10667 "Invalid types in PPC MMA builtin declaration");
10668
10669 unsigned Mask = 0;
10670 unsigned ArgNum = 0;
10671
10672 // The first type in TypeStr is the type of the value returned by the
10673 // builtin. So we first read that type and change the type of TheCall.
10674 QualType type = DecodePPCMMATypeFromStr(Context, Str&: TypeStr, Mask);
10675 TheCall->setType(type);
10676
10677 while (*TypeStr != '\0') {
10678 Mask = 0;
10679 QualType ExpectedType = DecodePPCMMATypeFromStr(Context, Str&: TypeStr, Mask);
10680 if (ArgNum >= TheCall->getNumArgs()) {
10681 ArgNum++;
10682 break;
10683 }
10684
10685 Expr *Arg = TheCall->getArg(Arg: ArgNum);
10686 QualType PassedType = Arg->getType();
10687 QualType StrippedRVType = PassedType.getCanonicalType();
10688
10689 // Strip Restrict/Volatile qualifiers.
10690 if (StrippedRVType.isRestrictQualified() ||
10691 StrippedRVType.isVolatileQualified())
10692 StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType();
10693
10694    // The only case where the argument type and the expected type are allowed
10695    // to mismatch is if the argument type is a non-void pointer (or array) and
10696    // the expected type is a void pointer.
10697 if (StrippedRVType != ExpectedType)
10698 if (!(ExpectedType->isVoidPointerType() &&
10699 (StrippedRVType->isPointerType() || StrippedRVType->isArrayType())))
10700 return Diag(Arg->getBeginLoc(),
10701 diag::err_typecheck_convert_incompatible)
10702 << PassedType << ExpectedType << 1 << 0 << 0;
10703
10704    // If the value of Mask is not 0, there is a constraint on the range of
10705    // the integer argument, so ensure here that the argument is a constant
10706    // within the valid range.
10707 if (Mask != 0 && BuiltinConstantArgRange(TheCall, ArgNum, Low: 0, High: Mask, RangeIsError: true))
10708 return true;
10709
10710 ArgNum++;
10711 }
10712
10713  // If we exited the previous loop early, there are remaining types to read
10714  // from TypeStr. Read them all so that we know the expected number of
10715  // arguments for TheCall and can emit a better error message if the call
10716  // does not match it.
10717 while (*TypeStr != '\0') {
10718 (void) DecodePPCMMATypeFromStr(Context, Str&: TypeStr, Mask);
10719 ArgNum++;
10720 }
10721 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: ArgNum))
10722 return true;
10723
10724 return false;
10725}
10726
10727/// BuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
10728/// This checks that the target supports __builtin_longjmp and
10729/// that val is a constant 1.
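/// Illustrative usage (a sketch, not taken from any particular test):
///   void *env[5];
///   if (__builtin_setjmp(env) == 0)
///     __builtin_longjmp(env, 1);   // OK: val is the constant 1
///   // __builtin_longjmp(env, 2);  // rejected: val must be 1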
10730bool Sema::BuiltinLongjmp(CallExpr *TheCall) {
10731 if (!Context.getTargetInfo().hasSjLjLowering())
10732 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported)
10733 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
10734
10735 Expr *Arg = TheCall->getArg(Arg: 1);
10736 llvm::APSInt Result;
10737
10738 // TODO: This is less than ideal. Overload this to take a value.
10739 if (BuiltinConstantArg(TheCall, ArgNum: 1, Result))
10740 return true;
10741
10742 if (Result != 1)
10743 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val)
10744 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc());
10745
10746 return false;
10747}
10748
10749/// BuiltinSetjmp - Handle __builtin_setjmp(void *env[5]).
10750/// This checks that the target supports __builtin_setjmp.
10751bool Sema::BuiltinSetjmp(CallExpr *TheCall) {
10752 if (!Context.getTargetInfo().hasSjLjLowering())
10753 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported)
10754 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
10755 return false;
10756}
10757
10758namespace {
10759
10760class UncoveredArgHandler {
10761 enum { Unknown = -1, AllCovered = -2 };
10762
10763 signed FirstUncoveredArg = Unknown;
10764 SmallVector<const Expr *, 4> DiagnosticExprs;
10765
10766public:
10767 UncoveredArgHandler() = default;
10768
10769 bool hasUncoveredArg() const {
10770 return (FirstUncoveredArg >= 0);
10771 }
10772
10773 unsigned getUncoveredArg() const {
10774 assert(hasUncoveredArg() && "no uncovered argument");
10775 return FirstUncoveredArg;
10776 }
10777
10778 void setAllCovered() {
10779 // A string has been found with all arguments covered, so clear out
10780 // the diagnostics.
10781 DiagnosticExprs.clear();
10782 FirstUncoveredArg = AllCovered;
10783 }
10784
10785 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) {
10786 assert(NewFirstUncoveredArg >= 0 && "Outside range");
10787
10788 // Don't update if a previous string covers all arguments.
10789 if (FirstUncoveredArg == AllCovered)
10790 return;
10791
10792 // UncoveredArgHandler tracks the highest uncovered argument index
10793 // and with it all the strings that match this index.
10794 if (NewFirstUncoveredArg == FirstUncoveredArg)
10795 DiagnosticExprs.push_back(Elt: StrExpr);
10796 else if (NewFirstUncoveredArg > FirstUncoveredArg) {
10797 DiagnosticExprs.clear();
10798 DiagnosticExprs.push_back(Elt: StrExpr);
10799 FirstUncoveredArg = NewFirstUncoveredArg;
10800 }
10801 }
10802
10803 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr);
10804};
10805
10806enum StringLiteralCheckType {
10807 SLCT_NotALiteral,
10808 SLCT_UncheckedLiteral,
10809 SLCT_CheckedLiteral
10810};
10811
10812} // namespace
10813
10814static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend,
10815 BinaryOperatorKind BinOpKind,
10816 bool AddendIsRight) {
10817 unsigned BitWidth = Offset.getBitWidth();
10818 unsigned AddendBitWidth = Addend.getBitWidth();
10819 // There might be negative interim results.
10820 if (Addend.isUnsigned()) {
10821 Addend = Addend.zext(width: ++AddendBitWidth);
10822 Addend.setIsSigned(true);
10823 }
10824 // Adjust the bit width of the APSInts.
10825 if (AddendBitWidth > BitWidth) {
10826 Offset = Offset.sext(width: AddendBitWidth);
10827 BitWidth = AddendBitWidth;
10828 } else if (BitWidth > AddendBitWidth) {
10829 Addend = Addend.sext(width: BitWidth);
10830 }
10831
10832 bool Ov = false;
10833 llvm::APSInt ResOffset = Offset;
10834 if (BinOpKind == BO_Add)
10835 ResOffset = Offset.sadd_ov(RHS: Addend, Overflow&: Ov);
10836 else {
10837 assert(AddendIsRight && BinOpKind == BO_Sub &&
10838 "operator must be add or sub with addend on the right");
10839 ResOffset = Offset.ssub_ov(RHS: Addend, Overflow&: Ov);
10840 }
10841
10842  // Since we are adding this offset to a pointer, support as wide an offset
10843  // as possible.
10844 if (Ov) {
10845 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 &&
10846 "index (intermediate) result too big");
10847 Offset = Offset.sext(width: 2 * BitWidth);
10848 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight);
10849 return;
10850 }
10851
10852 Offset = ResOffset;
10853}
10854
10855namespace {
10856
10857 // This is a wrapper class around StringLiteral to support offsetted string
10858 // literals as format strings. It takes the offset into account when returning
10859 // the string and its length, and when computing source locations, so that notes point at the right bytes.
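// For example (illustrative), when checking printf("%d: %s" + 4, s), the
// wrapper exposes only "%s" to the checker and shifts the reported byte
// locations by the 4-byte offset.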
10860class FormatStringLiteral {
10861 const StringLiteral *FExpr;
10862 int64_t Offset;
10863
10864 public:
10865 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0)
10866 : FExpr(fexpr), Offset(Offset) {}
10867
10868 StringRef getString() const {
10869 return FExpr->getString().drop_front(N: Offset);
10870 }
10871
10872 unsigned getByteLength() const {
10873 return FExpr->getByteLength() - getCharByteWidth() * Offset;
10874 }
10875
10876 unsigned getLength() const { return FExpr->getLength() - Offset; }
10877 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); }
10878
10879 StringLiteralKind getKind() const { return FExpr->getKind(); }
10880
10881 QualType getType() const { return FExpr->getType(); }
10882
10883 bool isAscii() const { return FExpr->isOrdinary(); }
10884 bool isWide() const { return FExpr->isWide(); }
10885 bool isUTF8() const { return FExpr->isUTF8(); }
10886 bool isUTF16() const { return FExpr->isUTF16(); }
10887 bool isUTF32() const { return FExpr->isUTF32(); }
10888 bool isPascal() const { return FExpr->isPascal(); }
10889
10890 SourceLocation getLocationOfByte(
10891 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features,
10892 const TargetInfo &Target, unsigned *StartToken = nullptr,
10893 unsigned *StartTokenByteOffset = nullptr) const {
10894 return FExpr->getLocationOfByte(ByteNo: ByteNo + Offset, SM, Features, Target,
10895 StartToken, StartTokenByteOffset);
10896 }
10897
10898 SourceLocation getBeginLoc() const LLVM_READONLY {
10899 return FExpr->getBeginLoc().getLocWithOffset(Offset);
10900 }
10901
10902 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); }
10903};
10904
10905} // namespace
10906
10907static void CheckFormatString(
10908 Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr,
10909 ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK,
10910 unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type,
10911 bool inFunctionCall, Sema::VariadicCallType CallType,
10912 llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg,
10913 bool IgnoreStringsWithoutSpecifiers);
10914
10915static const Expr *maybeConstEvalStringLiteral(ASTContext &Context,
10916 const Expr *E);
10917
10918 // Determine if an expression is a string literal or constant string.
10919 // If this function returns SLCT_NotALiteral for the argument of a function
10920 // expecting a format string, we will usually need to emit a warning.
10921 // True string literals are then checked by CheckFormatString.
10922static StringLiteralCheckType
10923checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
10924 Sema::FormatArgumentPassingKind APK, unsigned format_idx,
10925 unsigned firstDataArg, Sema::FormatStringType Type,
10926 Sema::VariadicCallType CallType, bool InFunctionCall,
10927 llvm::SmallBitVector &CheckedVarArgs,
10928 UncoveredArgHandler &UncoveredArg, llvm::APSInt Offset,
10929 bool IgnoreStringsWithoutSpecifiers = false) {
10930 if (S.isConstantEvaluatedContext())
10931 return SLCT_NotALiteral;
10932tryAgain:
10933 assert(Offset.isSigned() && "invalid offset");
10934
10935 if (E->isTypeDependent() || E->isValueDependent())
10936 return SLCT_NotALiteral;
10937
10938 E = E->IgnoreParenCasts();
10939
10940 if (E->isNullPointerConstant(Ctx&: S.Context, NPC: Expr::NPC_ValueDependentIsNotNull))
10941 // Technically -Wformat-nonliteral does not warn about this case.
10942 // The behavior of printf and friends in this case is implementation
10943 // dependent. Ideally if the format string cannot be null then
10944 // it should have a 'nonnull' attribute in the function prototype.
10945 return SLCT_UncheckedLiteral;
10946
10947 switch (E->getStmtClass()) {
10948 case Stmt::InitListExprClass:
10949 // Handle expressions like {"foobar"}.
10950 if (const clang::Expr *SLE = maybeConstEvalStringLiteral(Context&: S.Context, E)) {
10951 return checkFormatStringExpr(S, E: SLE, Args, APK, format_idx, firstDataArg,
10952 Type, CallType, /*InFunctionCall*/ false,
10953 CheckedVarArgs, UncoveredArg, Offset,
10954 IgnoreStringsWithoutSpecifiers);
10955 }
10956 return SLCT_NotALiteral;
10957 case Stmt::BinaryConditionalOperatorClass:
10958 case Stmt::ConditionalOperatorClass: {
10959 // The expression is a literal if both sub-expressions were, and it was
10960 // completely checked only if both sub-expressions were checked.
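    // e.g. in printf(ok ? "%d\n" : "%s\n", v), both branches must be checked
    // against v, and the call counts as fully checked only if both were.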
10961 const AbstractConditionalOperator *C =
10962 cast<AbstractConditionalOperator>(Val: E);
10963
10964 // Determine whether it is necessary to check both sub-expressions, for
10965 // example, because the condition expression is a constant that can be
10966 // evaluated at compile time.
10967 bool CheckLeft = true, CheckRight = true;
10968
10969 bool Cond;
10970 if (C->getCond()->EvaluateAsBooleanCondition(
10971 Result&: Cond, Ctx: S.getASTContext(), InConstantContext: S.isConstantEvaluatedContext())) {
10972 if (Cond)
10973 CheckRight = false;
10974 else
10975 CheckLeft = false;
10976 }
10977
10978 // We need to maintain the offsets for the right and the left hand side
10979 // separately to check if every possible indexed expression is a valid
10980 // string literal. They might have different offsets for different string
10981 // literals in the end.
10982 StringLiteralCheckType Left;
10983 if (!CheckLeft)
10984 Left = SLCT_UncheckedLiteral;
10985 else {
10986 Left = checkFormatStringExpr(S, E: C->getTrueExpr(), Args, APK, format_idx,
10987 firstDataArg, Type, CallType, InFunctionCall,
10988 CheckedVarArgs, UncoveredArg, Offset,
10989 IgnoreStringsWithoutSpecifiers);
10990 if (Left == SLCT_NotALiteral || !CheckRight) {
10991 return Left;
10992 }
10993 }
10994
10995 StringLiteralCheckType Right = checkFormatStringExpr(
10996 S, E: C->getFalseExpr(), Args, APK, format_idx, firstDataArg, Type,
10997 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
10998 IgnoreStringsWithoutSpecifiers);
10999
11000 return (CheckLeft && Left < Right) ? Left : Right;
11001 }
11002
11003 case Stmt::ImplicitCastExprClass:
11004 E = cast<ImplicitCastExpr>(Val: E)->getSubExpr();
11005 goto tryAgain;
11006
11007 case Stmt::OpaqueValueExprClass:
11008 if (const Expr *src = cast<OpaqueValueExpr>(Val: E)->getSourceExpr()) {
11009 E = src;
11010 goto tryAgain;
11011 }
11012 return SLCT_NotALiteral;
11013
11014 case Stmt::PredefinedExprClass:
11015 // While __func__, etc., are technically not string literals, they
11016 // cannot contain format specifiers and thus are not a security
11017 // liability.
11018 return SLCT_UncheckedLiteral;
11019
11020 case Stmt::DeclRefExprClass: {
11021 const DeclRefExpr *DR = cast<DeclRefExpr>(Val: E);
11022
11023 // As an exception, do not flag errors for variables binding to
11024 // const string literals.
11025 if (const VarDecl *VD = dyn_cast<VarDecl>(Val: DR->getDecl())) {
11026 bool isConstant = false;
11027 QualType T = DR->getType();
11028
11029 if (const ArrayType *AT = S.Context.getAsArrayType(T)) {
11030 isConstant = AT->getElementType().isConstant(Ctx: S.Context);
11031 } else if (const PointerType *PT = T->getAs<PointerType>()) {
11032 isConstant = T.isConstant(Ctx: S.Context) &&
11033 PT->getPointeeType().isConstant(Ctx: S.Context);
11034 } else if (T->isObjCObjectPointerType()) {
11035 // In ObjC, there is usually no "const ObjectPointer" type,
11036 // so don't check if the pointee type is constant.
11037 isConstant = T.isConstant(Ctx: S.Context);
11038 }
11039
11040 if (isConstant) {
11041 if (const Expr *Init = VD->getAnyInitializer()) {
11042 // Look through initializers like const char c[] = { "foo" }
11043 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Val: Init)) {
11044 if (InitList->isStringLiteralInit())
11045 Init = InitList->getInit(Init: 0)->IgnoreParenImpCasts();
11046 }
11047 return checkFormatStringExpr(
11048 S, E: Init, Args, APK, format_idx, firstDataArg, Type, CallType,
11049 /*InFunctionCall*/ false, CheckedVarArgs, UncoveredArg, Offset);
11050 }
11051 }
11052
11053 // When the format argument is an argument of this function, and this
11054 // function also has the format attribute, there are several interactions
11055 // for which there shouldn't be a warning. For instance, when calling
11056 // v*printf from a function that has the printf format attribute, we
11057 // should not emit a warning about using `fmt`, even though it's not
11058    // constant, because the arguments were already checked at the call
11059    // site of `logmessage`:
11060 //
11061 // __attribute__((format(printf, 1, 2)))
11062 // void logmessage(char const *fmt, ...) {
11063 // va_list ap;
11064 // va_start(ap, fmt);
11065 // vprintf(fmt, ap); /* do not emit a warning about "fmt" */
11066 // ...
11067 // }
11068 //
11069 // Another interaction that we need to support is calling a variadic
11070 // format function from a format function that has fixed arguments. For
11071 // instance:
11072 //
11073 // __attribute__((format(printf, 1, 2)))
11074 // void logstring(char const *fmt, char const *str) {
11075 // printf(fmt, str); /* do not emit a warning about "fmt" */
11076 // }
11077 //
11078 // Same (and perhaps more relatably) for the variadic template case:
11079 //
11080 // template<typename... Args>
11081 // __attribute__((format(printf, 1, 2)))
11082 // void log(const char *fmt, Args&&... args) {
11083 // printf(fmt, forward<Args>(args)...);
11084 // /* do not emit a warning about "fmt" */
11085 // }
11086 //
11087 // Due to implementation difficulty, we only check the format, not the
11088 // format arguments, in all cases.
11089 //
11090 if (const auto *PV = dyn_cast<ParmVarDecl>(Val: VD)) {
11091 if (const auto *D = dyn_cast<Decl>(PV->getDeclContext())) {
11092 for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) {
11093 bool IsCXXMember = false;
11094 if (const auto *MD = dyn_cast<CXXMethodDecl>(D))
11095 IsCXXMember = MD->isInstance();
11096
11097 bool IsVariadic = false;
11098 if (const FunctionType *FnTy = D->getFunctionType())
11099 IsVariadic = cast<FunctionProtoType>(FnTy)->isVariadic();
11100 else if (const auto *BD = dyn_cast<BlockDecl>(D))
11101 IsVariadic = BD->isVariadic();
11102 else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(D))
11103 IsVariadic = OMD->isVariadic();
11104
11105 Sema::FormatStringInfo CallerFSI;
11106 if (Sema::getFormatStringInfo(PVFormat, IsCXXMember, IsVariadic,
11107 &CallerFSI)) {
11108 // We also check if the formats are compatible.
11109 // We can't pass a 'scanf' string to a 'printf' function.
11110 if (PV->getFunctionScopeIndex() == CallerFSI.FormatIdx &&
11111 Type == S.GetFormatStringType(PVFormat)) {
11112 // Lastly, check that argument passing kinds transition in a
11113 // way that makes sense:
11114 // from a caller with FAPK_VAList, allow FAPK_VAList
11115 // from a caller with FAPK_Fixed, allow FAPK_Fixed
11116 // from a caller with FAPK_Fixed, allow FAPK_Variadic
11117 // from a caller with FAPK_Variadic, allow FAPK_VAList
11118 switch (combineFAPK(CallerFSI.ArgPassingKind, APK)) {
11119 case combineFAPK(Sema::FAPK_VAList, Sema::FAPK_VAList):
11120 case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Fixed):
11121 case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Variadic):
11122 case combineFAPK(Sema::FAPK_Variadic, Sema::FAPK_VAList):
11123 return SLCT_UncheckedLiteral;
11124 }
11125 }
11126 }
11127 }
11128 }
11129 }
11130 }
11131
11132 return SLCT_NotALiteral;
11133 }
11134
11135 case Stmt::CallExprClass:
11136 case Stmt::CXXMemberCallExprClass: {
11137 const CallExpr *CE = cast<CallExpr>(Val: E);
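    // Callees carrying format_arg attributes forward their result into the
    // format checker. Illustrative sketch (mygettext is a hypothetical,
    // gettext-style helper):
    //   extern char *mygettext(const char *) __attribute__((format_arg(1)));
    //   printf(mygettext("%d\n"), n);  // "%d\n" is still checked against n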
11138 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(Val: CE->getCalleeDecl())) {
11139 bool IsFirst = true;
11140 StringLiteralCheckType CommonResult;
11141 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) {
11142 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex());
11143 StringLiteralCheckType Result = checkFormatStringExpr(
11144 S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
11145 InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
11146 IgnoreStringsWithoutSpecifiers);
11147 if (IsFirst) {
11148 CommonResult = Result;
11149 IsFirst = false;
11150 }
11151 }
11152 if (!IsFirst)
11153 return CommonResult;
11154
11155 if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND)) {
11156 unsigned BuiltinID = FD->getBuiltinID();
11157 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
11158 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
11159 const Expr *Arg = CE->getArg(Arg: 0);
11160 return checkFormatStringExpr(
11161 S, E: Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
11162 InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
11163 IgnoreStringsWithoutSpecifiers);
11164 }
11165 }
11166 }
11167 if (const Expr *SLE = maybeConstEvalStringLiteral(Context&: S.Context, E))
11168 return checkFormatStringExpr(S, E: SLE, Args, APK, format_idx, firstDataArg,
11169 Type, CallType, /*InFunctionCall*/ false,
11170 CheckedVarArgs, UncoveredArg, Offset,
11171 IgnoreStringsWithoutSpecifiers);
11172 return SLCT_NotALiteral;
11173 }
11174 case Stmt::ObjCMessageExprClass: {
11175 const auto *ME = cast<ObjCMessageExpr>(Val: E);
11176 if (const auto *MD = ME->getMethodDecl()) {
11177 if (const auto *FA = MD->getAttr<FormatArgAttr>()) {
11178 // As a special case heuristic, if we're using the method -[NSBundle
11179 // localizedStringForKey:value:table:], ignore any key strings that lack
11180 // format specifiers. The idea is that if the key doesn't have any
11181        // format specifiers then it's probably just a key to map to the
11182        // localized strings. If it does have format specifiers though, then it's
11183 // likely that the text of the key is the format string in the
11184 // programmer's language, and should be checked.
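        // Illustrative sketch: the key in
        //   [bundle localizedStringForKey:@"%d files" value:nil table:nil]
        // is treated as a format string because it contains a specifier,
        // whereas a plain key such as @"FilesTitle" is ignored.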
11185 const ObjCInterfaceDecl *IFace;
11186 if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) &&
11187 IFace->getIdentifier()->isStr("NSBundle") &&
11188 MD->getSelector().isKeywordSelector(
11189 Names: {"localizedStringForKey", "value", "table"})) {
11190 IgnoreStringsWithoutSpecifiers = true;
11191 }
11192
11193 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
11194 return checkFormatStringExpr(
11195 S, E: Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
11196 InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
11197 IgnoreStringsWithoutSpecifiers);
11198 }
11199 }
11200
11201 return SLCT_NotALiteral;
11202 }
11203 case Stmt::ObjCStringLiteralClass:
11204 case Stmt::StringLiteralClass: {
11205 const StringLiteral *StrE = nullptr;
11206
11207 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(Val: E))
11208 StrE = ObjCFExpr->getString();
11209 else
11210 StrE = cast<StringLiteral>(Val: E);
11211
11212 if (StrE) {
11213 if (Offset.isNegative() || Offset > StrE->getLength()) {
11214 // TODO: It would be better to have an explicit warning for out of
11215 // bounds literals.
11216 return SLCT_NotALiteral;
11217 }
11218 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(width: 64).getSExtValue());
11219 CheckFormatString(S, FExpr: &FStr, OrigFormatExpr: E, Args, APK, format_idx, firstDataArg, Type,
11220 inFunctionCall: InFunctionCall, CallType, CheckedVarArgs, UncoveredArg,
11221 IgnoreStringsWithoutSpecifiers);
11222 return SLCT_CheckedLiteral;
11223 }
11224
11225 return SLCT_NotALiteral;
11226 }
11227 case Stmt::BinaryOperatorClass: {
11228 const BinaryOperator *BinOp = cast<BinaryOperator>(Val: E);
11229
11230 // A string literal + an int offset is still a string literal.
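    // e.g. for printf("skip: %s" + 6, s), the constant 6 is folded into
    // Offset below and we keep walking the string-literal operand.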
11231 if (BinOp->isAdditiveOp()) {
11232 Expr::EvalResult LResult, RResult;
11233
11234 bool LIsInt = BinOp->getLHS()->EvaluateAsInt(
11235 Result&: LResult, Ctx: S.Context, AllowSideEffects: Expr::SE_NoSideEffects,
11236 InConstantContext: S.isConstantEvaluatedContext());
11237 bool RIsInt = BinOp->getRHS()->EvaluateAsInt(
11238 Result&: RResult, Ctx: S.Context, AllowSideEffects: Expr::SE_NoSideEffects,
11239 InConstantContext: S.isConstantEvaluatedContext());
11240
11241 if (LIsInt != RIsInt) {
11242 BinaryOperatorKind BinOpKind = BinOp->getOpcode();
11243
11244 if (LIsInt) {
11245 if (BinOpKind == BO_Add) {
11246 sumOffsets(Offset, Addend: LResult.Val.getInt(), BinOpKind, AddendIsRight: RIsInt);
11247 E = BinOp->getRHS();
11248 goto tryAgain;
11249 }
11250 } else {
11251 sumOffsets(Offset, Addend: RResult.Val.getInt(), BinOpKind, AddendIsRight: RIsInt);
11252 E = BinOp->getLHS();
11253 goto tryAgain;
11254 }
11255 }
11256 }
11257
11258 return SLCT_NotALiteral;
11259 }
11260 case Stmt::UnaryOperatorClass: {
11261 const UnaryOperator *UnaOp = cast<UnaryOperator>(Val: E);
11262 auto ASE = dyn_cast<ArraySubscriptExpr>(Val: UnaOp->getSubExpr());
11263 if (UnaOp->getOpcode() == UO_AddrOf && ASE) {
11264 Expr::EvalResult IndexResult;
11265 if (ASE->getRHS()->EvaluateAsInt(Result&: IndexResult, Ctx: S.Context,
11266 AllowSideEffects: Expr::SE_NoSideEffects,
11267 InConstantContext: S.isConstantEvaluatedContext())) {
11268 sumOffsets(Offset, Addend: IndexResult.Val.getInt(), BinOpKind: BO_Add,
11269 /*RHS is int*/ AddendIsRight: true);
11270 E = ASE->getBase();
11271 goto tryAgain;
11272 }
11273 }
11274
11275 return SLCT_NotALiteral;
11276 }
11277
11278 default:
11279 return SLCT_NotALiteral;
11280 }
11281}
11282
11283 // If this expression can be evaluated at compile time, check whether the
11284 // result is a StringLiteral and return it if so;
11285 // otherwise return nullptr.
11286static const Expr *maybeConstEvalStringLiteral(ASTContext &Context,
11287 const Expr *E) {
11288 Expr::EvalResult Result;
11289 if (E->EvaluateAsRValue(Result, Ctx: Context) && Result.Val.isLValue()) {
11290 const auto *LVE = Result.Val.getLValueBase().dyn_cast<const Expr *>();
11291 if (isa_and_nonnull<StringLiteral>(Val: LVE))
11292 return LVE;
11293 }
11294 return nullptr;
11295}
11296
11297Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
11298 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName())
11299 .Case(S: "scanf", Value: FST_Scanf)
11300 .Cases(S0: "printf", S1: "printf0", Value: FST_Printf)
11301 .Cases(S0: "NSString", S1: "CFString", Value: FST_NSString)
11302 .Case(S: "strftime", Value: FST_Strftime)
11303 .Case(S: "strfmon", Value: FST_Strfmon)
11304 .Cases(S0: "kprintf", S1: "cmn_err", S2: "vcmn_err", S3: "zcmn_err", Value: FST_Kprintf)
11305 .Case(S: "freebsd_kprintf", Value: FST_FreeBSDKPrintf)
11306 .Case(S: "os_trace", Value: FST_OSLog)
11307 .Case(S: "os_log", Value: FST_OSLog)
11308 .Default(Value: FST_Unknown);
11309}
11310
11311/// CheckFormatArguments - Check calls to printf and scanf (and similar
11312/// functions) for correct use of format strings.
11313/// Returns true if a format string has been fully checked.
11314bool Sema::CheckFormatArguments(const FormatAttr *Format,
11315 ArrayRef<const Expr *> Args, bool IsCXXMember,
11316 VariadicCallType CallType, SourceLocation Loc,
11317 SourceRange Range,
11318 llvm::SmallBitVector &CheckedVarArgs) {
11319 FormatStringInfo FSI;
11320 if (getFormatStringInfo(Format, IsCXXMember, IsVariadic: CallType != VariadicDoesNotApply,
11321 FSI: &FSI))
11322 return CheckFormatArguments(Args, FAPK: FSI.ArgPassingKind, format_idx: FSI.FormatIdx,
11323 firstDataArg: FSI.FirstDataArg, Type: GetFormatStringType(Format),
11324 CallType, Loc, range: Range, CheckedVarArgs);
11325 return false;
11326}
11327
11328bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args,
11329 Sema::FormatArgumentPassingKind APK,
11330 unsigned format_idx, unsigned firstDataArg,
11331 FormatStringType Type,
11332 VariadicCallType CallType, SourceLocation Loc,
11333 SourceRange Range,
11334 llvm::SmallBitVector &CheckedVarArgs) {
11335 // CHECK: printf/scanf-like function is called with no format string.
11336 if (format_idx >= Args.size()) {
11337 Diag(Loc, diag::warn_missing_format_string) << Range;
11338 return false;
11339 }
11340
11341 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts();
11342
11343 // CHECK: format string is not a string literal.
11344 //
11345 // Dynamically generated format strings are difficult to
11346 // automatically vet at compile time. Requiring that format strings
11347 // are string literals: (1) permits the checking of format strings by
11348 // the compiler and thereby (2) can practically remove the source of
11349 // many format string exploits.
11350
11351  // The format string can be either an ObjC string (e.g. @"%d") or a
11352  // C string (e.g. "%d").
11353  // ObjC strings use the same format specifiers as C strings, so we can use
11354  // the same format string checking logic for both ObjC and C strings.
11355 UncoveredArgHandler UncoveredArg;
11356 StringLiteralCheckType CT = checkFormatStringExpr(
11357 S&: *this, E: OrigFormatExpr, Args, APK, format_idx, firstDataArg, Type,
11358 CallType,
11359 /*IsFunctionCall*/ InFunctionCall: true, CheckedVarArgs, UncoveredArg,
11360 /*no string offset*/ Offset: llvm::APSInt(64, false) = 0);
11361
11362  // Generate a diagnostic when an uncovered argument is detected.
11363 if (UncoveredArg.hasUncoveredArg()) {
11364 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg;
11365 assert(ArgIdx < Args.size() && "ArgIdx outside bounds");
11366 UncoveredArg.Diagnose(S&: *this, /*IsFunctionCall*/true, ArgExpr: Args[ArgIdx]);
11367 }
11368
11369 if (CT != SLCT_NotALiteral)
11370 // Literal format string found, check done!
11371 return CT == SLCT_CheckedLiteral;
11372
11373  // Strftime is a special case, as it always consumes a single 'time'
11374  // argument, so it is safe to pass a non-literal format string.
11375 if (Type == FST_Strftime)
11376 return false;
11377
11378  // Do not emit a diagnostic when the format parameter is a macro expansion
11379  // and the format type is either NSString or CFString. This is a hack to
11380  // avoid diagnosing uses of the NSLocalizedString and CFCopyLocalizedString
11381  // macros, which are usually used in place of NS and CF string literals.
11382 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc();
11383 if (Type == FST_NSString && SourceMgr.isInSystemMacro(loc: FormatLoc))
11384 return false;
11385
11386 // If there are no arguments specified, warn with -Wformat-security, otherwise
11387 // warn only with -Wformat-nonliteral.
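  // Illustrative sketch:
  //   printf(msg);        // no data arguments -> -Wformat-security style warning
  //   printf(msg, count); // data arguments    -> -Wformat-nonliteral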
11388 if (Args.size() == firstDataArg) {
11389 Diag(FormatLoc, diag::warn_format_nonliteral_noargs)
11390 << OrigFormatExpr->getSourceRange();
11391 switch (Type) {
11392 default:
11393 break;
11394 case FST_Kprintf:
11395 case FST_FreeBSDKPrintf:
11396 case FST_Printf:
11397 Diag(FormatLoc, diag::note_format_security_fixit)
11398 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", ");
11399 break;
11400 case FST_NSString:
11401 Diag(FormatLoc, diag::note_format_security_fixit)
11402 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", ");
11403 break;
11404 }
11405 } else {
11406 Diag(FormatLoc, diag::warn_format_nonliteral)
11407 << OrigFormatExpr->getSourceRange();
11408 }
11409 return false;
11410}
11411
11412namespace {
11413
11414class CheckFormatHandler : public analyze_format_string::FormatStringHandler {
11415protected:
11416 Sema &S;
11417 const FormatStringLiteral *FExpr;
11418 const Expr *OrigFormatExpr;
11419 const Sema::FormatStringType FSType;
11420 const unsigned FirstDataArg;
11421 const unsigned NumDataArgs;
11422 const char *Beg; // Start of format string.
11423 const Sema::FormatArgumentPassingKind ArgPassingKind;
11424 ArrayRef<const Expr *> Args;
11425 unsigned FormatIdx;
11426 llvm::SmallBitVector CoveredArgs;
11427 bool usesPositionalArgs = false;
11428 bool atFirstArg = true;
11429 bool inFunctionCall;
11430 Sema::VariadicCallType CallType;
11431 llvm::SmallBitVector &CheckedVarArgs;
11432 UncoveredArgHandler &UncoveredArg;
11433
11434public:
11435 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr,
11436 const Expr *origFormatExpr,
11437 const Sema::FormatStringType type, unsigned firstDataArg,
11438 unsigned numDataArgs, const char *beg,
11439 Sema::FormatArgumentPassingKind APK,
11440 ArrayRef<const Expr *> Args, unsigned formatIdx,
11441 bool inFunctionCall, Sema::VariadicCallType callType,
11442 llvm::SmallBitVector &CheckedVarArgs,
11443 UncoveredArgHandler &UncoveredArg)
11444 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type),
11445 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg),
11446 ArgPassingKind(APK), Args(Args), FormatIdx(formatIdx),
11447 inFunctionCall(inFunctionCall), CallType(callType),
11448 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) {
11449 CoveredArgs.resize(N: numDataArgs);
11450 CoveredArgs.reset();
11451 }
11452
11453 void DoneProcessing();
11454
11455 void HandleIncompleteSpecifier(const char *startSpecifier,
11456 unsigned specifierLen) override;
11457
11458 void HandleInvalidLengthModifier(
11459 const analyze_format_string::FormatSpecifier &FS,
11460 const analyze_format_string::ConversionSpecifier &CS,
11461 const char *startSpecifier, unsigned specifierLen,
11462 unsigned DiagID);
11463
11464 void HandleNonStandardLengthModifier(
11465 const analyze_format_string::FormatSpecifier &FS,
11466 const char *startSpecifier, unsigned specifierLen);
11467
11468 void HandleNonStandardConversionSpecifier(
11469 const analyze_format_string::ConversionSpecifier &CS,
11470 const char *startSpecifier, unsigned specifierLen);
11471
11472 void HandlePosition(const char *startPos, unsigned posLen) override;
11473
11474 void HandleInvalidPosition(const char *startSpecifier,
11475 unsigned specifierLen,
11476 analyze_format_string::PositionContext p) override;
11477
11478 void HandleZeroPosition(const char *startPos, unsigned posLen) override;
11479
11480 void HandleNullChar(const char *nullCharacter) override;
11481
11482 template <typename Range>
11483 static void
11484 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr,
11485 const PartialDiagnostic &PDiag, SourceLocation StringLoc,
11486 bool IsStringLocation, Range StringRange,
11487 ArrayRef<FixItHint> Fixit = std::nullopt);
11488
11489protected:
11490 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc,
11491 const char *startSpec,
11492 unsigned specifierLen,
11493 const char *csStart, unsigned csLen);
11494
11495 void HandlePositionalNonpositionalArgs(SourceLocation Loc,
11496 const char *startSpec,
11497 unsigned specifierLen);
11498
11499 SourceRange getFormatStringRange();
11500 CharSourceRange getSpecifierRange(const char *startSpecifier,
11501 unsigned specifierLen);
11502 SourceLocation getLocationOfByte(const char *x);
11503
11504 const Expr *getDataArg(unsigned i) const;
11505
11506 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS,
11507 const analyze_format_string::ConversionSpecifier &CS,
11508 const char *startSpecifier, unsigned specifierLen,
11509 unsigned argIndex);
11510
11511 template <typename Range>
11512 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc,
11513 bool IsStringLocation, Range StringRange,
11514 ArrayRef<FixItHint> Fixit = std::nullopt);
11515};
11516
11517} // namespace
11518
11519SourceRange CheckFormatHandler::getFormatStringRange() {
11520 return OrigFormatExpr->getSourceRange();
11521}
11522
11523CharSourceRange CheckFormatHandler::
11524getSpecifierRange(const char *startSpecifier, unsigned specifierLen) {
11525 SourceLocation Start = getLocationOfByte(x: startSpecifier);
11526 SourceLocation End = getLocationOfByte(x: startSpecifier + specifierLen - 1);
11527
11528 // Advance the end SourceLocation by one due to half-open ranges.
11529 End = End.getLocWithOffset(Offset: 1);
11530
11531 return CharSourceRange::getCharRange(B: Start, E: End);
11532}
11533
11534SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) {
11535 return FExpr->getLocationOfByte(ByteNo: x - Beg, SM: S.getSourceManager(),
11536 Features: S.getLangOpts(), Target: S.Context.getTargetInfo());
11537}
11538
11539void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier,
11540 unsigned specifierLen){
11541 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier),
11542 getLocationOfByte(startSpecifier),
11543 /*IsStringLocation*/true,
11544 getSpecifierRange(startSpecifier, specifierLen));
11545}
11546
11547void CheckFormatHandler::HandleInvalidLengthModifier(
11548 const analyze_format_string::FormatSpecifier &FS,
11549 const analyze_format_string::ConversionSpecifier &CS,
11550 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) {
11551 using namespace analyze_format_string;
11552
11553 const LengthModifier &LM = FS.getLengthModifier();
11554 CharSourceRange LMRange = getSpecifierRange(startSpecifier: LM.getStart(), specifierLen: LM.getLength());
11555
11556 // See if we know how to fix this length modifier.
11557 std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
11558 if (FixedLM) {
11559 EmitFormatDiagnostic(PDiag: S.PDiag(DiagID) << LM.toString() << CS.toString(),
11560 Loc: getLocationOfByte(x: LM.getStart()),
11561 /*IsStringLocation*/true,
11562 StringRange: getSpecifierRange(startSpecifier, specifierLen));
11563
11564 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
11565 << FixedLM->toString()
11566 << FixItHint::CreateReplacement(LMRange, FixedLM->toString());
11567
11568 } else {
11569 FixItHint Hint;
11570 if (DiagID == diag::warn_format_nonsensical_length)
11571 Hint = FixItHint::CreateRemoval(RemoveRange: LMRange);
11572
11573 EmitFormatDiagnostic(PDiag: S.PDiag(DiagID) << LM.toString() << CS.toString(),
11574 Loc: getLocationOfByte(x: LM.getStart()),
11575 /*IsStringLocation*/true,
11576 StringRange: getSpecifierRange(startSpecifier, specifierLen),
11577 FixIt: Hint);
11578 }
11579}
11580
11581void CheckFormatHandler::HandleNonStandardLengthModifier(
11582 const analyze_format_string::FormatSpecifier &FS,
11583 const char *startSpecifier, unsigned specifierLen) {
11584 using namespace analyze_format_string;
11585
11586 const LengthModifier &LM = FS.getLengthModifier();
11587 CharSourceRange LMRange = getSpecifierRange(startSpecifier: LM.getStart(), specifierLen: LM.getLength());
11588
11589 // See if we know how to fix this length modifier.
11590 std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
11591 if (FixedLM) {
11592 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
11593 << LM.toString() << 0,
11594 getLocationOfByte(LM.getStart()),
11595 /*IsStringLocation*/true,
11596 getSpecifierRange(startSpecifier, specifierLen));
11597
11598 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
11599 << FixedLM->toString()
11600 << FixItHint::CreateReplacement(LMRange, FixedLM->toString());
11601
11602 } else {
11603 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
11604 << LM.toString() << 0,
11605 getLocationOfByte(LM.getStart()),
11606 /*IsStringLocation*/true,
11607 getSpecifierRange(startSpecifier, specifierLen));
11608 }
11609}
11610
11611void CheckFormatHandler::HandleNonStandardConversionSpecifier(
11612 const analyze_format_string::ConversionSpecifier &CS,
11613 const char *startSpecifier, unsigned specifierLen) {
11614 using namespace analyze_format_string;
11615
11616 // See if we know how to fix this conversion specifier.
11617 std::optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier();
11618 if (FixedCS) {
11619 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
11620 << CS.toString() << /*conversion specifier*/1,
11621 getLocationOfByte(CS.getStart()),
11622 /*IsStringLocation*/true,
11623 getSpecifierRange(startSpecifier, specifierLen));
11624
11625 CharSourceRange CSRange = getSpecifierRange(startSpecifier: CS.getStart(), specifierLen: CS.getLength());
11626 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier)
11627 << FixedCS->toString()
11628 << FixItHint::CreateReplacement(CSRange, FixedCS->toString());
11629 } else {
11630 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
11631 << CS.toString() << /*conversion specifier*/1,
11632 getLocationOfByte(CS.getStart()),
11633 /*IsStringLocation*/true,
11634 getSpecifierRange(startSpecifier, specifierLen));
11635 }
11636}
11637
11638void CheckFormatHandler::HandlePosition(const char *startPos,
11639 unsigned posLen) {
11640 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg),
11641 getLocationOfByte(startPos),
11642 /*IsStringLocation*/true,
11643 getSpecifierRange(startPos, posLen));
11644}
11645
11646void CheckFormatHandler::HandleInvalidPosition(
11647 const char *startSpecifier, unsigned specifierLen,
11648 analyze_format_string::PositionContext p) {
11649 EmitFormatDiagnostic(
11650 S.PDiag(diag::warn_format_invalid_positional_specifier) << (unsigned)p,
11651 getLocationOfByte(startSpecifier), /*IsStringLocation*/ true,
11652 getSpecifierRange(startSpecifier, specifierLen));
11653}
11654
11655void CheckFormatHandler::HandleZeroPosition(const char *startPos,
11656 unsigned posLen) {
11657 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier),
11658 getLocationOfByte(startPos),
11659 /*IsStringLocation*/true,
11660 getSpecifierRange(startPos, posLen));
11661}
11662
11663void CheckFormatHandler::HandleNullChar(const char *nullCharacter) {
11664 if (!isa<ObjCStringLiteral>(Val: OrigFormatExpr)) {
11665 // The presence of a null character is likely an error.
11666 EmitFormatDiagnostic(
11667 S.PDiag(diag::warn_printf_format_string_contains_null_char),
11668 getLocationOfByte(nullCharacter), /*IsStringLocation*/true,
11669 getFormatStringRange());
11670 }
11671}
11672
11673// Note that this may return NULL if there was an error parsing or building
11674// one of the argument expressions.
11675const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
11676 return Args[FirstDataArg + i];
11677}
11678
11679void CheckFormatHandler::DoneProcessing() {
11680 // Does the number of data arguments exceed the number of
11681 // format conversions in the format string?
11682 if (ArgPassingKind != Sema::FAPK_VAList) {
11683 // Find any arguments that weren't covered.
11684 CoveredArgs.flip();
11685 signed notCoveredArg = CoveredArgs.find_first();
11686 if (notCoveredArg >= 0) {
11687 assert((unsigned)notCoveredArg < NumDataArgs);
11688 UncoveredArg.Update(NewFirstUncoveredArg: notCoveredArg, StrExpr: OrigFormatExpr);
11689 } else {
11690 UncoveredArg.setAllCovered();
11691 }
11692 }
11693}
11694
11695void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
11696 const Expr *ArgExpr) {
11697 assert(hasUncoveredArg() && !DiagnosticExprs.empty() &&
11698 "Invalid state");
11699
11700 if (!ArgExpr)
11701 return;
11702
11703 SourceLocation Loc = ArgExpr->getBeginLoc();
11704
11705 if (S.getSourceManager().isInSystemMacro(loc: Loc))
11706 return;
11707
11708 PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
11709 for (auto E : DiagnosticExprs)
11710 PDiag << E->getSourceRange();
11711
11712 CheckFormatHandler::EmitFormatDiagnostic(
11713 S, IsFunctionCall, DiagnosticExprs[0],
11714 PDiag, Loc, /*IsStringLocation*/false,
11715 DiagnosticExprs[0]->getSourceRange());
11716}
11717
11718bool
11719CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
11720 SourceLocation Loc,
11721 const char *startSpec,
11722 unsigned specifierLen,
11723 const char *csStart,
11724 unsigned csLen) {
11725 bool keepGoing = true;
11726 if (argIndex < NumDataArgs) {
11727    // Consider the argument covered, even though the specifier doesn't
11728 // make sense.
11729 CoveredArgs.set(argIndex);
11730 }
11731 else {
11732    // If argIndex exceeds the number of data arguments, we
11733    // don't issue a warning, because that would just cause a cascade of warnings
11734    // (and they may have intended '%%' anyway). We don't want to continue processing
11735    // the format string after this point, however, as we would likely just get
11736    // gibberish when trying to match arguments.
11737 keepGoing = false;
11738 }
11739
11740 StringRef Specifier(csStart, csLen);
11741
11742  // If the specifier is non-printable, it could be the first byte of a UTF-8
11743  // sequence. In that case, print the UTF-8 code point. If not, print the byte's
11744  // hex value.
11745 std::string CodePointStr;
11746 if (!llvm::sys::locale::isPrint(c: *csStart)) {
11747 llvm::UTF32 CodePoint;
11748 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart);
11749 const llvm::UTF8 *E =
11750 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen);
11751 llvm::ConversionResult Result =
11752 llvm::convertUTF8Sequence(source: B, sourceEnd: E, target: &CodePoint, flags: llvm::strictConversion);
11753
11754 if (Result != llvm::conversionOK) {
11755 unsigned char FirstChar = *csStart;
11756 CodePoint = (llvm::UTF32)FirstChar;
11757 }
11758
11759 llvm::raw_string_ostream OS(CodePointStr);
11760 if (CodePoint < 256)
11761 OS << "\\x" << llvm::format(Fmt: "%02x", Vals: CodePoint);
11762 else if (CodePoint <= 0xFFFF)
11763 OS << "\\u" << llvm::format(Fmt: "%04x", Vals: CodePoint);
11764 else
11765 OS << "\\U" << llvm::format(Fmt: "%08x", Vals: CodePoint);
11766 OS.flush();
11767 Specifier = CodePointStr;
11768 }
11769
11770 EmitFormatDiagnostic(
11771 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc,
11772 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen));
11773
11774 return keepGoing;
11775}
11776
11777void
11778CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc,
11779 const char *startSpec,
11780 unsigned specifierLen) {
11781 EmitFormatDiagnostic(
11782 S.PDiag(diag::warn_format_mix_positional_nonpositional_args),
11783 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen));
11784}
11785
11786bool
11787CheckFormatHandler::CheckNumArgs(
11788 const analyze_format_string::FormatSpecifier &FS,
11789 const analyze_format_string::ConversionSpecifier &CS,
11790 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) {
11791
11792 if (argIndex >= NumDataArgs) {
11793 PartialDiagnostic PDiag = FS.usesPositionalArg()
11794 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args)
11795 << (argIndex+1) << NumDataArgs)
11796 : S.PDiag(diag::warn_printf_insufficient_data_args);
11797 EmitFormatDiagnostic(
11798 PDiag, Loc: getLocationOfByte(x: CS.getStart()), /*IsStringLocation*/true,
11799 StringRange: getSpecifierRange(startSpecifier, specifierLen));
11800
11801    // Since the format string references more arguments than were supplied,
11802    // all supplied arguments are by extension covered, so mark them as such.
11803 UncoveredArg.setAllCovered();
11804 return false;
11805 }
11806 return true;
11807}
11808
11809template<typename Range>
11810void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
11811 SourceLocation Loc,
11812 bool IsStringLocation,
11813 Range StringRange,
11814 ArrayRef<FixItHint> FixIt) {
11815 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag,
11816 Loc, IsStringLocation, StringRange, FixIt);
11817}
11818
11819 /// If the format string is not within the function call, emit a note
11820 /// so that both the function call and the string appear in the diagnostics.
11821///
11822/// \param InFunctionCall if true, the format string is within the function
11823/// call and only one diagnostic message will be produced. Otherwise, an
11824/// extra note will be emitted pointing to location of the format string.
11825///
11826/// \param ArgumentExpr the expression that is passed as the format string
11827/// argument in the function call. Used for getting locations when two
11828/// diagnostics are emitted.
11829///
11830/// \param PDiag the callee should already have provided any strings for the
11831/// diagnostic message. This function only adds locations and fixits
11832/// to diagnostics.
11833///
11834/// \param Loc primary location for diagnostic. If two diagnostics are
11835/// required, one will be at Loc and a new SourceLocation will be created for
11836/// the other one.
11837///
11838 /// \param IsStringLocation if true, Loc points into the format string and
11839 /// should be used for the note. Otherwise, Loc points to the argument list
11840 /// and will be used with PDiag.
11841///
11842/// \param StringRange some or all of the string to highlight. This is
11843/// templated so it can accept either a CharSourceRange or a SourceRange.
11844///
11845/// \param FixIt optional fix it hint for the format string.
11846template <typename Range>
11847void CheckFormatHandler::EmitFormatDiagnostic(
11848 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
11849 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
11850 Range StringRange, ArrayRef<FixItHint> FixIt) {
11851 if (InFunctionCall) {
11852 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
11853 D << StringRange;
11854 D << FixIt;
11855 } else {
11856 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
11857 << ArgumentExpr->getSourceRange();
11858
11859 const Sema::SemaDiagnosticBuilder &Note =
11860 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
11861 diag::note_format_string_defined);
11862
11863 Note << StringRange;
11864 Note << FixIt;
11865 }
11866}
11867
11868//===--- CHECK: Printf format string checking -----------------------------===//
11869
11870namespace {
11871
11872class CheckPrintfHandler : public CheckFormatHandler {
11873public:
11874 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
11875 const Expr *origFormatExpr,
11876 const Sema::FormatStringType type, unsigned firstDataArg,
11877 unsigned numDataArgs, bool isObjC, const char *beg,
11878 Sema::FormatArgumentPassingKind APK,
11879 ArrayRef<const Expr *> Args, unsigned formatIdx,
11880 bool inFunctionCall, Sema::VariadicCallType CallType,
11881 llvm::SmallBitVector &CheckedVarArgs,
11882 UncoveredArgHandler &UncoveredArg)
11883 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
11884 numDataArgs, beg, APK, Args, formatIdx,
11885 inFunctionCall, CallType, CheckedVarArgs,
11886 UncoveredArg) {}
11887
11888 bool isObjCContext() const { return FSType == Sema::FST_NSString; }
11889
11890 /// Returns true if '%@' specifiers are allowed in the format string.
11891 bool allowsObjCArg() const {
11892 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog ||
11893 FSType == Sema::FST_OSTrace;
11894 }
11895
11896 bool HandleInvalidPrintfConversionSpecifier(
11897 const analyze_printf::PrintfSpecifier &FS,
11898 const char *startSpecifier,
11899 unsigned specifierLen) override;
11900
11901 void handleInvalidMaskType(StringRef MaskType) override;
11902
11903 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
11904 const char *startSpecifier, unsigned specifierLen,
11905 const TargetInfo &Target) override;
11906 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
11907 const char *StartSpecifier,
11908 unsigned SpecifierLen,
11909 const Expr *E);
11910
11911 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k,
11912 const char *startSpecifier, unsigned specifierLen);
11913 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS,
11914 const analyze_printf::OptionalAmount &Amt,
11915 unsigned type,
11916 const char *startSpecifier, unsigned specifierLen);
11917 void HandleFlag(const analyze_printf::PrintfSpecifier &FS,
11918 const analyze_printf::OptionalFlag &flag,
11919 const char *startSpecifier, unsigned specifierLen);
11920 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS,
11921 const analyze_printf::OptionalFlag &ignoredFlag,
11922 const analyze_printf::OptionalFlag &flag,
11923 const char *startSpecifier, unsigned specifierLen);
11924 bool checkForCStrMembers(const analyze_printf::ArgType &AT,
11925 const Expr *E);
11926
11927 void HandleEmptyObjCModifierFlag(const char *startFlag,
11928 unsigned flagLen) override;
11929
11930 void HandleInvalidObjCModifierFlag(const char *startFlag,
11931 unsigned flagLen) override;
11932
11933 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart,
11934 const char *flagsEnd,
11935 const char *conversionPosition)
11936 override;
11937};
11938
11939} // namespace
11940
11941bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier(
11942 const analyze_printf::PrintfSpecifier &FS,
11943 const char *startSpecifier,
11944 unsigned specifierLen) {
11945 const analyze_printf::PrintfConversionSpecifier &CS =
11946 FS.getConversionSpecifier();
11947
11948 return HandleInvalidConversionSpecifier(argIndex: FS.getArgIndex(),
11949 Loc: getLocationOfByte(x: CS.getStart()),
11950 startSpec: startSpecifier, specifierLen,
11951 csStart: CS.getStart(), csLen: CS.getLength());
11952}
11953
11954void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) {
11955 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size);
11956}
11957
11958bool CheckPrintfHandler::HandleAmount(
11959 const analyze_format_string::OptionalAmount &Amt, unsigned k,
11960 const char *startSpecifier, unsigned specifierLen) {
11961 if (Amt.hasDataArgument()) {
11962 if (ArgPassingKind != Sema::FAPK_VAList) {
11963 unsigned argIndex = Amt.getArgIndex();
11964 if (argIndex >= NumDataArgs) {
11965 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg)
11966 << k,
11967 getLocationOfByte(Amt.getStart()),
11968 /*IsStringLocation*/ true,
11969 getSpecifierRange(startSpecifier, specifierLen));
11970 // Don't do any more checking. We will just emit
11971 // spurious errors.
11972 return false;
11973 }
11974
11975 // Type check the data argument. It should be an 'int'.
11976 // Although not in conformance with C99, we also allow the argument to be
11977 // an 'unsigned int' as that is a reasonably safe case. GCC also
11978 // doesn't emit a warning for that case.
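      // e.g. in printf("%*d", width, n), `width` supplies the field width and
      // must be an int (or unsigned int, per the relaxation above).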
11979 CoveredArgs.set(argIndex);
11980 const Expr *Arg = getDataArg(i: argIndex);
11981 if (!Arg)
11982 return false;
11983
11984 QualType T = Arg->getType();
11985
11986 const analyze_printf::ArgType &AT = Amt.getArgType(Ctx&: S.Context);
11987 assert(AT.isValid());
11988
11989 if (!AT.matchesType(C&: S.Context, argTy: T)) {
11990 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type)
11991 << k << AT.getRepresentativeTypeName(S.Context)
11992 << T << Arg->getSourceRange(),
11993 getLocationOfByte(Amt.getStart()),
11994 /*IsStringLocation*/true,
11995 getSpecifierRange(startSpecifier, specifierLen));
11996 // Don't do any more checking. We will just emit
11997 // spurious errors.
11998 return false;
11999 }
12000 }
12001 }
12002 return true;
12003}
12004
12005void CheckPrintfHandler::HandleInvalidAmount(
12006 const analyze_printf::PrintfSpecifier &FS,
12007 const analyze_printf::OptionalAmount &Amt,
12008 unsigned type,
12009 const char *startSpecifier,
12010 unsigned specifierLen) {
12011 const analyze_printf::PrintfConversionSpecifier &CS =
12012 FS.getConversionSpecifier();
12013
12014 FixItHint fixit =
12015 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant
12016 ? FixItHint::CreateRemoval(RemoveRange: getSpecifierRange(startSpecifier: Amt.getStart(),
12017 specifierLen: Amt.getConstantLength()))
12018 : FixItHint();
12019
12020 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount)
12021 << type << CS.toString(),
12022 getLocationOfByte(Amt.getStart()),
12023 /*IsStringLocation*/true,
12024 getSpecifierRange(startSpecifier, specifierLen),
12025 fixit);
12026}
12027
12028void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS,
12029 const analyze_printf::OptionalFlag &flag,
12030 const char *startSpecifier,
12031 unsigned specifierLen) {
12032 // Warn about pointless flag with a fixit removal.
12033 const analyze_printf::PrintfConversionSpecifier &CS =
12034 FS.getConversionSpecifier();
12035 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag)
12036 << flag.toString() << CS.toString(),
12037 getLocationOfByte(flag.getPosition()),
12038 /*IsStringLocation*/true,
12039 getSpecifierRange(startSpecifier, specifierLen),
12040 FixItHint::CreateRemoval(
12041 getSpecifierRange(flag.getPosition(), 1)));
12042}
12043
12044void CheckPrintfHandler::HandleIgnoredFlag(
12045 const analyze_printf::PrintfSpecifier &FS,
12046 const analyze_printf::OptionalFlag &ignoredFlag,
12047 const analyze_printf::OptionalFlag &flag,
12048 const char *startSpecifier,
12049 unsigned specifierLen) {
12050 // Warn about ignored flag with a fixit removal.
12051 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag)
12052 << ignoredFlag.toString() << flag.toString(),
12053 getLocationOfByte(ignoredFlag.getPosition()),
12054 /*IsStringLocation*/true,
12055 getSpecifierRange(startSpecifier, specifierLen),
12056 FixItHint::CreateRemoval(
12057 getSpecifierRange(ignoredFlag.getPosition(), 1)));
12058}
12059
12060void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag,
12061 unsigned flagLen) {
12062 // Warn about an empty flag.
12063 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag),
12064 getLocationOfByte(startFlag),
12065 /*IsStringLocation*/true,
12066 getSpecifierRange(startFlag, flagLen));
12067}
12068
12069void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag,
12070 unsigned flagLen) {
12071 // Warn about an invalid flag.
12072 auto Range = getSpecifierRange(startSpecifier: startFlag, specifierLen: flagLen);
12073 StringRef flag(startFlag, flagLen);
12074 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
12075 getLocationOfByte(startFlag),
12076 /*IsStringLocation*/true,
12077 Range, FixItHint::CreateRemoval(Range));
12078}
12079
12080void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
12081 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) {
12082 // Warn about using '[...]' without a '@' conversion.
12083 auto Range = getSpecifierRange(startSpecifier: flagsStart, specifierLen: flagsEnd - flagsStart + 1);
12084 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
12085 EmitFormatDiagnostic(S.PDiag(DiagID: diag) << StringRef(conversionPosition, 1),
12086 getLocationOfByte(x: conversionPosition),
12087 /*IsStringLocation*/true,
12088 Range, FixItHint::CreateRemoval(RemoveRange: Range));
12089}
12090
12091 // Determines if the specified type is a C++ class or struct containing
12092// a member with the specified name and kind (e.g. a CXXMethodDecl named
12093// "c_str()").
12094template<typename MemberKind>
12095static llvm::SmallPtrSet<MemberKind*, 1>
12096CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
12097 const RecordType *RT = Ty->getAs<RecordType>();
12098 llvm::SmallPtrSet<MemberKind*, 1> Results;
12099
12100 if (!RT)
12101 return Results;
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
12103 if (!RD || !RD->getDefinition())
12104 return Results;
12105
12106 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
12107 Sema::LookupMemberName);
12108 R.suppressDiagnostics();
12109
12110 // We just need to include all members of the right kind turned up by the
12111 // filter, at this point.
12112 if (S.LookupQualifiedName(R, RT->getDecl()))
12113 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
12114 NamedDecl *decl = (*I)->getUnderlyingDecl();
12115 if (MemberKind *FK = dyn_cast<MemberKind>(decl))
12116 Results.insert(FK);
12117 }
12118 return Results;
12119}
12120
12121/// Check if we could call '.c_str()' on an object.
12122///
12123/// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
12124/// allow the call, or if it would be ambiguous).
12125bool Sema::hasCStrMethod(const Expr *E) {
12126 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
12127
12128 MethodSet Results =
      CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
12130 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
12131 MI != ME; ++MI)
12132 if ((*MI)->getMinRequiredArguments() == 0)
12133 return true;
12134 return false;
12135}
12136
12137// Check if a (w)string was passed when a (w)char* was needed, and offer a
12138// better diagnostic if so. AT is assumed to be valid.
12139// Returns true when a c_str() conversion method is found.
12140bool CheckPrintfHandler::checkForCStrMembers(
12141 const analyze_printf::ArgType &AT, const Expr *E) {
12142 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
12143
12144 MethodSet Results =
      CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());
12146
12147 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
12148 MI != ME; ++MI) {
12149 const CXXMethodDecl *Method = *MI;
12150 if (Method->getMinRequiredArguments() == 0 &&
        AT.matchesType(S.Context, Method->getReturnType())) {
12152 // FIXME: Suggest parens if the expression needs them.
      SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc());
12154 S.Diag(E->getBeginLoc(), diag::note_printf_c_str)
12155 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()");
12156 return true;
12157 }
12158 }
12159
12160 return false;
12161}
12162
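// Verifies a single printf conversion specification: positional-argument
// consistency, data arguments consumed by the field width and precision,
// FreeBSD kernel and os_log/os_trace extensions, flag validity, and the
// length modifier, before handing the data argument to checkFormatExpr().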
12163bool CheckPrintfHandler::HandlePrintfSpecifier(
12164 const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier,
12165 unsigned specifierLen, const TargetInfo &Target) {
12166 using namespace analyze_format_string;
12167 using namespace analyze_printf;
12168
12169 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier();
12170
12171 if (FS.consumesDataArgument()) {
12172 if (atFirstArg) {
12173 atFirstArg = false;
12174 usesPositionalArgs = FS.usesPositionalArg();
12175 }
12176 else if (usesPositionalArgs != FS.usesPositionalArg()) {
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
12179 return false;
12180 }
12181 }
12182
12183 // First check if the field width, precision, and conversion specifier
12184 // have matching data arguments.
  if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0,
12186 startSpecifier, specifierLen)) {
12187 return false;
12188 }
12189
  if (!HandleAmount(FS.getPrecision(), /* precision */ 1,
12191 startSpecifier, specifierLen)) {
12192 return false;
12193 }
12194
12195 if (!CS.consumesDataArgument()) {
12196 // FIXME: Technically specifying a precision or field width here
12197 // makes no sense. Worth issuing a warning at some point.
12198 return true;
12199 }
12200
12201 // Consume the argument.
12202 unsigned argIndex = FS.getArgIndex();
12203 if (argIndex < NumDataArgs) {
12204 // The check to see if the argIndex is valid will come later.
12205 // We set the bit here because we may exit early from this
12206 // function if we encounter some other error.
12207 CoveredArgs.set(argIndex);
12208 }
12209
12210 // FreeBSD kernel extensions.
12211 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg ||
12212 CS.getKind() == ConversionSpecifier::FreeBSDDArg) {
12213 // We need at least two arguments.
    if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1))
12215 return false;
12216
12217 // Claim the second argument.
12218 CoveredArgs.set(argIndex + 1);
12219
12220 // Type check the first argument (int for %b, pointer for %D)
    const Expr *Ex = getDataArg(argIndex);
12222 const analyze_printf::ArgType &AT =
12223 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ?
12224 ArgType(S.Context.IntTy) : ArgType::CPointerTy;
12225 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType()))
12226 EmitFormatDiagnostic(
12227 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
12228 << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
12229 << false << Ex->getSourceRange(),
12230 Ex->getBeginLoc(), /*IsStringLocation*/ false,
12231 getSpecifierRange(startSpecifier, specifierLen));
12232
12233 // Type check the second argument (char * for both %b and %D)
    Ex = getDataArg(argIndex + 1);
12235 const analyze_printf::ArgType &AT2 = ArgType::CStrTy;
12236 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType()))
12237 EmitFormatDiagnostic(
12238 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
12239 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType()
12240 << false << Ex->getSourceRange(),
12241 Ex->getBeginLoc(), /*IsStringLocation*/ false,
12242 getSpecifierRange(startSpecifier, specifierLen));
12243
12244 return true;
12245 }
12246
12247 // Check for using an Objective-C specific conversion specifier
12248 // in a non-ObjC literal.
12249 if (!allowsObjCArg() && CS.isObjCArg()) {
12250 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
12251 specifierLen);
12252 }
12253
12254 // %P can only be used with os_log.
12255 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) {
12256 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
12257 specifierLen);
12258 }
12259
12260 // %n is not allowed with os_log.
12261 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) {
12262 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg),
12263 getLocationOfByte(CS.getStart()),
12264 /*IsStringLocation*/ false,
12265 getSpecifierRange(startSpecifier, specifierLen));
12266
12267 return true;
12268 }
12269
12270 // Only scalars are allowed for os_trace.
12271 if (FSType == Sema::FST_OSTrace &&
12272 (CS.getKind() == ConversionSpecifier::PArg ||
12273 CS.getKind() == ConversionSpecifier::sArg ||
12274 CS.getKind() == ConversionSpecifier::ObjCObjArg)) {
12275 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
12276 specifierLen);
12277 }
12278
12279 // Check for use of public/private annotation outside of os_log().
12280 if (FSType != Sema::FST_OSLog) {
12281 if (FS.isPublic().isSet()) {
12282 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
12283 << "public",
12284 getLocationOfByte(FS.isPublic().getPosition()),
12285 /*IsStringLocation*/ false,
12286 getSpecifierRange(startSpecifier, specifierLen));
12287 }
12288 if (FS.isPrivate().isSet()) {
12289 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
12290 << "private",
12291 getLocationOfByte(FS.isPrivate().getPosition()),
12292 /*IsStringLocation*/ false,
12293 getSpecifierRange(startSpecifier, specifierLen));
12294 }
12295 }
12296
12297 const llvm::Triple &Triple = Target.getTriple();
12298 if (CS.getKind() == ConversionSpecifier::nArg &&
12299 (Triple.isAndroid() || Triple.isOSFuchsia())) {
12300 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_narg_not_supported),
12301 getLocationOfByte(CS.getStart()),
12302 /*IsStringLocation*/ false,
12303 getSpecifierRange(startSpecifier, specifierLen));
12304 }
12305
12306 // Check for invalid use of field width
12307 if (!FS.hasValidFieldWidth()) {
    HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0,
12309 startSpecifier, specifierLen);
12310 }
12311
12312 // Check for invalid use of precision
12313 if (!FS.hasValidPrecision()) {
    HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1,
12315 startSpecifier, specifierLen);
12316 }
12317
12318 // Precision is mandatory for %P specifier.
12319 if (CS.getKind() == ConversionSpecifier::PArg &&
12320 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) {
12321 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision),
12322 getLocationOfByte(startSpecifier),
12323 /*IsStringLocation*/ false,
12324 getSpecifierRange(startSpecifier, specifierLen));
12325 }
12326
12327 // Check each flag does not conflict with any other component.
  if (!FS.hasValidThousandsGroupingPrefix())
    HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen);
  if (!FS.hasValidLeadingZeros())
    HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen);
  if (!FS.hasValidPlusPrefix())
    HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen);
  if (!FS.hasValidSpacePrefix())
    HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen);
  if (!FS.hasValidAlternativeForm())
    HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen);
  if (!FS.hasValidLeftJustified())
    HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen);

  // Check that flags are not ignored by another flag
  if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+'
    HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(),
                      startSpecifier, specifierLen);
  if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-'
    HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(),
                      startSpecifier, specifierLen);
12348
12349 // Check the length modifier is valid with the given conversion specifier.
  if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
                                 S.getLangOpts()))
12352 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
12353 diag::warn_format_nonsensical_length);
12354 else if (!FS.hasStandardLengthModifier())
12355 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
12356 else if (!FS.hasStandardLengthConversionCombination())
12357 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
12358 diag::warn_format_non_standard_conversion_spec);
12359
  if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
12361 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
12362
12363 // The remaining checks depend on the data arguments.
12364 if (ArgPassingKind == Sema::FAPK_VAList)
12365 return true;
12366
12367 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
12368 return false;
12369
  const Expr *Arg = getDataArg(argIndex);
12371 if (!Arg)
12372 return true;
12373
  return checkFormatExpr(FS, startSpecifier, specifierLen, Arg);
12375}
12376
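// Returns true if prefixing E with a cast would change how the expression
// parses, i.e. the fix-it must also wrap the expression in parentheses for
// the suggested cast to be correct.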
12377static bool requiresParensToAddCast(const Expr *E) {
12378 // FIXME: We should have a general way to reason about operator
12379 // precedence and whether parens are actually needed here.
12380 // Take care of a few common cases where they aren't.
12381 const Expr *Inside = E->IgnoreImpCasts();
  if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside))
12383 Inside = POE->getSyntacticForm()->IgnoreImpCasts();
12384
12385 switch (Inside->getStmtClass()) {
12386 case Stmt::ArraySubscriptExprClass:
12387 case Stmt::CallExprClass:
12388 case Stmt::CharacterLiteralClass:
12389 case Stmt::CXXBoolLiteralExprClass:
12390 case Stmt::DeclRefExprClass:
12391 case Stmt::FloatingLiteralClass:
12392 case Stmt::IntegerLiteralClass:
12393 case Stmt::MemberExprClass:
12394 case Stmt::ObjCArrayLiteralClass:
12395 case Stmt::ObjCBoolLiteralExprClass:
12396 case Stmt::ObjCBoxedExprClass:
12397 case Stmt::ObjCDictionaryLiteralClass:
12398 case Stmt::ObjCEncodeExprClass:
12399 case Stmt::ObjCIvarRefExprClass:
12400 case Stmt::ObjCMessageExprClass:
12401 case Stmt::ObjCPropertyRefExprClass:
12402 case Stmt::ObjCStringLiteralClass:
12403 case Stmt::ObjCSubscriptRefExprClass:
12404 case Stmt::ParenExprClass:
12405 case Stmt::StringLiteralClass:
12406 case Stmt::UnaryOperatorClass:
12407 return false;
12408 default:
12409 return true;
12410 }
12411}
12412
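// If IntendedTy (or the expression it came from) is one of Darwin's
// portability typedefs such as NSInteger, NSUInteger, or CFIndex, return the
// fixed-size type it should be cast to before printing together with the
// typedef's name; otherwise return a null QualType.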
12413static std::pair<QualType, StringRef>
12414shouldNotPrintDirectly(const ASTContext &Context,
12415 QualType IntendedTy,
12416 const Expr *E) {
12417 // Use a 'while' to peel off layers of typedefs.
12418 QualType TyTy = IntendedTy;
12419 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) {
12420 StringRef Name = UserTy->getDecl()->getName();
12421 QualType CastTy = llvm::StringSwitch<QualType>(Name)
      .Case("CFIndex", Context.getNSIntegerType())
      .Case("NSInteger", Context.getNSIntegerType())
      .Case("NSUInteger", Context.getNSUIntegerType())
      .Case("SInt32", Context.IntTy)
12426 .Case("UInt32", Context.UnsignedIntTy)
12427 .Default(QualType());
12428
12429 if (!CastTy.isNull())
      return std::make_pair(CastTy, Name);
12431
12432 TyTy = UserTy->desugar();
12433 }
12434
12435 // Strip parens if necessary.
12436 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
12437 return shouldNotPrintDirectly(Context,
12438 PE->getSubExpr()->getType(),
12439 PE->getSubExpr());
12440
12441 // If this is a conditional expression, then its result type is constructed
12442 // via usual arithmetic conversions and thus there might be no necessary
12443 // typedef sugar there. Recurse to operands to check for NSInteger &
12444 // Co. usage condition.
  if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
12446 QualType TrueTy, FalseTy;
12447 StringRef TrueName, FalseName;
12448
12449 std::tie(TrueTy, TrueName) =
12450 shouldNotPrintDirectly(Context,
12451 CO->getTrueExpr()->getType(),
12452 CO->getTrueExpr());
12453 std::tie(FalseTy, FalseName) =
12454 shouldNotPrintDirectly(Context,
12455 CO->getFalseExpr()->getType(),
12456 CO->getFalseExpr());
12457
    if (TrueTy == FalseTy)
      return std::make_pair(TrueTy, TrueName);
    else if (TrueTy.isNull())
      return std::make_pair(FalseTy, FalseName);
    else if (FalseTy.isNull())
      return std::make_pair(TrueTy, TrueName);
12464 }
12465
  return std::make_pair(QualType(), StringRef());
12467}
12468
12469/// Return true if \p ICE is an implicit argument promotion of an arithmetic
12470/// type. Bit-field 'promotions' from a higher ranked type to a lower ranked
12471/// type do not count.
12472static bool
12473isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) {
12474 QualType From = ICE->getSubExpr()->getType();
12475 QualType To = ICE->getType();
12476 // It's an integer promotion if the destination type is the promoted
12477 // source type.
12478 if (ICE->getCastKind() == CK_IntegralCast &&
      S.Context.isPromotableIntegerType(From) &&
      S.Context.getPromotedIntegerType(From) == To)
12481 return true;
12482 // Look through vector types, since we do default argument promotion for
12483 // those in OpenCL.
12484 if (const auto *VecTy = From->getAs<ExtVectorType>())
12485 From = VecTy->getElementType();
12486 if (const auto *VecTy = To->getAs<ExtVectorType>())
12487 To = VecTy->getElementType();
12488 // It's a floating promotion if the source type is a lower rank.
12489 return ICE->getCastKind() == CK_FloatingCast &&
         S.Context.getFloatingTypeOrder(From, To) < 0;
12491}
12492
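// A signedness-only mismatch is only reported under -Wformat-signedness. If
// that diagnostic is ignored at Loc, treat the argument as matching so no
// warning is emitted; otherwise downgrade the result to a plain mismatch.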
12493static analyze_format_string::ArgType::MatchKind
12494handleFormatSignedness(analyze_format_string::ArgType::MatchKind Match,
12495 DiagnosticsEngine &Diags, SourceLocation Loc) {
12496 if (Match == analyze_format_string::ArgType::NoMatchSignedness) {
12497 Match =
12498 Diags.isIgnored(
12499 diag::warn_format_conversion_argument_type_mismatch_signedness, Loc)
12500 ? analyze_format_string::ArgType::Match
12501 : analyze_format_string::ArgType::NoMatch;
12502 }
12503 return Match;
12504}
12505
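// Type check the data argument paired with a printf conversion specifier
// and, where possible, emit a fix-it that corrects either the specifier or
// the argument (for example by suggesting a cast).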
12506bool
12507CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
12508 const char *StartSpecifier,
12509 unsigned SpecifierLen,
12510 const Expr *E) {
12511 using namespace analyze_format_string;
12512 using namespace analyze_printf;
12513
12514 // Now type check the data expression that matches the
12515 // format specifier.
  const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext());
12517 if (!AT.isValid())
12518 return true;
12519
12520 QualType ExprTy = E->getType();
  while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) {
12522 ExprTy = TET->getUnderlyingExpr()->getType();
12523 }
12524
12525 // When using the format attribute in C++, you can receive a function or an
12526 // array that will necessarily decay to a pointer when passed to the final
12527 // format consumer. Apply decay before type comparison.
12528 if (ExprTy->canDecayToPointerType())
    ExprTy = S.Context.getDecayedType(ExprTy);
12530
12531 // Diagnose attempts to print a boolean value as a character. Unlike other
12532 // -Wformat diagnostics, this is fine from a type perspective, but it still
12533 // doesn't make sense.
12534 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg &&
12535 E->isKnownToHaveBooleanValue()) {
12536 const CharSourceRange &CSR =
        getSpecifierRange(StartSpecifier, SpecifierLen);
12538 SmallString<4> FSString;
12539 llvm::raw_svector_ostream os(FSString);
12540 FS.toString(os);
12541 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character)
12542 << FSString,
12543 E->getExprLoc(), false, CSR);
12544 return true;
12545 }
12546
12547 ArgType::MatchKind ImplicitMatch = ArgType::NoMatch;
  ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy);
12549 ArgType::MatchKind OrigMatch = Match;
12550
  Match = handleFormatSignedness(Match, S.getDiagnostics(), E->getExprLoc());
12552 if (Match == ArgType::Match)
12553 return true;
12554
  // NoMatchPromotionTypeConfusion should only be returned for an ImplicitCastExpr.
12556 assert(Match != ArgType::NoMatchPromotionTypeConfusion);
12557
12558 // Look through argument promotions for our error message's reported type.
12559 // This includes the integral and floating promotions, but excludes array
12560 // and function pointer decay (seeing that an argument intended to be a
12561 // string has type 'char [6]' is probably more confusing than 'char *') and
12562 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type).
  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
12564 if (isArithmeticArgumentPromotion(S, ICE)) {
12565 E = ICE->getSubExpr();
12566 ExprTy = E->getType();
12567
12568 // Check if we didn't match because of an implicit cast from a 'char'
12569 // or 'short' to an 'int'. This is done because printf is a varargs
12570 // function.
12571 if (ICE->getType() == S.Context.IntTy ||
12572 ICE->getType() == S.Context.UnsignedIntTy) {
12573 // All further checking is done on the subexpression
        ImplicitMatch = AT.matchesType(S.Context, ExprTy);
        if (OrigMatch == ArgType::NoMatchSignedness &&
            ImplicitMatch != ArgType::NoMatchSignedness)
          // If the original match was a signedness mismatch, the match on the
          // implicit cast type also needs to be a signedness mismatch;
          // otherwise we might introduce new unexpected warnings from
          // -Wformat-signedness.
          return true;
        ImplicitMatch = handleFormatSignedness(ImplicitMatch,
                                               S.getDiagnostics(),
                                               E->getExprLoc());
12583 if (ImplicitMatch == ArgType::Match)
12584 return true;
12585 }
12586 }
  } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) {
12588 // Special case for 'a', which has type 'int' in C.
12589 // Note, however, that we do /not/ want to treat multibyte constants like
12590 // 'MooV' as characters! This form is deprecated but still exists. In
12591 // addition, don't treat expressions as of type 'char' if one byte length
12592 // modifier is provided.
12593 if (ExprTy == S.Context.IntTy &&
12594 FS.getLengthModifier().getKind() != LengthModifier::AsChar)
      if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) {
12596 ExprTy = S.Context.CharTy;
12597 // To improve check results, we consider a character literal in C
12598 // to be a 'char' rather than an 'int'. 'printf("%hd", 'a');' is
12599 // more likely a type confusion situation, so we will suggest to
12600 // use '%hhd' instead by discarding the MatchPromotion.
12601 if (Match == ArgType::MatchPromotion)
12602 Match = ArgType::NoMatch;
12603 }
12604 }
12605 if (Match == ArgType::MatchPromotion) {
12606 // WG14 N2562 only clarified promotions in *printf
12607 // For NSLog in ObjC, just preserve -Wformat behavior
12608 if (!S.getLangOpts().ObjC &&
12609 ImplicitMatch != ArgType::NoMatchPromotionTypeConfusion &&
12610 ImplicitMatch != ArgType::NoMatchTypeConfusion)
12611 return true;
12612 Match = ArgType::NoMatch;
12613 }
12614 if (ImplicitMatch == ArgType::NoMatchPedantic ||
12615 ImplicitMatch == ArgType::NoMatchTypeConfusion)
12616 Match = ImplicitMatch;
12617 assert(Match != ArgType::MatchPromotion);
12618
12619 // Look through unscoped enums to their underlying type.
12620 bool IsEnum = false;
12621 bool IsScopedEnum = false;
12622 QualType IntendedTy = ExprTy;
12623 if (auto EnumTy = ExprTy->getAs<EnumType>()) {
12624 IntendedTy = EnumTy->getDecl()->getIntegerType();
12625 if (EnumTy->isUnscopedEnumerationType()) {
12626 ExprTy = IntendedTy;
12627 // This controls whether we're talking about the underlying type or not,
12628 // which we only want to do when it's an unscoped enum.
12629 IsEnum = true;
12630 } else {
12631 IsScopedEnum = true;
12632 }
12633 }
12634
12635 // %C in an Objective-C context prints a unichar, not a wchar_t.
12636 // If the argument is an integer of some kind, believe the %C and suggest
12637 // a cast instead of changing the conversion specifier.
12638 if (isObjCContext() &&
12639 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) {
12640 if (ExprTy->isIntegralOrUnscopedEnumerationType() &&
12641 !ExprTy->isCharType()) {
12642 // 'unichar' is defined as a typedef of unsigned short, but we should
12643 // prefer using the typedef if it is visible.
12644 IntendedTy = S.Context.UnsignedShortTy;
12645
12646 // While we are here, check if the value is an IntegerLiteral that happens
12647 // to be within the valid range.
      if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) {
        const llvm::APInt &V = IL->getValue();
        if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy))
          return true;
      }

      LookupResult Result(S, &S.Context.Idents.get("unichar"),
                          E->getBeginLoc(), Sema::LookupOrdinaryName);
      if (S.LookupName(Result, S.getCurScope())) {
        NamedDecl *ND = Result.getFoundDecl();
        if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND))
          if (TD->getUnderlyingType() == IntendedTy)
            IntendedTy = S.Context.getTypedefType(TD);
12661 }
12662 }
12663 }
12664
12665 // Special-case some of Darwin's platform-independence types by suggesting
12666 // casts to primitive types that are known to be large enough.
12667 bool ShouldNotPrintDirectly = false; StringRef CastTyName;
12668 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) {
12669 QualType CastTy;
12670 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E);
12671 if (!CastTy.isNull()) {
12672 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int
12673 // (long in ASTContext). Only complain to pedants or when they're the
12674 // underlying type of a scoped enum (which always needs a cast).
12675 if (!IsScopedEnum &&
12676 (CastTyName == "NSInteger" || CastTyName == "NSUInteger") &&
12677 (AT.isSizeT() || AT.isPtrdiffT()) &&
          AT.matchesType(S.Context, CastTy))
12679 Match = ArgType::NoMatchPedantic;
12680 IntendedTy = CastTy;
12681 ShouldNotPrintDirectly = true;
12682 }
12683 }
12684
12685 // We may be able to offer a FixItHint if it is a supported type.
12686 PrintfSpecifier fixedFS = FS;
12687 bool Success =
      fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext());
12689
12690 if (Success) {
12691 // Get the fix string from the fixed format specifier
12692 SmallString<16> buf;
12693 llvm::raw_svector_ostream os(buf);
12694 fixedFS.toString(os);
12695
    CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen);
12697
12698 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly && !IsScopedEnum) {
12699 unsigned Diag;
12700 switch (Match) {
12701 case ArgType::Match:
12702 case ArgType::MatchPromotion:
12703 case ArgType::NoMatchPromotionTypeConfusion:
12704 case ArgType::NoMatchSignedness:
12705 llvm_unreachable("expected non-matching");
12706 case ArgType::NoMatchPedantic:
12707 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
12708 break;
12709 case ArgType::NoMatchTypeConfusion:
12710 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion;
12711 break;
12712 case ArgType::NoMatch:
12713 Diag = diag::warn_format_conversion_argument_type_mismatch;
12714 break;
12715 }
12716
12717 // In this case, the specifier is wrong and should be changed to match
12718 // the argument.
      EmitFormatDiagnostic(S.PDiag(Diag)
                               << AT.getRepresentativeTypeName(S.Context)
                               << IntendedTy << IsEnum << E->getSourceRange(),
                           E->getBeginLoc(),
                           /*IsStringLocation*/ false, SpecRange,
                           FixItHint::CreateReplacement(SpecRange, os.str()));
12725 } else {
12726 // The canonical type for formatting this value is different from the
12727 // actual type of the expression. (This occurs, for example, with Darwin's
12728 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but
12729 // should be printed as 'long' for 64-bit compatibility.)
12730 // Rather than emitting a normal format/argument mismatch, we want to
12731 // add a cast to the recommended type (and correct the format string
12732 // if necessary). We should also do so for scoped enumerations.
12733 SmallString<16> CastBuf;
12734 llvm::raw_svector_ostream CastFix(CastBuf);
12735 CastFix << (S.LangOpts.CPlusPlus ? "static_cast<" : "(");
      IntendedTy.print(CastFix, S.Context.getPrintingPolicy());
12737 CastFix << (S.LangOpts.CPlusPlus ? ">" : ")");
12738
12739 SmallVector<FixItHint,4> Hints;
      ArgType::MatchKind IntendedMatch = AT.matchesType(S.Context, IntendedTy);
      IntendedMatch = handleFormatSignedness(IntendedMatch, S.getDiagnostics(),
                                             E->getExprLoc());
      if ((IntendedMatch != ArgType::Match) || ShouldNotPrintDirectly)
        Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str()));
12745
      if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) {
        // If there's already a cast present, just replace it.
        SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc());
        Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str()));
12750
12751 } else if (!requiresParensToAddCast(E) && !S.LangOpts.CPlusPlus) {
12752 // If the expression has high enough precedence,
12753 // just write the C-style cast.
12754 Hints.push_back(
            FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));
12756 } else {
12757 // Otherwise, add parens around the expression as well as the cast.
12758 CastFix << "(";
12759 Hints.push_back(
            FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));
12761
12762 // We don't use getLocForEndOfToken because it returns invalid source
12763 // locations for macro expansions (by design).
        SourceLocation EndLoc = S.SourceMgr.getSpellingLoc(E->getEndLoc());
        SourceLocation After = EndLoc.getLocWithOffset(
            Lexer::MeasureTokenLength(EndLoc, S.SourceMgr, S.LangOpts));
        Hints.push_back(FixItHint::CreateInsertion(After, ")"));
12768 }
12769
12770 if (ShouldNotPrintDirectly && !IsScopedEnum) {
12771 // The expression has a type that should not be printed directly.
12772 // We extract the name from the typedef because we don't want to show
12773 // the underlying type in the diagnostic.
12774 StringRef Name;
12775 if (const auto *TypedefTy = ExprTy->getAs<TypedefType>())
12776 Name = TypedefTy->getDecl()->getName();
12777 else
12778 Name = CastTyName;
12779 unsigned Diag = Match == ArgType::NoMatchPedantic
12780 ? diag::warn_format_argument_needs_cast_pedantic
12781 : diag::warn_format_argument_needs_cast;
        EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum
12783 << E->getSourceRange(),
12784 E->getBeginLoc(), /*IsStringLocation=*/false,
12785 SpecRange, Hints);
12786 } else {
12787 // In this case, the expression could be printed using a different
12788 // specifier, but we've decided that the specifier is probably correct
12789 // and we should cast instead. Just use the normal warning message.
12790
12791 unsigned Diag =
12792 IsScopedEnum
12793 ? diag::warn_format_conversion_argument_type_mismatch_pedantic
12794 : diag::warn_format_conversion_argument_type_mismatch;
12795
12796 EmitFormatDiagnostic(
            S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy
12798 << IsEnum << E->getSourceRange(),
12799 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints);
12800 }
12801 }
12802 } else {
    const CharSourceRange &CSR = getSpecifierRange(StartSpecifier,
                                                   SpecifierLen);
12805 // Since the warning for passing non-POD types to variadic functions
12806 // was deferred until now, we emit a warning for non-POD
12807 // arguments here.
12808 bool EmitTypeMismatch = false;
    switch (S.isValidVarArgType(ExprTy)) {
12810 case Sema::VAK_Valid:
12811 case Sema::VAK_ValidInCXX11: {
12812 unsigned Diag;
12813 switch (Match) {
12814 case ArgType::Match:
12815 case ArgType::MatchPromotion:
12816 case ArgType::NoMatchPromotionTypeConfusion:
12817 case ArgType::NoMatchSignedness:
12818 llvm_unreachable("expected non-matching");
12819 case ArgType::NoMatchPedantic:
12820 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
12821 break;
12822 case ArgType::NoMatchTypeConfusion:
12823 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion;
12824 break;
12825 case ArgType::NoMatch:
12826 Diag = diag::warn_format_conversion_argument_type_mismatch;
12827 break;
12828 }
12829
12830 EmitFormatDiagnostic(
          S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy
12832 << IsEnum << CSR << E->getSourceRange(),
12833 E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
12834 break;
12835 }
12836 case Sema::VAK_Undefined:
12837 case Sema::VAK_MSVCUndefined:
12838 if (CallType == Sema::VariadicDoesNotApply) {
12839 EmitTypeMismatch = true;
12840 } else {
12841 EmitFormatDiagnostic(
12842 S.PDiag(diag::warn_non_pod_vararg_with_format_string)
12843 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
12844 << AT.getRepresentativeTypeName(S.Context) << CSR
12845 << E->getSourceRange(),
12846 E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
12847 checkForCStrMembers(AT, E);
12848 }
12849 break;
12850
12851 case Sema::VAK_Invalid:
12852 if (CallType == Sema::VariadicDoesNotApply)
12853 EmitTypeMismatch = true;
12854 else if (ExprTy->isObjCObjectType())
12855 EmitFormatDiagnostic(
12856 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format)
12857 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
12858 << AT.getRepresentativeTypeName(S.Context) << CSR
12859 << E->getSourceRange(),
12860 E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
12861 else
12862 // FIXME: If this is an initializer list, suggest removing the braces
12863 // or inserting a cast to the target type.
12864 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
12865 << isa<InitListExpr>(E) << ExprTy << CallType
12866 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
12867 break;
12868 }
12869
12870 if (EmitTypeMismatch) {
12871 // The function is not variadic, so we do not generate warnings about
12872 // being allowed to pass that object as a variadic argument. Instead,
12873 // since there are inherently no printf specifiers for types which cannot
12874 // be passed as variadic arguments, emit a plain old specifier mismatch
12875 // argument.
12876 EmitFormatDiagnostic(
12877 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
12878 << AT.getRepresentativeTypeName(S.Context) << ExprTy << false
12879 << E->getSourceRange(),
12880 E->getBeginLoc(), false, CSR);
12881 }
12882
12883 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
12884 "format string specifier index out of range");
12885 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
12886 }
12887
12888 return true;
12889}
12890
12891//===--- CHECK: Scanf format string checking ------------------------------===//
12892
12893namespace {
12894
12895class CheckScanfHandler : public CheckFormatHandler {
12896public:
12897 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
12898 const Expr *origFormatExpr, Sema::FormatStringType type,
12899 unsigned firstDataArg, unsigned numDataArgs,
12900 const char *beg, Sema::FormatArgumentPassingKind APK,
12901 ArrayRef<const Expr *> Args, unsigned formatIdx,
12902 bool inFunctionCall, Sema::VariadicCallType CallType,
12903 llvm::SmallBitVector &CheckedVarArgs,
12904 UncoveredArgHandler &UncoveredArg)
12905 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
12906 numDataArgs, beg, APK, Args, formatIdx,
12907 inFunctionCall, CallType, CheckedVarArgs,
12908 UncoveredArg) {}
12909
12910 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
12911 const char *startSpecifier,
12912 unsigned specifierLen) override;
12913
12914 bool HandleInvalidScanfConversionSpecifier(
12915 const analyze_scanf::ScanfSpecifier &FS,
12916 const char *startSpecifier,
12917 unsigned specifierLen) override;
12918
12919 void HandleIncompleteScanList(const char *start, const char *end) override;
12920};
12921
12922} // namespace
12923
12924void CheckScanfHandler::HandleIncompleteScanList(const char *start,
12925 const char *end) {
12926 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
12927 getLocationOfByte(end), /*IsStringLocation*/true,
12928 getSpecifierRange(start, end - start));
12929}
12930
12931bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
12932 const analyze_scanf::ScanfSpecifier &FS,
12933 const char *startSpecifier,
12934 unsigned specifierLen) {
12935 const analyze_scanf::ScanfConversionSpecifier &CS =
12936 FS.getConversionSpecifier();
12937
  return HandleInvalidConversionSpecifier(FS.getArgIndex(),
                                          getLocationOfByte(CS.getStart()),
                                          startSpecifier, specifierLen,
                                          CS.getStart(), CS.getLength());
12942}
12943
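// Verifies a single scanf conversion specification: positional-argument
// consistency, a non-zero field width, the length modifier, and finally the
// type of the pointer argument the conversion writes through.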
12944bool CheckScanfHandler::HandleScanfSpecifier(
12945 const analyze_scanf::ScanfSpecifier &FS,
12946 const char *startSpecifier,
12947 unsigned specifierLen) {
12948 using namespace analyze_scanf;
12949 using namespace analyze_format_string;
12950
12951 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();
12952
12953 // Handle case where '%' and '*' don't consume an argument. These shouldn't
12954 // be used to decide if we are using positional arguments consistently.
12955 if (FS.consumesDataArgument()) {
12956 if (atFirstArg) {
12957 atFirstArg = false;
12958 usesPositionalArgs = FS.usesPositionalArg();
12959 }
12960 else if (usesPositionalArgs != FS.usesPositionalArg()) {
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
12963 return false;
12964 }
12965 }
12966
  // Check if the field width is non-zero.
12968 const OptionalAmount &Amt = FS.getFieldWidth();
12969 if (Amt.getHowSpecified() == OptionalAmount::Constant) {
12970 if (Amt.getConstantAmount() == 0) {
      const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
                                                   Amt.getConstantLength());
12973 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
12974 getLocationOfByte(Amt.getStart()),
12975 /*IsStringLocation*/true, R,
12976 FixItHint::CreateRemoval(R));
12977 }
12978 }
12979
12980 if (!FS.consumesDataArgument()) {
12981 // FIXME: Technically specifying a precision or field width here
12982 // makes no sense. Worth issuing a warning at some point.
12983 return true;
12984 }
12985
12986 // Consume the argument.
12987 unsigned argIndex = FS.getArgIndex();
12988 if (argIndex < NumDataArgs) {
12989 // The check to see if the argIndex is valid will come later.
12990 // We set the bit here because we may exit early from this
12991 // function if we encounter some other error.
12992 CoveredArgs.set(argIndex);
12993 }
12994
12995 // Check the length modifier is valid with the given conversion specifier.
  if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
                                 S.getLangOpts()))
12998 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
12999 diag::warn_format_nonsensical_length);
13000 else if (!FS.hasStandardLengthModifier())
13001 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
13002 else if (!FS.hasStandardLengthConversionCombination())
13003 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
13004 diag::warn_format_non_standard_conversion_spec);
13005
  if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
13007 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
13008
13009 // The remaining checks depend on the data arguments.
13010 if (ArgPassingKind == Sema::FAPK_VAList)
13011 return true;
13012
13013 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
13014 return false;
13015
13016 // Check that the argument type matches the format specifier.
  const Expr *Ex = getDataArg(argIndex);
13018 if (!Ex)
13019 return true;
13020
  const analyze_format_string::ArgType &AT = FS.getArgType(S.Context);
13022
13023 if (!AT.isValid()) {
13024 return true;
13025 }
13026
13027 analyze_format_string::ArgType::MatchKind Match =
      AT.matchesType(S.Context, Ex->getType());
  Match = handleFormatSignedness(Match, S.getDiagnostics(), Ex->getExprLoc());
13030 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
13031 if (Match == analyze_format_string::ArgType::Match)
13032 return true;
13033
13034 ScanfSpecifier fixedFS = FS;
  bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(),
                                 S.getLangOpts(), S.Context);
13037
13038 unsigned Diag =
13039 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic
13040 : diag::warn_format_conversion_argument_type_mismatch;
13041
13042 if (Success) {
13043 // Get the fix string from the fixed format specifier.
13044 SmallString<128> buf;
13045 llvm::raw_svector_ostream os(buf);
13046 fixedFS.toString(os);
13047
13048 EmitFormatDiagnostic(
        S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context)
13050 << Ex->getType() << false << Ex->getSourceRange(),
13051 Ex->getBeginLoc(),
13052 /*IsStringLocation*/ false,
13053 getSpecifierRange(startSpecifier, specifierLen),
        FixItHint::CreateReplacement(
            getSpecifierRange(startSpecifier, specifierLen), os.str()));
13056 } else {
    EmitFormatDiagnostic(S.PDiag(Diag)
                             << AT.getRepresentativeTypeName(S.Context)
13059 << Ex->getType() << false << Ex->getSourceRange(),
13060 Ex->getBeginLoc(),
13061 /*IsStringLocation*/ false,
13062 getSpecifierRange(startSpecifier, specifierLen));
13063 }
13064
13065 return true;
13066}
13067
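// Shared driver for printf- and scanf-style format checking: rejects wide
// format strings, warns about truncated or empty literals, then parses the
// format and dispatches each specifier to the matching handler above.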
13068static void CheckFormatString(
13069 Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr,
13070 ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK,
13071 unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type,
13072 bool inFunctionCall, Sema::VariadicCallType CallType,
13073 llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg,
13074 bool IgnoreStringsWithoutSpecifiers) {
13075 // CHECK: is the format string a wide literal?
13076 if (!FExpr->isAscii() && !FExpr->isUTF8()) {
13077 CheckFormatHandler::EmitFormatDiagnostic(
13078 S, inFunctionCall, Args[format_idx],
13079 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(),
13080 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
13081 return;
13082 }
13083
13084 // Str - The format string. NOTE: this is NOT null-terminated!
13085 StringRef StrRef = FExpr->getString();
13086 const char *Str = StrRef.data();
13087 // Account for cases where the string literal is truncated in a declaration.
13088 const ConstantArrayType *T =
      S.Context.getAsConstantArrayType(FExpr->getType());
13090 assert(T && "String literal not of constant array type!");
13091 size_t TypeSize = T->getZExtSize();
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
13093 const unsigned numDataArgs = Args.size() - firstDataArg;
13094
13095 if (IgnoreStringsWithoutSpecifiers &&
      !analyze_format_string::parseFormatStringHasFormattingSpecifiers(
          Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo()))
13098 return;
13099
13100 // Emit a warning if the string literal is truncated and does not contain an
13101 // embedded null character.
  if (TypeSize <= StrRef.size() && !StrRef.substr(0, TypeSize).contains('\0')) {
13103 CheckFormatHandler::EmitFormatDiagnostic(
13104 S, inFunctionCall, Args[format_idx],
13105 S.PDiag(diag::warn_printf_format_string_not_null_terminated),
13106 FExpr->getBeginLoc(),
13107 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange());
13108 return;
13109 }
13110
13111 // CHECK: empty format string?
13112 if (StrLen == 0 && numDataArgs > 0) {
13113 CheckFormatHandler::EmitFormatDiagnostic(
13114 S, inFunctionCall, Args[format_idx],
13115 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
13116 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
13117 return;
13118 }
13119
13120 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
13121 Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
13122 Type == Sema::FST_OSTrace) {
13123 CheckPrintfHandler H(
13124 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
13125 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str, APK,
13126 Args, format_idx, inFunctionCall, CallType, CheckedVarArgs,
13127 UncoveredArg);
13128
13129 if (!analyze_format_string::ParsePrintfString(
            H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo(),
            Type == Sema::FST_FreeBSDKPrintf))
13132 H.DoneProcessing();
13133 } else if (Type == Sema::FST_Scanf) {
13134 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
13135 numDataArgs, Str, APK, Args, format_idx, inFunctionCall,
13136 CallType, CheckedVarArgs, UncoveredArg);
13137
13138 if (!analyze_format_string::ParseScanfString(
            H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo()))
13140 H.DoneProcessing();
13141 } // TODO: handle other formats
13142}
13143
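// Returns true if the given format string literal contains a '%s' conversion
// specifier.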
13144bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
13145 // Str - The format string. NOTE: this is NOT null-terminated!
13146 StringRef StrRef = FExpr->getString();
13147 const char *Str = StrRef.data();
13148 // Account for cases where the string literal is truncated in a declaration.
  const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
13150 assert(T && "String literal not of constant array type!");
13151 size_t TypeSize = T->getZExtSize();
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
                                                         getLangOpts(),
                                                         Context.getTargetInfo());
13156}
13157
13158//===--- CHECK: Warn on use of wrong absolute value function. -------------===//
13159
// Returns the related absolute value function that is larger, or 0 if one
// does not exist.
13162static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) {
13163 switch (AbsFunction) {
13164 default:
13165 return 0;
13166
13167 case Builtin::BI__builtin_abs:
13168 return Builtin::BI__builtin_labs;
13169 case Builtin::BI__builtin_labs:
13170 return Builtin::BI__builtin_llabs;
13171 case Builtin::BI__builtin_llabs:
13172 return 0;
13173
13174 case Builtin::BI__builtin_fabsf:
13175 return Builtin::BI__builtin_fabs;
13176 case Builtin::BI__builtin_fabs:
13177 return Builtin::BI__builtin_fabsl;
13178 case Builtin::BI__builtin_fabsl:
13179 return 0;
13180
13181 case Builtin::BI__builtin_cabsf:
13182 return Builtin::BI__builtin_cabs;
13183 case Builtin::BI__builtin_cabs:
13184 return Builtin::BI__builtin_cabsl;
13185 case Builtin::BI__builtin_cabsl:
13186 return 0;
13187
13188 case Builtin::BIabs:
13189 return Builtin::BIlabs;
13190 case Builtin::BIlabs:
13191 return Builtin::BIllabs;
13192 case Builtin::BIllabs:
13193 return 0;
13194
13195 case Builtin::BIfabsf:
13196 return Builtin::BIfabs;
13197 case Builtin::BIfabs:
13198 return Builtin::BIfabsl;
13199 case Builtin::BIfabsl:
13200 return 0;
13201
13202 case Builtin::BIcabsf:
13203 return Builtin::BIcabs;
13204 case Builtin::BIcabs:
13205 return Builtin::BIcabsl;
13206 case Builtin::BIcabsl:
13207 return 0;
13208 }
13209}
13210
13211// Returns the argument type of the absolute value function.
13212static QualType getAbsoluteValueArgumentType(ASTContext &Context,
13213 unsigned AbsType) {
13214 if (AbsType == 0)
13215 return QualType();
13216
13217 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
  QualType BuiltinType = Context.GetBuiltinType(AbsType, Error);
13219 if (Error != ASTContext::GE_None)
13220 return QualType();
13221
13222 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>();
13223 if (!FT)
13224 return QualType();
13225
13226 if (FT->getNumParams() != 1)
13227 return QualType();
13228
  return FT->getParamType(0);
13230}
13231
13232// Returns the best absolute value function, or zero, based on type and
13233// current absolute value function.
13234static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType,
13235 unsigned AbsFunctionKind) {
13236 unsigned BestKind = 0;
  uint64_t ArgSize = Context.getTypeSize(ArgType);
  for (unsigned Kind = AbsFunctionKind; Kind != 0;
       Kind = getLargerAbsoluteValueFunction(Kind)) {
    QualType ParamType = getAbsoluteValueArgumentType(Context, Kind);
    if (Context.getTypeSize(ParamType) >= ArgSize) {
      if (BestKind == 0)
        BestKind = Kind;
      else if (Context.hasSameType(ParamType, ArgType)) {
13245 BestKind = Kind;
13246 break;
13247 }
13248 }
13249 }
13250 return BestKind;
13251}
13252
13253enum AbsoluteValueKind {
13254 AVK_Integer,
13255 AVK_Floating,
13256 AVK_Complex
13257};
13258
13259static AbsoluteValueKind getAbsoluteValueKind(QualType T) {
13260 if (T->isIntegralOrEnumerationType())
13261 return AVK_Integer;
13262 if (T->isRealFloatingType())
13263 return AVK_Floating;
13264 if (T->isAnyComplexType())
13265 return AVK_Complex;
13266
13267 llvm_unreachable("Type not integer, floating, or complex");
13268}
13269
13270// Changes the absolute value function to a different type. Preserves whether
13271// the function is a builtin.
13272static unsigned changeAbsFunction(unsigned AbsKind,
13273 AbsoluteValueKind ValueKind) {
13274 switch (ValueKind) {
13275 case AVK_Integer:
13276 switch (AbsKind) {
13277 default:
13278 return 0;
13279 case Builtin::BI__builtin_fabsf:
13280 case Builtin::BI__builtin_fabs:
13281 case Builtin::BI__builtin_fabsl:
13282 case Builtin::BI__builtin_cabsf:
13283 case Builtin::BI__builtin_cabs:
13284 case Builtin::BI__builtin_cabsl:
13285 return Builtin::BI__builtin_abs;
13286 case Builtin::BIfabsf:
13287 case Builtin::BIfabs:
13288 case Builtin::BIfabsl:
13289 case Builtin::BIcabsf:
13290 case Builtin::BIcabs:
13291 case Builtin::BIcabsl:
13292 return Builtin::BIabs;
13293 }
13294 case AVK_Floating:
13295 switch (AbsKind) {
13296 default:
13297 return 0;
13298 case Builtin::BI__builtin_abs:
13299 case Builtin::BI__builtin_labs:
13300 case Builtin::BI__builtin_llabs:
13301 case Builtin::BI__builtin_cabsf:
13302 case Builtin::BI__builtin_cabs:
13303 case Builtin::BI__builtin_cabsl:
13304 return Builtin::BI__builtin_fabsf;
13305 case Builtin::BIabs:
13306 case Builtin::BIlabs:
13307 case Builtin::BIllabs:
13308 case Builtin::BIcabsf:
13309 case Builtin::BIcabs:
13310 case Builtin::BIcabsl:
13311 return Builtin::BIfabsf;
13312 }
13313 case AVK_Complex:
13314 switch (AbsKind) {
13315 default:
13316 return 0;
13317 case Builtin::BI__builtin_abs:
13318 case Builtin::BI__builtin_labs:
13319 case Builtin::BI__builtin_llabs:
13320 case Builtin::BI__builtin_fabsf:
13321 case Builtin::BI__builtin_fabs:
13322 case Builtin::BI__builtin_fabsl:
13323 return Builtin::BI__builtin_cabsf;
13324 case Builtin::BIabs:
13325 case Builtin::BIlabs:
13326 case Builtin::BIllabs:
13327 case Builtin::BIfabsf:
13328 case Builtin::BIfabs:
13329 case Builtin::BIfabsl:
13330 return Builtin::BIcabsf;
13331 }
13332 }
13333 llvm_unreachable("Unable to convert function");
13334}
13335
13336static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) {
13337 const IdentifierInfo *FnInfo = FDecl->getIdentifier();
13338 if (!FnInfo)
13339 return 0;
13340
13341 switch (FDecl->getBuiltinID()) {
13342 default:
13343 return 0;
13344 case Builtin::BI__builtin_abs:
13345 case Builtin::BI__builtin_fabs:
13346 case Builtin::BI__builtin_fabsf:
13347 case Builtin::BI__builtin_fabsl:
13348 case Builtin::BI__builtin_labs:
13349 case Builtin::BI__builtin_llabs:
13350 case Builtin::BI__builtin_cabs:
13351 case Builtin::BI__builtin_cabsf:
13352 case Builtin::BI__builtin_cabsl:
13353 case Builtin::BIabs:
13354 case Builtin::BIlabs:
13355 case Builtin::BIllabs:
13356 case Builtin::BIfabs:
13357 case Builtin::BIfabsf:
13358 case Builtin::BIfabsl:
13359 case Builtin::BIcabs:
13360 case Builtin::BIcabsf:
13361 case Builtin::BIcabsl:
13362 return FDecl->getBuiltinID();
13363 }
13364 llvm_unreachable("Unknown Builtin type");
13365}
13366
13367// If the replacement is valid, emit a note with replacement function.
13368// Additionally, suggest including the proper header if not already included.
13369static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range,
13370 unsigned AbsKind, QualType ArgType) {
13371 bool EmitHeaderHint = true;
13372 const char *HeaderName = nullptr;
13373 StringRef FunctionName;
13374 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) {
13375 FunctionName = "std::abs";
13376 if (ArgType->isIntegralOrEnumerationType()) {
13377 HeaderName = "cstdlib";
13378 } else if (ArgType->isRealFloatingType()) {
13379 HeaderName = "cmath";
13380 } else {
13381 llvm_unreachable("Invalid Type");
13382 }
13383
13384 // Lookup all std::abs
13385 if (NamespaceDecl *Std = S.getStdNamespace()) {
      LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName);
13387 R.suppressDiagnostics();
13388 S.LookupQualifiedName(R, Std);
13389
13390 for (const auto *I : R) {
13391 const FunctionDecl *FDecl = nullptr;
        if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) {
          FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl());
        } else {
          FDecl = dyn_cast<FunctionDecl>(I);
13396 }
13397 if (!FDecl)
13398 continue;
13399
13400 // Found std::abs(), check that they are the right ones.
13401 if (FDecl->getNumParams() != 1)
13402 continue;
13403
13404 // Check that the parameter type can handle the argument.
        QualType ParamType = FDecl->getParamDecl(0)->getType();
        if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) &&
            S.Context.getTypeSize(ArgType) <=
                S.Context.getTypeSize(ParamType)) {
13409 // Found a function, don't need the header hint.
13410 EmitHeaderHint = false;
13411 break;
13412 }
13413 }
13414 }
13415 } else {
    FunctionName = S.Context.BuiltinInfo.getName(AbsKind);
    HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind);
13418
13419 if (HeaderName) {
      DeclarationName DN(&S.Context.Idents.get(FunctionName));
13421 LookupResult R(S, DN, Loc, Sema::LookupAnyName);
13422 R.suppressDiagnostics();
      S.LookupName(R, S.getCurScope());
13424
13425 if (R.isSingleResult()) {
        FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl());
13427 if (FD && FD->getBuiltinID() == AbsKind) {
13428 EmitHeaderHint = false;
13429 } else {
13430 return;
13431 }
13432 } else if (!R.empty()) {
13433 return;
13434 }
13435 }
13436 }
13437
13438 S.Diag(Loc, diag::note_replace_abs_function)
13439 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName);
13440
13441 if (!HeaderName)
13442 return;
13443
13444 if (!EmitHeaderHint)
13445 return;
13446
13447 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName
13448 << FunctionName;
13449}
13450
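// Returns true if FDecl is a function named Str declared in namespace std.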
13451template <std::size_t StrLen>
13452static bool IsStdFunction(const FunctionDecl *FDecl,
13453 const char (&Str)[StrLen]) {
13454 if (!FDecl)
13455 return false;
13456 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str))
13457 return false;
13458 if (!FDecl->isInStdNamespace())
13459 return false;
13460
13461 return true;
13462}
13463
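// Warn when NaN- or infinity-related functions (std::isnan, std::isunordered,
// __builtin_nanf, std::isinf, std::isfinite, or a function named 'infinity')
// are called while the floating-point options in effect say that NaNs or
// infinities are not honored.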
13464void Sema::CheckInfNaNFunction(const CallExpr *Call,
13465 const FunctionDecl *FDecl) {
  FPOptions FPO = Call->getFPFeaturesInEffect(getLangOpts());
13467 if ((IsStdFunction(FDecl, "isnan") || IsStdFunction(FDecl, "isunordered") ||
13468 (Call->getBuiltinCallee() == Builtin::BI__builtin_nanf)) &&
13469 FPO.getNoHonorNaNs())
13470 Diag(Call->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
13471 << 1 << 0 << Call->getSourceRange();
13472 else if ((IsStdFunction(FDecl, "isinf") ||
13473 (IsStdFunction(FDecl, "isfinite") ||
13474 (FDecl->getIdentifier() && FDecl->getName() == "infinity"))) &&
13475 FPO.getNoHonorInfs())
13476 Diag(Call->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
13477 << 0 << 0 << Call->getSourceRange();
13478}
13479
13480// Warn when using the wrong abs() function.
13481void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
13482 const FunctionDecl *FDecl) {
13483 if (Call->getNumArgs() != 1)
13484 return;
13485
13486 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
  bool IsStdAbs = IsStdFunction(FDecl, "abs");
13488 if (AbsKind == 0 && !IsStdAbs)
13489 return;
13490
  QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  QualType ParamType = Call->getArg(0)->getType();
13493
13494 // Unsigned types cannot be negative. Suggest removing the absolute value
13495 // function call.
13496 if (ArgType->isUnsignedIntegerType()) {
13497 StringRef FunctionName =
        IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
13499 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
13500 Diag(Call->getExprLoc(), diag::note_remove_abs)
13501 << FunctionName
13502 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
13503 return;
13504 }
13505
  // Taking the absolute value of a pointer is very suspicious; the user
  // probably wanted to index into an array, dereference a pointer, call a
  // function, etc.
13508 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
13509 unsigned DiagType = 0;
13510 if (ArgType->isFunctionType())
13511 DiagType = 1;
13512 else if (ArgType->isArrayType())
13513 DiagType = 2;
13514
13515 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
13516 return;
13517 }
13518
13519 // std::abs has overloads which prevent most of the absolute value problems
13520 // from occurring.
13521 if (IsStdAbs)
13522 return;
13523
13524 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(T: ArgType);
13525 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(T: ParamType);
13526
13527 // The argument and parameter are the same kind. Check if they are the right
13528 // size.
13529 if (ArgValueKind == ParamValueKind) {
13530 if (Context.getTypeSize(T: ArgType) <= Context.getTypeSize(T: ParamType))
13531 return;
13532
13533 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsFunctionKind: AbsKind);
13534 Diag(Call->getExprLoc(), diag::warn_abs_too_small)
13535 << FDecl << ArgType << ParamType;
13536
13537 if (NewAbsKind == 0)
13538 return;
13539
13540 emitReplacement(*this, Call->getExprLoc(),
13541 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
13542 return;
13543 }
13544
13545 // ArgValueKind != ParamValueKind
13546 // The wrong type of absolute value function was used. Attempt to find the
13547 // proper one.
13548 unsigned NewAbsKind = changeAbsFunction(AbsKind, ValueKind: ArgValueKind);
13549 NewAbsKind = getBestAbsFunction(Context, ArgType, AbsFunctionKind: NewAbsKind);
13550 if (NewAbsKind == 0)
13551 return;
13552
13553 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
13554 << FDecl << ParamValueKind << ArgValueKind;
13555
13556 emitReplacement(*this, Call->getExprLoc(),
13557 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
13558}
13559
13560 //===--- CHECK: Warn on use of std::max and unsigned zero ------------------===//
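/// For an unsigned type, std::max(0u, x) is always just x, so a literal zero
/// argument is almost certainly a mistake. The note emitted below suggests
/// removing the call entirely, e.g. rewriting 'std::max(0u, foo)' as '(foo)'.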
13561void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
13562 const FunctionDecl *FDecl) {
13563 if (!Call || !FDecl) return;
13564
13565 // Ignore template specializations and macros.
13566 if (inTemplateInstantiation()) return;
13567 if (Call->getExprLoc().isMacroID()) return;
13568
13569 // Only care about the one template argument, two function parameter std::max
13570 if (Call->getNumArgs() != 2) return;
13571 if (!IsStdFunction(FDecl, Str: "max")) return;
13572 const auto * ArgList = FDecl->getTemplateSpecializationArgs();
13573 if (!ArgList) return;
13574 if (ArgList->size() != 1) return;
13575
13576 // Check that template type argument is unsigned integer.
13577 const auto& TA = ArgList->get(Idx: 0);
13578 if (TA.getKind() != TemplateArgument::Type) return;
13579 QualType ArgType = TA.getAsType();
13580 if (!ArgType->isUnsignedIntegerType()) return;
13581
13582 // See if either argument is a literal zero.
13583 auto IsLiteralZeroArg = [](const Expr* E) -> bool {
13584 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(Val: E);
13585 if (!MTE) return false;
13586 const auto *Num = dyn_cast<IntegerLiteral>(Val: MTE->getSubExpr());
13587 if (!Num) return false;
13588 if (Num->getValue() != 0) return false;
13589 return true;
13590 };
13591
13592 const Expr *FirstArg = Call->getArg(Arg: 0);
13593 const Expr *SecondArg = Call->getArg(Arg: 1);
13594 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg);
13595 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg);
13596
13597 // Only warn when exactly one argument is zero.
13598 if (IsFirstArgZero == IsSecondArgZero) return;
13599
13600 SourceRange FirstRange = FirstArg->getSourceRange();
13601 SourceRange SecondRange = SecondArg->getSourceRange();
13602
13603 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange;
13604
13605 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero)
13606 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange;
13607
13608 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)".
13609 SourceRange RemovalRange;
13610 if (IsFirstArgZero) {
13611 RemovalRange = SourceRange(FirstRange.getBegin(),
13612 SecondRange.getBegin().getLocWithOffset(Offset: -1));
13613 } else {
13614 RemovalRange = SourceRange(getLocForEndOfToken(Loc: FirstRange.getEnd()),
13615 SecondRange.getEnd());
13616 }
13617
13618 Diag(Call->getExprLoc(), diag::note_remove_max_call)
13619 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange())
13620 << FixItHint::CreateRemoval(RemovalRange);
13621}
13622
13623//===--- CHECK: Standard memory functions ---------------------------------===//
13624
13625/// Takes the expression passed to the size_t parameter of functions
13626 /// such as memcmp, strncat, etc., and warns if it's a comparison.
13627///
13628/// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`.
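/// The intended call was almost certainly 'memcmp(&a, &b, sizeof(a)) > 0'; the
/// first fix-it below moves the misplaced closing parenthesis, and a second one
/// offers a '(size_t)' cast to silence the warning if it was intentional.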
13629static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E,
13630 IdentifierInfo *FnName,
13631 SourceLocation FnLoc,
13632 SourceLocation RParenLoc) {
13633 const BinaryOperator *Size = dyn_cast<BinaryOperator>(Val: E);
13634 if (!Size)
13635 return false;
13636
13637 // if E is a binop and its op is <=>, >, <, >=, <=, ==, !=, &&, or ||:
13638 if (!Size->isComparisonOp() && !Size->isLogicalOp())
13639 return false;
13640
13641 SourceRange SizeRange = Size->getSourceRange();
13642 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison)
13643 << SizeRange << FnName;
13644 S.Diag(FnLoc, diag::note_memsize_comparison_paren)
13645 << FnName
13646 << FixItHint::CreateInsertion(
13647 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")")
13648 << FixItHint::CreateRemoval(RParenLoc);
13649 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence)
13650 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(")
13651 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()),
13652 ")");
13653
13654 return true;
13655}
13656
13657/// Determine whether the given type is or contains a dynamic class type
13658/// (e.g., whether it has a vtable).
13659static const CXXRecordDecl *getContainedDynamicClass(QualType T,
13660 bool &IsContained) {
13661 // Look through array types while ignoring qualifiers.
13662 const Type *Ty = T->getBaseElementTypeUnsafe();
13663 IsContained = false;
13664
13665 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
13666 RD = RD ? RD->getDefinition() : nullptr;
13667 if (!RD || RD->isInvalidDecl())
13668 return nullptr;
13669
13670 if (RD->isDynamicClass())
13671 return RD;
13672
13673 // Check all the fields. (Bases need no separate check: a dynamic base would
13674 // already make the class dynamic, which is handled above.) A class cannot
13675 // transitively contain itself by value, so infinite recursion is impossible.
13676 for (auto *FD : RD->fields()) {
13677 bool SubContained;
13678 if (const CXXRecordDecl *ContainedRD =
13679 getContainedDynamicClass(FD->getType(), SubContained)) {
13680 IsContained = true;
13681 return ContainedRD;
13682 }
13683 }
13684
13685 return nullptr;
13686}
13687
13688static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) {
13689 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(Val: E))
13690 if (Unary->getKind() == UETT_SizeOf)
13691 return Unary;
13692 return nullptr;
13693}
13694
13695/// If E is a sizeof expression, returns its argument expression,
13696/// otherwise returns NULL.
13697static const Expr *getSizeOfExprArg(const Expr *E) {
13698 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
13699 if (!SizeOf->isArgumentType())
13700 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts();
13701 return nullptr;
13702}
13703
13704/// If E is a sizeof expression, returns its argument type.
13705static QualType getSizeOfArgType(const Expr *E) {
13706 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
13707 return SizeOf->getTypeOfArgument();
13708 return QualType();
13709}
13710
13711namespace {
13712
13713struct SearchNonTrivialToInitializeField
13714 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> {
13715 using Super =
13716 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>;
13717
13718 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {}
13719
13720 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT,
13721 SourceLocation SL) {
13722 if (const auto *AT = asDerived().getContext().getAsArrayType(T: FT)) {
13723 asDerived().visitArray(PDIK, AT, SL);
13724 return;
13725 }
13726
13727 Super::visitWithKind(PDIK, FT, Args&: SL);
13728 }
13729
13730 void visitARCStrong(QualType FT, SourceLocation SL) {
13731 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
13732 }
13733 void visitARCWeak(QualType FT, SourceLocation SL) {
13734 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
13735 }
13736 void visitStruct(QualType FT, SourceLocation SL) {
13737 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
13738 visit(FD->getType(), FD->getLocation());
13739 }
13740 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK,
13741 const ArrayType *AT, SourceLocation SL) {
13742 visit(FT: getContext().getBaseElementType(VAT: AT), Args&: SL);
13743 }
13744 void visitTrivial(QualType FT, SourceLocation SL) {}
13745
13746 static void diag(QualType RT, const Expr *E, Sema &S) {
13747 SearchNonTrivialToInitializeField(E, S).visitStruct(FT: RT, SL: SourceLocation());
13748 }
13749
13750 ASTContext &getContext() { return S.getASTContext(); }
13751
13752 const Expr *E;
13753 Sema &S;
13754};
13755
13756struct SearchNonTrivialToCopyField
13757 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> {
13758 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>;
13759
13760 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {}
13761
13762 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT,
13763 SourceLocation SL) {
13764 if (const auto *AT = asDerived().getContext().getAsArrayType(T: FT)) {
13765 asDerived().visitArray(PCK, AT, SL);
13766 return;
13767 }
13768
13769 Super::visitWithKind(PCK, FT, Args&: SL);
13770 }
13771
13772 void visitARCStrong(QualType FT, SourceLocation SL) {
13773 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
13774 }
13775 void visitARCWeak(QualType FT, SourceLocation SL) {
13776 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
13777 }
13778 void visitStruct(QualType FT, SourceLocation SL) {
13779 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
13780 visit(FD->getType(), FD->getLocation());
13781 }
13782 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT,
13783 SourceLocation SL) {
13784 visit(FT: getContext().getBaseElementType(VAT: AT), Args&: SL);
13785 }
13786 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT,
13787 SourceLocation SL) {}
13788 void visitTrivial(QualType FT, SourceLocation SL) {}
13789 void visitVolatileTrivial(QualType FT, SourceLocation SL) {}
13790
13791 static void diag(QualType RT, const Expr *E, Sema &S) {
13792 SearchNonTrivialToCopyField(E, S).visitStruct(FT: RT, SL: SourceLocation());
13793 }
13794
13795 ASTContext &getContext() { return S.getASTContext(); }
13796
13797 const Expr *E;
13798 Sema &S;
13799};
13800
13801}
13802
13803/// Detect if \c SizeofExpr is likely to calculate the sizeof an object.
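/// For example, 'sizeof(buf)', 'sizeof(T) * count', and 'n + sizeof(header)'
/// are all treated as likely size computations.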
13804static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) {
13805 SizeofExpr = SizeofExpr->IgnoreParenImpCasts();
13806
13807 if (const auto *BO = dyn_cast<BinaryOperator>(Val: SizeofExpr)) {
13808 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add)
13809 return false;
13810
13811 return doesExprLikelyComputeSize(SizeofExpr: BO->getLHS()) ||
13812 doesExprLikelyComputeSize(SizeofExpr: BO->getRHS());
13813 }
13814
13815 return getAsSizeOfExpr(E: SizeofExpr) != nullptr;
13816}
13817
13818/// Check if the ArgLoc originated from a macro passed to the call at CallLoc.
13819///
13820/// \code
13821/// #define MACRO 0
13822/// foo(MACRO);
13823/// foo(0);
13824/// \endcode
13825///
13826/// This should return true for the first call to foo, but not for the second
13827/// (regardless of whether foo is a macro or function).
13828static bool isArgumentExpandedFromMacro(SourceManager &SM,
13829 SourceLocation CallLoc,
13830 SourceLocation ArgLoc) {
13831 if (!CallLoc.isMacroID())
13832 return SM.getFileID(SpellingLoc: CallLoc) != SM.getFileID(SpellingLoc: ArgLoc);
13833
13834 return SM.getFileID(SpellingLoc: SM.getImmediateMacroCallerLoc(Loc: CallLoc)) !=
13835 SM.getFileID(SpellingLoc: SM.getImmediateMacroCallerLoc(Loc: ArgLoc));
13836}
13837
13838/// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the
13839/// last two arguments transposed.
13840static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) {
13841 if (BId != Builtin::BImemset && BId != Builtin::BIbzero)
13842 return;
13843
13844 const Expr *SizeArg =
13845 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts();
13846
13847 auto isLiteralZero = [](const Expr *E) {
13848 return (isa<IntegerLiteral>(E) &&
13849 cast<IntegerLiteral>(E)->getValue() == 0) ||
13850 (isa<CharacterLiteral>(E) &&
13851 cast<CharacterLiteral>(E)->getValue() == 0);
13852 };
13853
13854 // If we're memsetting or bzeroing 0 bytes, then this is likely an error.
13855 SourceLocation CallLoc = Call->getRParenLoc();
13856 SourceManager &SM = S.getSourceManager();
13857 if (isLiteralZero(SizeArg) &&
13858 !isArgumentExpandedFromMacro(SM, CallLoc, ArgLoc: SizeArg->getExprLoc())) {
13859
13860 SourceLocation DiagLoc = SizeArg->getExprLoc();
13861
13862 // Some platforms #define bzero to __builtin_memset. See if this is the
13863 // case, and if so, emit a better diagnostic.
13864 if (BId == Builtin::BIbzero ||
13865 (CallLoc.isMacroID() && Lexer::getImmediateMacroName(
13866 CallLoc, SM, S.getLangOpts()) == "bzero")) {
13867 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size);
13868 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence);
13869 } else if (!isLiteralZero(Call->getArg(Arg: 1)->IgnoreImpCasts())) {
13870 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0;
13871 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0;
13872 }
13873 return;
13874 }
13875
13876 // If the second argument to a memset is a sizeof expression and the third
13877 // isn't, this is also likely an error. This should catch
13878 // 'memset(buf, sizeof(buf), 0xff)'.
13879 if (BId == Builtin::BImemset &&
13880 doesExprLikelyComputeSize(Call->getArg(1)) &&
13881 !doesExprLikelyComputeSize(Call->getArg(2))) {
13882 SourceLocation DiagLoc = Call->getArg(Arg: 1)->getExprLoc();
13883 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1;
13884 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1;
13885 return;
13886 }
13887}
13888
13889 /// Check for dangerous or invalid arguments to memory access functions such as memset().
13890///
13891/// This issues warnings on known problematic, dangerous or unspecified
13892/// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp'
13893/// function calls.
13894///
13895/// \param Call The call expression to diagnose.
13896void Sema::CheckMemaccessArguments(const CallExpr *Call,
13897 unsigned BId,
13898 IdentifierInfo *FnName) {
13899 assert(BId != 0);
13900
13901 // It is possible to have a non-standard definition of memset. Validate
13902 // that we have enough arguments, and if not, abort further checking.
13903 unsigned ExpectedNumArgs =
13904 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3);
13905 if (Call->getNumArgs() < ExpectedNumArgs)
13906 return;
13907
13908 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero ||
13909 BId == Builtin::BIstrndup ? 1 : 2);
13910 unsigned LenArg =
13911 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2);
13912 const Expr *LenExpr = Call->getArg(Arg: LenArg)->IgnoreParenImpCasts();
13913
13914 if (CheckMemorySizeofForComparison(S&: *this, E: LenExpr, FnName,
13915 FnLoc: Call->getBeginLoc(), RParenLoc: Call->getRParenLoc()))
13916 return;
13917
13918 // Catch cases like 'memset(buf, sizeof(buf), 0)'.
13919 CheckMemaccessSize(S&: *this, BId, Call);
13920
13921 // We have special checking when the length is a sizeof expression.
13922 QualType SizeOfArgTy = getSizeOfArgType(E: LenExpr);
13923 const Expr *SizeOfArg = getSizeOfExprArg(E: LenExpr);
13924 llvm::FoldingSetNodeID SizeOfArgID;
13925
13926 // Although widely used, 'bzero' is not a standard function. Be more strict
13927 // with the argument types before allowing diagnostics and only allow the
13928 // form bzero(ptr, sizeof(...)).
13929 QualType FirstArgTy = Call->getArg(Arg: 0)->IgnoreParenImpCasts()->getType();
13930 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>())
13931 return;
13932
13933 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) {
13934 const Expr *Dest = Call->getArg(Arg: ArgIdx)->IgnoreParenImpCasts();
13935 SourceRange ArgRange = Call->getArg(Arg: ArgIdx)->getSourceRange();
13936
13937 QualType DestTy = Dest->getType();
13938 QualType PointeeTy;
13939 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) {
13940 PointeeTy = DestPtrTy->getPointeeType();
13941
13942 // Never warn about void type pointers. This can be used to suppress
13943 // false positives.
13944 if (PointeeTy->isVoidType())
13945 continue;
13946
13947 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by
13948 // actually comparing the expressions for equality. Because computing the
13949 // expression IDs can be expensive, we only do this if the diagnostic is
13950 // enabled.
13951 if (SizeOfArg &&
13952 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess,
13953 SizeOfArg->getExprLoc())) {
13954 // We only compute IDs for expressions if the warning is enabled, and
13955 // cache the sizeof arg's ID.
13956 if (SizeOfArgID == llvm::FoldingSetNodeID())
13957 SizeOfArg->Profile(SizeOfArgID, Context, true);
13958 llvm::FoldingSetNodeID DestID;
13959 Dest->Profile(DestID, Context, true);
13960 if (DestID == SizeOfArgID) {
13961 // TODO: For strncpy() and friends, this could suggest sizeof(dst)
13962 // over sizeof(src) as well.
13963 unsigned ActionIdx = 0; // Default is to suggest dereferencing.
13964 StringRef ReadableName = FnName->getName();
13965
13966 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Val: Dest))
13967 if (UnaryOp->getOpcode() == UO_AddrOf)
13968 ActionIdx = 1; // If it's an address-of operator, just remove it.
13969 if (!PointeeTy->isIncompleteType() &&
13970 (Context.getTypeSize(T: PointeeTy) == Context.getCharWidth()))
13971 ActionIdx = 2; // If the pointee's size is sizeof(char),
13972 // suggest an explicit length.
13973
13974 // If the function is defined as a builtin macro, do not show macro
13975 // expansion.
13976 SourceLocation SL = SizeOfArg->getExprLoc();
13977 SourceRange DSR = Dest->getSourceRange();
13978 SourceRange SSR = SizeOfArg->getSourceRange();
13979 SourceManager &SM = getSourceManager();
13980
13981 if (SM.isMacroArgExpansion(Loc: SL)) {
13982 ReadableName = Lexer::getImmediateMacroName(Loc: SL, SM, LangOpts);
13983 SL = SM.getSpellingLoc(Loc: SL);
13984 DSR = SourceRange(SM.getSpellingLoc(Loc: DSR.getBegin()),
13985 SM.getSpellingLoc(Loc: DSR.getEnd()));
13986 SSR = SourceRange(SM.getSpellingLoc(Loc: SSR.getBegin()),
13987 SM.getSpellingLoc(Loc: SSR.getEnd()));
13988 }
13989
13990 DiagRuntimeBehavior(SL, SizeOfArg,
13991 PDiag(diag::warn_sizeof_pointer_expr_memaccess)
13992 << ReadableName
13993 << PointeeTy
13994 << DestTy
13995 << DSR
13996 << SSR);
13997 DiagRuntimeBehavior(SL, SizeOfArg,
13998 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note)
13999 << ActionIdx
14000 << SSR);
14001
14002 break;
14003 }
14004 }
14005
14006 // Also check for cases where the sizeof argument is the exact same
14007 // type as the memory argument, and where it points to a user-defined
14008 // record type.
14009 if (SizeOfArgTy != QualType()) {
14010 if (PointeeTy->isRecordType() &&
14011 Context.typesAreCompatible(T1: SizeOfArgTy, T2: DestTy)) {
14012 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest,
14013 PDiag(diag::warn_sizeof_pointer_type_memaccess)
14014 << FnName << SizeOfArgTy << ArgIdx
14015 << PointeeTy << Dest->getSourceRange()
14016 << LenExpr->getSourceRange());
14017 break;
14018 }
14019 }
14020 } else if (DestTy->isArrayType()) {
14021 PointeeTy = DestTy;
14022 }
14023
14024 if (PointeeTy == QualType())
14025 continue;
14026
14027 // Always complain about dynamic classes.
14028 bool IsContained;
14029 if (const CXXRecordDecl *ContainedRD =
14030 getContainedDynamicClass(T: PointeeTy, IsContained)) {
14031
14032 unsigned OperationType = 0;
14033 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp;
14034 // "overwritten" if we're warning about the destination for any call
14035 // but memcmp; otherwise a verb appropriate to the call.
14036 if (ArgIdx != 0 || IsCmp) {
14037 if (BId == Builtin::BImemcpy)
14038 OperationType = 1;
14039 else if (BId == Builtin::BImemmove)
14040 OperationType = 2;
14041 else if (IsCmp)
14042 OperationType = 3;
14043 }
14044
14045 DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
14046 PDiag(diag::warn_dyn_class_memaccess)
14047 << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName
14048 << IsContained << ContainedRD << OperationType
14049 << Call->getCallee()->getSourceRange());
14050 } else if (PointeeTy.hasNonTrivialObjCLifetime() &&
14051 BId != Builtin::BImemset)
14052 DiagRuntimeBehavior(
14053 Dest->getExprLoc(), Dest,
14054 PDiag(diag::warn_arc_object_memaccess)
14055 << ArgIdx << FnName << PointeeTy
14056 << Call->getCallee()->getSourceRange());
14057 else if (const auto *RT = PointeeTy->getAs<RecordType>()) {
14058 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) &&
14059 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) {
14060 DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
14061 PDiag(diag::warn_cstruct_memaccess)
14062 << ArgIdx << FnName << PointeeTy << 0);
14063 SearchNonTrivialToInitializeField::diag(RT: PointeeTy, E: Dest, S&: *this);
14064 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) &&
14065 RT->getDecl()->isNonTrivialToPrimitiveCopy()) {
14066 DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
14067 PDiag(diag::warn_cstruct_memaccess)
14068 << ArgIdx << FnName << PointeeTy << 1);
14069 SearchNonTrivialToCopyField::diag(RT: PointeeTy, E: Dest, S&: *this);
14070 } else {
14071 continue;
14072 }
14073 } else
14074 continue;
14075
14076 DiagRuntimeBehavior(
14077 Dest->getExprLoc(), Dest,
14078 PDiag(diag::note_bad_memaccess_silence)
14079 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)"));
14080 break;
14081 }
14082}
14083
14084// A little helper routine: ignore addition and subtraction of integer literals.
14085// This intentionally does not ignore all integer constant expressions because
14086// we don't want to remove sizeof().
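// For example, 'p + 4' and '(p - 1)' are both reduced to 'p', while
// 'p + sizeof(x)' is returned unchanged.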
14087static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) {
14088 Ex = Ex->IgnoreParenCasts();
14089
14090 while (true) {
14091 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Val: Ex);
14092 if (!BO || !BO->isAdditiveOp())
14093 break;
14094
14095 const Expr *RHS = BO->getRHS()->IgnoreParenCasts();
14096 const Expr *LHS = BO->getLHS()->IgnoreParenCasts();
14097
14098 if (isa<IntegerLiteral>(Val: RHS))
14099 Ex = LHS;
14100 else if (isa<IntegerLiteral>(Val: LHS))
14101 Ex = RHS;
14102 else
14103 break;
14104 }
14105
14106 return Ex;
14107}
14108
14109static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty,
14110 ASTContext &Context) {
14111 // Only handle constant-sized arrays and VLAs, but not flexible array members.
14112 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T: Ty)) {
14113 // Only issue the FIXIT for arrays of size > 1.
14114 if (CAT->getZExtSize() <= 1)
14115 return false;
14116 } else if (!Ty->isVariableArrayType()) {
14117 return false;
14118 }
14119 return true;
14120}
14121
14122// Warn if the user has made the 'size' argument to strlcpy or strlcat
14123// be the size of the source, instead of the destination.
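// Typical buggy patterns caught here are 'strlcpy(dst, src, sizeof(src))' and
// 'strlcpy(dst, src, strlen(src))'; the size should normally be derived from
// the destination buffer instead.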
14124void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
14125 IdentifierInfo *FnName) {
14126
14127 // Don't crash if the user has the wrong number of arguments
14128 unsigned NumArgs = Call->getNumArgs();
14129 if ((NumArgs != 3) && (NumArgs != 4))
14130 return;
14131
14132 const Expr *SrcArg = ignoreLiteralAdditions(Ex: Call->getArg(Arg: 1), Ctx&: Context);
14133 const Expr *SizeArg = ignoreLiteralAdditions(Ex: Call->getArg(Arg: 2), Ctx&: Context);
14134 const Expr *CompareWithSrc = nullptr;
14135
14136 if (CheckMemorySizeofForComparison(S&: *this, E: SizeArg, FnName,
14137 FnLoc: Call->getBeginLoc(), RParenLoc: Call->getRParenLoc()))
14138 return;
14139
14140 // Look for 'strlcpy(dst, x, sizeof(x))'
14141 if (const Expr *Ex = getSizeOfExprArg(E: SizeArg))
14142 CompareWithSrc = Ex;
14143 else {
14144 // Look for 'strlcpy(dst, x, strlen(x))'
14145 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(Val: SizeArg)) {
14146 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen &&
14147 SizeCall->getNumArgs() == 1)
14148 CompareWithSrc = ignoreLiteralAdditions(Ex: SizeCall->getArg(Arg: 0), Ctx&: Context);
14149 }
14150 }
14151
14152 if (!CompareWithSrc)
14153 return;
14154
14155 // Determine if the argument to sizeof/strlen is equal to the source
14156 // argument. In principle there are all kinds of things you could do
14157 // here, for instance creating an == expression and evaluating it with
14158 // EvaluateAsBooleanCondition, but this uses a more direct technique:
14159 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(Val: SrcArg);
14160 if (!SrcArgDRE)
14161 return;
14162
14163 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(Val: CompareWithSrc);
14164 if (!CompareWithSrcDRE ||
14165 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl())
14166 return;
14167
14168 const Expr *OriginalSizeArg = Call->getArg(Arg: 2);
14169 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size)
14170 << OriginalSizeArg->getSourceRange() << FnName;
14171
14172 // Output a FIXIT hint if the destination is an array (rather than a
14173 // pointer to an array). This could be enhanced to handle some
14174 // pointers if we know the actual size, like if DstArg is 'array+2'
14175 // we could say 'sizeof(array)-2'.
14176 const Expr *DstArg = Call->getArg(Arg: 0)->IgnoreParenImpCasts();
14177 if (!isConstantSizeArrayWithMoreThanOneElement(Ty: DstArg->getType(), Context))
14178 return;
14179
14180 SmallString<128> sizeString;
14181 llvm::raw_svector_ostream OS(sizeString);
14182 OS << "sizeof(";
14183 DstArg->printPretty(OS, nullptr, getPrintingPolicy());
14184 OS << ")";
14185
14186 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size)
14187 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(),
14188 OS.str());
14189}
14190
14191/// Check if two expressions refer to the same declaration.
14192static bool referToTheSameDecl(const Expr *E1, const Expr *E2) {
14193 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(Val: E1))
14194 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(Val: E2))
14195 return D1->getDecl() == D2->getDecl();
14196 return false;
14197}
14198
14199static const Expr *getStrlenExprArg(const Expr *E) {
14200 if (const CallExpr *CE = dyn_cast<CallExpr>(Val: E)) {
14201 const FunctionDecl *FD = CE->getDirectCallee();
14202 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen)
14203 return nullptr;
14204 return CE->getArg(Arg: 0)->IgnoreParenCasts();
14205 }
14206 return nullptr;
14207}
14208
14209// Warn on anti-patterns as the 'size' argument to strncat.
14210 // The correct size argument should look like the following:
14211 // strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
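// Diagnosed patterns include a bare 'sizeof(dst)', 'sizeof(src)',
// 'sizeof(dst) - strlen(dst)' (missing the '- 1'), and
// 'sizeof(src) - <anything>'.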
14212void Sema::CheckStrncatArguments(const CallExpr *CE,
14213 IdentifierInfo *FnName) {
14214 // Don't crash if the user has the wrong number of arguments.
14215 if (CE->getNumArgs() < 3)
14216 return;
14217 const Expr *DstArg = CE->getArg(Arg: 0)->IgnoreParenCasts();
14218 const Expr *SrcArg = CE->getArg(Arg: 1)->IgnoreParenCasts();
14219 const Expr *LenArg = CE->getArg(Arg: 2)->IgnoreParenCasts();
14220
14221 if (CheckMemorySizeofForComparison(S&: *this, E: LenArg, FnName, FnLoc: CE->getBeginLoc(),
14222 RParenLoc: CE->getRParenLoc()))
14223 return;
14224
14225 // Identify common expressions, which are wrongly used as the size argument
14226 // to strncat and may lead to buffer overflows.
14227 unsigned PatternType = 0;
14228 if (const Expr *SizeOfArg = getSizeOfExprArg(E: LenArg)) {
14229 // - sizeof(dst)
14230 if (referToTheSameDecl(E1: SizeOfArg, E2: DstArg))
14231 PatternType = 1;
14232 // - sizeof(src)
14233 else if (referToTheSameDecl(E1: SizeOfArg, E2: SrcArg))
14234 PatternType = 2;
14235 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(Val: LenArg)) {
14236 if (BE->getOpcode() == BO_Sub) {
14237 const Expr *L = BE->getLHS()->IgnoreParenCasts();
14238 const Expr *R = BE->getRHS()->IgnoreParenCasts();
14239 // - sizeof(dst) - strlen(dst)
14240 if (referToTheSameDecl(E1: DstArg, E2: getSizeOfExprArg(E: L)) &&
14241 referToTheSameDecl(E1: DstArg, E2: getStrlenExprArg(E: R)))
14242 PatternType = 1;
14243 // - sizeof(src) - (anything)
14244 else if (referToTheSameDecl(E1: SrcArg, E2: getSizeOfExprArg(E: L)))
14245 PatternType = 2;
14246 }
14247 }
14248
14249 if (PatternType == 0)
14250 return;
14251
14252 // Generate the diagnostic.
14253 SourceLocation SL = LenArg->getBeginLoc();
14254 SourceRange SR = LenArg->getSourceRange();
14255 SourceManager &SM = getSourceManager();
14256
14257 // If the function is defined as a builtin macro, do not show macro expansion.
14258 if (SM.isMacroArgExpansion(Loc: SL)) {
14259 SL = SM.getSpellingLoc(Loc: SL);
14260 SR = SourceRange(SM.getSpellingLoc(Loc: SR.getBegin()),
14261 SM.getSpellingLoc(Loc: SR.getEnd()));
14262 }
14263
14264 // Check if the destination is an array (rather than a pointer to an array).
14265 QualType DstTy = DstArg->getType();
14266 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(Ty: DstTy,
14267 Context);
14268 if (!isKnownSizeArray) {
14269 if (PatternType == 1)
14270 Diag(SL, diag::warn_strncat_wrong_size) << SR;
14271 else
14272 Diag(SL, diag::warn_strncat_src_size) << SR;
14273 return;
14274 }
14275
14276 if (PatternType == 1)
14277 Diag(SL, diag::warn_strncat_large_size) << SR;
14278 else
14279 Diag(SL, diag::warn_strncat_src_size) << SR;
14280
14281 SmallString<128> sizeString;
14282 llvm::raw_svector_ostream OS(sizeString);
14283 OS << "sizeof(";
14284 DstArg->printPretty(OS, nullptr, getPrintingPolicy());
14285 OS << ") - ";
14286 OS << "strlen(";
14287 DstArg->printPretty(OS, nullptr, getPrintingPolicy());
14288 OS << ") - 1";
14289
14290 Diag(SL, diag::note_strncat_wrong_size)
14291 << FixItHint::CreateReplacement(SR, OS.str());
14292}
14293
14294namespace {
14295void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName,
14296 const UnaryOperator *UnaryExpr, const Decl *D) {
14297 if (isa<FieldDecl, FunctionDecl, VarDecl>(Val: D)) {
14298 S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object)
14299 << CalleeName << 0 /*object: */ << cast<NamedDecl>(D);
14300 return;
14301 }
14302}
14303
14304void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName,
14305 const UnaryOperator *UnaryExpr) {
14306 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Val: UnaryExpr->getSubExpr())) {
14307 const Decl *D = Lvalue->getDecl();
14308 if (isa<DeclaratorDecl>(Val: D))
14309 if (!dyn_cast<DeclaratorDecl>(Val: D)->getType()->isReferenceType())
14310 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D);
14311 }
14312
14313 if (const auto *Lvalue = dyn_cast<MemberExpr>(Val: UnaryExpr->getSubExpr()))
14314 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr,
14315 Lvalue->getMemberDecl());
14316}
14317
14318void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName,
14319 const UnaryOperator *UnaryExpr) {
14320 const auto *Lambda = dyn_cast<LambdaExpr>(
14321 Val: UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens());
14322 if (!Lambda)
14323 return;
14324
14325 S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object)
14326 << CalleeName << 2 /*object: lambda expression*/;
14327}
14328
14329void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName,
14330 const DeclRefExpr *Lvalue) {
14331 const auto *Var = dyn_cast<VarDecl>(Val: Lvalue->getDecl());
14332 if (Var == nullptr)
14333 return;
14334
14335 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object)
14336 << CalleeName << 0 /*object: */ << Var;
14337}
14338
14339void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName,
14340 const CastExpr *Cast) {
14341 SmallString<128> SizeString;
14342 llvm::raw_svector_ostream OS(SizeString);
14343
14344 clang::CastKind Kind = Cast->getCastKind();
14345 if (Kind == clang::CK_BitCast &&
14346 !Cast->getSubExpr()->getType()->isFunctionPointerType())
14347 return;
14348 if (Kind == clang::CK_IntegralToPointer &&
14349 !isa<IntegerLiteral>(
14350 Val: Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens()))
14351 return;
14352
14353 switch (Cast->getCastKind()) {
14354 case clang::CK_BitCast:
14355 case clang::CK_IntegralToPointer:
14356 case clang::CK_FunctionToPointerDecay:
14357 OS << '\'';
14358 Cast->printPretty(OS, nullptr, S.getPrintingPolicy());
14359 OS << '\'';
14360 break;
14361 default:
14362 return;
14363 }
14364
14365 S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object)
14366 << CalleeName << 0 /*object: */ << OS.str();
14367}
14368} // namespace
14369
14370/// Alerts the user that they are attempting to free a non-malloc'd object.
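/// Examples diagnosed here include 'free(&local_variable)', freeing a stack
/// array or the address of a label ('&&label'), and passing a block or a
/// '+[]{ ... }' lambda converted to a function pointer.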
14371void Sema::CheckFreeArguments(const CallExpr *E) {
14372 const std::string CalleeName =
14373 cast<FunctionDecl>(Val: E->getCalleeDecl())->getQualifiedNameAsString();
14374
14375 { // Prefer something that doesn't involve a cast to make things simpler.
14376 const Expr *Arg = E->getArg(Arg: 0)->IgnoreParenCasts();
14377 if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Val: Arg))
14378 switch (UnaryExpr->getOpcode()) {
14379 case UnaryOperator::Opcode::UO_AddrOf:
14380 return CheckFreeArgumentsAddressof(S&: *this, CalleeName, UnaryExpr);
14381 case UnaryOperator::Opcode::UO_Plus:
14382 return CheckFreeArgumentsPlus(S&: *this, CalleeName, UnaryExpr);
14383 default:
14384 break;
14385 }
14386
14387 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Val: Arg))
14388 if (Lvalue->getType()->isArrayType())
14389 return CheckFreeArgumentsStackArray(S&: *this, CalleeName, Lvalue);
14390
14391 if (const auto *Label = dyn_cast<AddrLabelExpr>(Val: Arg)) {
14392 Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object)
14393 << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier();
14394 return;
14395 }
14396
14397 if (isa<BlockExpr>(Val: Arg)) {
14398 Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object)
14399 << CalleeName << 1 /*object: block*/;
14400 return;
14401 }
14402 }
14403 // Maybe the cast was important, check after the other cases.
14404 if (const auto *Cast = dyn_cast<CastExpr>(Val: E->getArg(Arg: 0)))
14405 return CheckFreeArgumentsCast(S&: *this, CalleeName, Cast);
14406}
14407
14408void
14409Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
14410 SourceLocation ReturnLoc,
14411 bool isObjCMethod,
14412 const AttrVec *Attrs,
14413 const FunctionDecl *FD) {
14414 // Check if the return value is null but should not be.
14415 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
14416 (!isObjCMethod && isNonNullType(lhsType))) &&
14417 CheckNonNullExpr(*this, RetValExp))
14418 Diag(ReturnLoc, diag::warn_null_ret)
14419 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();
14420
14421 // C++11 [basic.stc.dynamic.allocation]p4:
14422 // If an allocation function declared with a non-throwing
14423 // exception-specification fails to allocate storage, it shall return
14424 // a null pointer. Any other allocation function that fails to allocate
14425 // storage shall indicate failure only by throwing an exception [...]
14426 if (FD) {
14427 OverloadedOperatorKind Op = FD->getOverloadedOperator();
14428 if (Op == OO_New || Op == OO_Array_New) {
14429 const FunctionProtoType *Proto
14430 = FD->getType()->castAs<FunctionProtoType>();
14431 if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
14432 CheckNonNullExpr(*this, RetValExp))
14433 Diag(ReturnLoc, diag::warn_operator_new_returns_null)
14434 << FD << getLangOpts().CPlusPlus11;
14435 }
14436 }
14437
14438 if (RetValExp && RetValExp->getType()->isWebAssemblyTableType()) {
14439 Diag(ReturnLoc, diag::err_wasm_table_art) << 1;
14440 }
14441
14442 // PPC MMA non-pointer types are not allowed as a return type. Checking the type
14443 // here prevents the user from using a PPC MMA type as a trailing return type.
14444 if (Context.getTargetInfo().getTriple().isPPC64())
14445 CheckPPCMMAType(Type: RetValExp->getType(), TypeLoc: ReturnLoc);
14446}
14447
14448/// Check for comparisons of floating-point values using == and !=. Issue a
14449/// warning if the comparison is not likely to do what the programmer intended.
14450void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS,
14451 BinaryOperatorKind Opcode) {
14452 if (!BinaryOperator::isEqualityOp(Opc: Opcode))
14453 return;
14454
14455 // Match and capture subexpressions such as "(float) X == 0.1".
14456 FloatingLiteral *FPLiteral;
14457 CastExpr *FPCast;
14458 auto getCastAndLiteral = [&FPLiteral, &FPCast](Expr *L, Expr *R) {
14459 FPLiteral = dyn_cast<FloatingLiteral>(Val: L->IgnoreParens());
14460 FPCast = dyn_cast<CastExpr>(Val: R->IgnoreParens());
14461 return FPLiteral && FPCast;
14462 };
14463
14464 if (getCastAndLiteral(LHS, RHS) || getCastAndLiteral(RHS, LHS)) {
14465 auto *SourceTy = FPCast->getSubExpr()->getType()->getAs<BuiltinType>();
14466 auto *TargetTy = FPLiteral->getType()->getAs<BuiltinType>();
14467 if (SourceTy && TargetTy && SourceTy->isFloatingPoint() &&
14468 TargetTy->isFloatingPoint()) {
14469 bool Lossy;
14470 llvm::APFloat TargetC = FPLiteral->getValue();
14471 TargetC.convert(ToSemantics: Context.getFloatTypeSemantics(T: QualType(SourceTy, 0)),
14472 RM: llvm::APFloat::rmNearestTiesToEven, losesInfo: &Lossy);
14473 if (Lossy) {
14474 // If the literal cannot be represented in the source type, then a
14475 // check for == is always false and check for != is always true.
14476 Diag(Loc, diag::warn_float_compare_literal)
14477 << (Opcode == BO_EQ) << QualType(SourceTy, 0)
14478 << LHS->getSourceRange() << RHS->getSourceRange();
14479 return;
14480 }
14481 }
14482 }
14483
14484 // Match a more general floating-point equality comparison (-Wfloat-equal).
14485 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
14486 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();
14487
14488 // Special case: check for x == x (which is OK).
14489 // Do not emit warnings for such cases.
14490 if (auto *DRL = dyn_cast<DeclRefExpr>(Val: LeftExprSansParen))
14491 if (auto *DRR = dyn_cast<DeclRefExpr>(Val: RightExprSansParen))
14492 if (DRL->getDecl() == DRR->getDecl())
14493 return;
14494
14495 // Special case: check for comparisons against literals that can be exactly
14496 // represented by APFloat. In such cases, do not emit a warning. This
14497 // is a heuristic: comparisons against such literals are often used to
14498 // detect if a value in a variable has not changed. This clearly can
14499 // lead to false negatives.
14500 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(Val: LeftExprSansParen)) {
14501 if (FLL->isExact())
14502 return;
14503 } else
14504 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(Val: RightExprSansParen))
14505 if (FLR->isExact())
14506 return;
14507
14508 // Don't warn when either side is a call to a builtin function.
14509 if (CallExpr* CL = dyn_cast<CallExpr>(Val: LeftExprSansParen))
14510 if (CL->getBuiltinCallee())
14511 return;
14512
14513 if (CallExpr* CR = dyn_cast<CallExpr>(Val: RightExprSansParen))
14514 if (CR->getBuiltinCallee())
14515 return;
14516
14517 // Emit the diagnostic.
14518 Diag(Loc, diag::warn_floatingpoint_eq)
14519 << LHS->getSourceRange() << RHS->getSourceRange();
14520}
14521
14522//===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
14523//===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//
14524
14525namespace {
14526
14527/// Structure recording the 'active' range of an integer-valued
14528/// expression.
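/// For example, a value of type 'unsigned char' has IntRange(8, true), while a
/// value of type 'signed char' has IntRange(8, false).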
14529struct IntRange {
14530 /// The number of bits active in the int. Note that this includes exactly one
14531 /// sign bit if !NonNegative.
14532 unsigned Width;
14533
14534 /// True if the int is known not to have negative values. If so, all leading
14535 /// bits before Width are known zero, otherwise they are known to be the
14536 /// same as the MSB within Width.
14537 bool NonNegative;
14538
14539 IntRange(unsigned Width, bool NonNegative)
14540 : Width(Width), NonNegative(NonNegative) {}
14541
14542 /// Number of bits excluding the sign bit.
14543 unsigned valueBits() const {
14544 return NonNegative ? Width : Width - 1;
14545 }
14546
14547 /// Returns the range of the bool type.
14548 static IntRange forBoolType() {
14549 return IntRange(1, true);
14550 }
14551
14552 /// Returns the range of an opaque value of the given integral type.
14553 static IntRange forValueOfType(ASTContext &C, QualType T) {
14554 return forValueOfCanonicalType(C,
14555 T: T->getCanonicalTypeInternal().getTypePtr());
14556 }
14557
14558 /// Returns the range of an opaque value of a canonical integral type.
14559 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) {
14560 assert(T->isCanonicalUnqualified());
14561
14562 if (const VectorType *VT = dyn_cast<VectorType>(Val: T))
14563 T = VT->getElementType().getTypePtr();
14564 if (const ComplexType *CT = dyn_cast<ComplexType>(Val: T))
14565 T = CT->getElementType().getTypePtr();
14566 if (const AtomicType *AT = dyn_cast<AtomicType>(Val: T))
14567 T = AT->getValueType().getTypePtr();
14568
14569 if (!C.getLangOpts().CPlusPlus) {
14570 // For enum types in C code, use the underlying datatype.
14571 if (const EnumType *ET = dyn_cast<EnumType>(Val: T))
14572 T = ET->getDecl()->getIntegerType().getDesugaredType(Context: C).getTypePtr();
14573 } else if (const EnumType *ET = dyn_cast<EnumType>(Val: T)) {
14574 // For enum types in C++, use the known bit width of the enumerators.
14575 EnumDecl *Enum = ET->getDecl();
14576 // In C++11, enums can have a fixed underlying type. Use this type to
14577 // compute the range.
14578 if (Enum->isFixed()) {
14579 return IntRange(C.getIntWidth(T: QualType(T, 0)),
14580 !ET->isSignedIntegerOrEnumerationType());
14581 }
14582
14583 unsigned NumPositive = Enum->getNumPositiveBits();
14584 unsigned NumNegative = Enum->getNumNegativeBits();
14585
14586 if (NumNegative == 0)
14587 return IntRange(NumPositive, true/*NonNegative*/);
14588 else
14589 return IntRange(std::max(a: NumPositive + 1, b: NumNegative),
14590 false/*NonNegative*/);
14591 }
14592
14593 if (const auto *EIT = dyn_cast<BitIntType>(Val: T))
14594 return IntRange(EIT->getNumBits(), EIT->isUnsigned());
14595
14596 const BuiltinType *BT = cast<BuiltinType>(Val: T);
14597 assert(BT->isInteger());
14598
14599 return IntRange(C.getIntWidth(T: QualType(T, 0)), BT->isUnsignedInteger());
14600 }
14601
14602 /// Returns the "target" range of a canonical integral type, i.e.
14603 /// the range of values expressible in the type.
14604 ///
14605 /// This matches forValueOfCanonicalType except that enums have the
14606 /// full range of their type, not the range of their enumerators.
14607 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) {
14608 assert(T->isCanonicalUnqualified());
14609
14610 if (const VectorType *VT = dyn_cast<VectorType>(Val: T))
14611 T = VT->getElementType().getTypePtr();
14612 if (const ComplexType *CT = dyn_cast<ComplexType>(Val: T))
14613 T = CT->getElementType().getTypePtr();
14614 if (const AtomicType *AT = dyn_cast<AtomicType>(Val: T))
14615 T = AT->getValueType().getTypePtr();
14616 if (const EnumType *ET = dyn_cast<EnumType>(Val: T))
14617 T = C.getCanonicalType(T: ET->getDecl()->getIntegerType()).getTypePtr();
14618
14619 if (const auto *EIT = dyn_cast<BitIntType>(Val: T))
14620 return IntRange(EIT->getNumBits(), EIT->isUnsigned());
14621
14622 const BuiltinType *BT = cast<BuiltinType>(Val: T);
14623 assert(BT->isInteger());
14624
14625 return IntRange(C.getIntWidth(T: QualType(T, 0)), BT->isUnsignedInteger());
14626 }
14627
14628 /// Returns the supremum of two ranges: i.e. their conservative merge.
14629 static IntRange join(IntRange L, IntRange R) {
14630 bool Unsigned = L.NonNegative && R.NonNegative;
14631 return IntRange(std::max(a: L.valueBits(), b: R.valueBits()) + !Unsigned,
14632 L.NonNegative && R.NonNegative);
14633 }
14634
14635 /// Return the range of a bitwise-AND of the two ranges.
14636 static IntRange bit_and(IntRange L, IntRange R) {
14637 unsigned Bits = std::max(a: L.Width, b: R.Width);
14638 bool NonNegative = false;
14639 if (L.NonNegative) {
14640 Bits = std::min(a: Bits, b: L.Width);
14641 NonNegative = true;
14642 }
14643 if (R.NonNegative) {
14644 Bits = std::min(a: Bits, b: R.Width);
14645 NonNegative = true;
14646 }
14647 return IntRange(Bits, NonNegative);
14648 }
14649
14650 /// Return the range of a sum of the two ranges.
14651 static IntRange sum(IntRange L, IntRange R) {
14652 bool Unsigned = L.NonNegative && R.NonNegative;
14653 return IntRange(std::max(a: L.valueBits(), b: R.valueBits()) + 1 + !Unsigned,
14654 Unsigned);
14655 }
14656
14657 /// Return the range of a difference of the two ranges.
14658 static IntRange difference(IntRange L, IntRange R) {
14659 // We need a 1-bit-wider range if:
14660 // 1) LHS can be negative: least value can be reduced.
14661 // 2) RHS can be negative: greatest value can be increased.
14662 bool CanWiden = !L.NonNegative || !R.NonNegative;
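// The result is known to be non-negative only when nothing can actually be
// subtracted, i.e. the RHS is known to be exactly zero (Width == 0) and the
// LHS is non-negative.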
14663 bool Unsigned = L.NonNegative && R.Width == 0;
14664 return IntRange(std::max(a: L.valueBits(), b: R.valueBits()) + CanWiden +
14665 !Unsigned,
14666 Unsigned);
14667 }
14668
14669 /// Return the range of a product of the two ranges.
14670 static IntRange product(IntRange L, IntRange R) {
14671 // If both LHS and RHS can be negative, we can form
14672 // -2^L * -2^R = 2^(L + R)
14673 // which requires L + R + 1 value bits to represent.
14674 bool CanWiden = !L.NonNegative && !R.NonNegative;
14675 bool Unsigned = L.NonNegative && R.NonNegative;
14676 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned,
14677 Unsigned);
14678 }
14679
14680 /// Return the range of a remainder operation between the two ranges.
14681 static IntRange rem(IntRange L, IntRange R) {
14682 // The result of a remainder can't be larger than the result of
14683 // either side. The sign of the result is the sign of the LHS.
14684 bool Unsigned = L.NonNegative;
14685 return IntRange(std::min(a: L.valueBits(), b: R.valueBits()) + !Unsigned,
14686 Unsigned);
14687 }
14688};
14689
14690} // namespace
14691
14692static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
14693 unsigned MaxWidth) {
14694 if (value.isSigned() && value.isNegative())
14695 return IntRange(value.getSignificantBits(), false);
14696
14697 if (value.getBitWidth() > MaxWidth)
14698 value = value.trunc(width: MaxWidth);
14699
14700 // isNonNegative() just checks the sign bit without considering
14701 // signedness.
14702 return IntRange(value.getActiveBits(), true);
14703}
14704
14705static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
14706 unsigned MaxWidth) {
14707 if (result.isInt())
14708 return GetValueRange(C, value&: result.getInt(), MaxWidth);
14709
14710 if (result.isVector()) {
14711 IntRange R = GetValueRange(C, result&: result.getVectorElt(I: 0), Ty, MaxWidth);
14712 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
14713 IntRange El = GetValueRange(C, result&: result.getVectorElt(I: i), Ty, MaxWidth);
14714 R = IntRange::join(L: R, R: El);
14715 }
14716 return R;
14717 }
14718
14719 if (result.isComplexInt()) {
14720 IntRange R = GetValueRange(C, value&: result.getComplexIntReal(), MaxWidth);
14721 IntRange I = GetValueRange(C, value&: result.getComplexIntImag(), MaxWidth);
14722 return IntRange::join(L: R, R: I);
14723 }
14724
14725 // This can happen with lossless casts to intptr_t of "based" lvalues.
14726 // Assume it might use arbitrary bits.
14727 // FIXME: The only reason we need to pass the type in here is to get
14728 // the sign right on this one case. It would be nice if APValue
14729 // preserved this.
14730 assert(result.isLValue() || result.isAddrLabelDiff());
14731 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType());
14732}
14733
14734static QualType GetExprType(const Expr *E) {
14735 QualType Ty = E->getType();
14736 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>())
14737 Ty = AtomicRHS->getValueType();
14738 return Ty;
14739}
14740
14741/// Pseudo-evaluate the given integer expression, estimating the
14742/// range of values it might take.
14743///
14744/// \param MaxWidth The width to which the value will be truncated.
14745/// \param Approximate If \c true, return a likely range for the result: in
14746/// particular, assume that arithmetic on narrower types doesn't leave
14747/// those types. If \c false, return a range including all possible
14748/// result values.
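/// For instance, if 'x' has type 'unsigned char', the estimated range of
/// 'x + 1' is 9 bits wide when computed exactly but only 8 bits wide when
/// \p Approximate is true.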
14749static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
14750 bool InConstantContext, bool Approximate) {
14751 E = E->IgnoreParens();
14752
14753 // Try a full evaluation first.
14754 Expr::EvalResult result;
14755 if (E->EvaluateAsRValue(Result&: result, Ctx: C, InConstantContext))
14756 return GetValueRange(C, result&: result.Val, Ty: GetExprType(E), MaxWidth);
14757
14758 // I think we only want to look through implicit casts here; if the
14759 // user has an explicit widening cast, we should treat the value as
14760 // being of the new, wider type.
14761 if (const auto *CE = dyn_cast<ImplicitCastExpr>(Val: E)) {
14762 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
14763 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext,
14764 Approximate);
14765
14766 IntRange OutputTypeRange = IntRange::forValueOfType(C, T: GetExprType(CE));
14767
14768 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast ||
14769 CE->getCastKind() == CK_BooleanToSignedIntegral;
14770
14771 // Assume that non-integer casts can span the full range of the type.
14772 if (!isIntegerCast)
14773 return OutputTypeRange;
14774
14775 IntRange SubRange = GetExprRange(C, CE->getSubExpr(),
14776 std::min(a: MaxWidth, b: OutputTypeRange.Width),
14777 InConstantContext, Approximate);
14778
14779 // Bail out if the subexpr's range is as wide as the cast type.
14780 if (SubRange.Width >= OutputTypeRange.Width)
14781 return OutputTypeRange;
14782
14783 // Otherwise, we take the smaller width, and we're non-negative if
14784 // either the output type or the subexpr is.
14785 return IntRange(SubRange.Width,
14786 SubRange.NonNegative || OutputTypeRange.NonNegative);
14787 }
14788
14789 if (const auto *CO = dyn_cast<ConditionalOperator>(Val: E)) {
14790 // If we can fold the condition, just take that operand.
14791 bool CondResult;
14792 if (CO->getCond()->EvaluateAsBooleanCondition(Result&: CondResult, Ctx: C))
14793 return GetExprRange(C,
14794 E: CondResult ? CO->getTrueExpr() : CO->getFalseExpr(),
14795 MaxWidth, InConstantContext, Approximate);
14796
14797 // Otherwise, conservatively merge.
14798 // GetExprRange requires an integer expression, but a throw expression
14799 // results in a void type.
14800 Expr *E = CO->getTrueExpr();
14801 IntRange L = E->getType()->isVoidType()
14802 ? IntRange{0, true}
14803 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
14804 E = CO->getFalseExpr();
14805 IntRange R = E->getType()->isVoidType()
14806 ? IntRange{0, true}
14807 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
14808 return IntRange::join(L, R);
14809 }
14810
14811 if (const auto *BO = dyn_cast<BinaryOperator>(Val: E)) {
14812 IntRange (*Combine)(IntRange, IntRange) = IntRange::join;
14813
14814 switch (BO->getOpcode()) {
14815 case BO_Cmp:
14816 llvm_unreachable("builtin <=> should have class type");
14817
14818 // Boolean-valued operations are single-bit and positive.
14819 case BO_LAnd:
14820 case BO_LOr:
14821 case BO_LT:
14822 case BO_GT:
14823 case BO_LE:
14824 case BO_GE:
14825 case BO_EQ:
14826 case BO_NE:
14827 return IntRange::forBoolType();
14828
14829 // The type of these assignments is the type of the LHS, so the RHS
14830 // is not necessarily the same type.
14831 case BO_MulAssign:
14832 case BO_DivAssign:
14833 case BO_RemAssign:
14834 case BO_AddAssign:
14835 case BO_SubAssign:
14836 case BO_XorAssign:
14837 case BO_OrAssign:
14838 // TODO: bitfields?
14839 return IntRange::forValueOfType(C, T: GetExprType(E));
14840
14841 // Simple assignments just pass through the RHS, which will have
14842 // been coerced to the LHS type.
14843 case BO_Assign:
14844 // TODO: bitfields?
14845 return GetExprRange(C, E: BO->getRHS(), MaxWidth, InConstantContext,
14846 Approximate);
14847
14848 // Operations with opaque sources are black-listed.
14849 case BO_PtrMemD:
14850 case BO_PtrMemI:
14851 return IntRange::forValueOfType(C, T: GetExprType(E));
14852
14853 // Bitwise-and uses the *infimum* of the two source ranges.
14854 case BO_And:
14855 case BO_AndAssign:
14856 Combine = IntRange::bit_and;
14857 break;
14858
14859 // Left shift gets black-listed based on a judgement call.
14860 case BO_Shl:
14861 // ...except that we want to treat '1 << (blah)' as logically
14862 // positive. It's an important idiom.
14863 if (IntegerLiteral *I
14864 = dyn_cast<IntegerLiteral>(Val: BO->getLHS()->IgnoreParenCasts())) {
14865 if (I->getValue() == 1) {
14866 IntRange R = IntRange::forValueOfType(C, T: GetExprType(E));
14867 return IntRange(R.Width, /*NonNegative*/ true);
14868 }
14869 }
14870 [[fallthrough]];
14871
14872 case BO_ShlAssign:
14873 return IntRange::forValueOfType(C, T: GetExprType(E));
14874
14875 // Right shift by a constant can narrow its left argument.
14876 case BO_Shr:
14877 case BO_ShrAssign: {
14878 IntRange L = GetExprRange(C, E: BO->getLHS(), MaxWidth, InConstantContext,
14879 Approximate);
14880
14881 // If the shift amount is a positive constant, drop the width by
14882 // that much.
14883 if (std::optional<llvm::APSInt> shift =
14884 BO->getRHS()->getIntegerConstantExpr(Ctx: C)) {
14885 if (shift->isNonNegative()) {
14886 if (shift->uge(RHS: L.Width))
14887 L.Width = (L.NonNegative ? 0 : 1);
14888 else
14889 L.Width -= shift->getZExtValue();
14890 }
14891 }
14892
14893 return L;
14894 }
14895
14896 // Comma acts as its right operand.
14897 case BO_Comma:
14898 return GetExprRange(C, E: BO->getRHS(), MaxWidth, InConstantContext,
14899 Approximate);
14900
14901 case BO_Add:
14902 if (!Approximate)
14903 Combine = IntRange::sum;
14904 break;
14905
14906 case BO_Sub:
14907 if (BO->getLHS()->getType()->isPointerType())
14908 return IntRange::forValueOfType(C, T: GetExprType(E));
14909 if (!Approximate)
14910 Combine = IntRange::difference;
14911 break;
14912
14913 case BO_Mul:
14914 if (!Approximate)
14915 Combine = IntRange::product;
14916 break;
14917
14918 // The width of a division result is mostly determined by the size
14919 // of the LHS.
14920 case BO_Div: {
14921 // Don't 'pre-truncate' the operands.
14922 unsigned opWidth = C.getIntWidth(T: GetExprType(E));
14923 IntRange L = GetExprRange(C, E: BO->getLHS(), MaxWidth: opWidth, InConstantContext,
14924 Approximate);
14925
14926 // If the divisor is constant, use that.
14927 if (std::optional<llvm::APSInt> divisor =
14928 BO->getRHS()->getIntegerConstantExpr(Ctx: C)) {
14929 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor))
14930 if (log2 >= L.Width)
14931 L.Width = (L.NonNegative ? 0 : 1);
14932 else
14933 L.Width = std::min(a: L.Width - log2, b: MaxWidth);
14934 return L;
14935 }
14936
14937 // Otherwise, just use the LHS's width.
14938 // FIXME: This is wrong if the LHS could be its minimal value and the RHS
14939 // could be -1.
14940 IntRange R = GetExprRange(C, E: BO->getRHS(), MaxWidth: opWidth, InConstantContext,
14941 Approximate);
14942 return IntRange(L.Width, L.NonNegative && R.NonNegative);
14943 }
14944
14945 case BO_Rem:
14946 Combine = IntRange::rem;
14947 break;
14948
14949 // The default behavior is okay for these.
14950 case BO_Xor:
14951 case BO_Or:
14952 break;
14953 }
14954
14955 // Combine the two ranges, but limit the result to the type in which we
14956 // performed the computation.
14957 QualType T = GetExprType(E);
14958 unsigned opWidth = C.getIntWidth(T);
14959 IntRange L =
14960 GetExprRange(C, E: BO->getLHS(), MaxWidth: opWidth, InConstantContext, Approximate);
14961 IntRange R =
14962 GetExprRange(C, E: BO->getRHS(), MaxWidth: opWidth, InConstantContext, Approximate);
14963 IntRange C = Combine(L, R);
14964 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType();
14965 C.Width = std::min(a: C.Width, b: MaxWidth);
14966 return C;
14967 }
14968
14969 if (const auto *UO = dyn_cast<UnaryOperator>(Val: E)) {
14970 switch (UO->getOpcode()) {
14971 // Boolean-valued operations are white-listed.
14972 case UO_LNot:
14973 return IntRange::forBoolType();
14974
14975 // Operations with opaque sources are black-listed.
14976 case UO_Deref:
14977 case UO_AddrOf: // should be impossible
14978 return IntRange::forValueOfType(C, T: GetExprType(E));
14979
14980 default:
14981 return GetExprRange(C, E: UO->getSubExpr(), MaxWidth, InConstantContext,
14982 Approximate);
14983 }
14984 }
14985
14986 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Val: E))
14987 return GetExprRange(C, E: OVE->getSourceExpr(), MaxWidth, InConstantContext,
14988 Approximate);
14989
14990 if (const auto *BitField = E->getSourceBitField())
14991 return IntRange(BitField->getBitWidthValue(Ctx: C),
14992 BitField->getType()->isUnsignedIntegerOrEnumerationType());
14993
14994 return IntRange::forValueOfType(C, T: GetExprType(E));
14995}
14996
14997static IntRange GetExprRange(ASTContext &C, const Expr *E,
14998 bool InConstantContext, bool Approximate) {
14999 return GetExprRange(C, E, MaxWidth: C.getIntWidth(T: GetExprType(E)), InConstantContext,
15000 Approximate);
15001}
15002
15003/// Checks whether the given value, which currently has the given
15004/// source semantics, has the same value when coerced through the
15005/// target semantics.
15006static bool IsSameFloatAfterCast(const llvm::APFloat &value,
15007 const llvm::fltSemantics &Src,
15008 const llvm::fltSemantics &Tgt) {
15009 llvm::APFloat truncated = value;
15010
15011 bool ignored;
15012 truncated.convert(ToSemantics: Src, RM: llvm::APFloat::rmNearestTiesToEven, losesInfo: &ignored);
15013 truncated.convert(ToSemantics: Tgt, RM: llvm::APFloat::rmNearestTiesToEven, losesInfo: &ignored);
15014
15015 return truncated.bitwiseIsEqual(RHS: value);
15016}
15017
15018/// Checks whether the given value, which currently has the given
15019/// source semantics, has the same value when coerced through the
15020/// target semantics.
15021///
15022/// The value might be a vector of floats (or a complex number).
15023static bool IsSameFloatAfterCast(const APValue &value,
15024 const llvm::fltSemantics &Src,
15025 const llvm::fltSemantics &Tgt) {
15026 if (value.isFloat())
15027 return IsSameFloatAfterCast(value: value.getFloat(), Src, Tgt);
15028
15029 if (value.isVector()) {
15030 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i)
15031 if (!IsSameFloatAfterCast(value: value.getVectorElt(I: i), Src, Tgt))
15032 return false;
15033 return true;
15034 }
15035
15036 assert(value.isComplexFloat());
15037 return (IsSameFloatAfterCast(value: value.getComplexFloatReal(), Src, Tgt) &&
15038 IsSameFloatAfterCast(value: value.getComplexFloatImag(), Src, Tgt));
15039}
15040
15041static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC,
15042 bool IsListInit = false);
15043
15044static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) {
15045 // Suppress cases where we are comparing against an enum constant.
15046 if (const DeclRefExpr *DR =
15047 dyn_cast<DeclRefExpr>(Val: E->IgnoreParenImpCasts()))
15048 if (isa<EnumConstantDecl>(Val: DR->getDecl()))
15049 return true;
15050
15051 // Suppress cases where the value is expanded from a macro, unless that macro
15052 // is how a language represents a boolean literal. This is the case in both C
15053 // and Objective-C.
15054 SourceLocation BeginLoc = E->getBeginLoc();
15055 if (BeginLoc.isMacroID()) {
15056 StringRef MacroName = Lexer::getImmediateMacroName(
15057 Loc: BeginLoc, SM: S.getSourceManager(), LangOpts: S.getLangOpts());
15058 return MacroName != "YES" && MacroName != "NO" &&
15059 MacroName != "true" && MacroName != "false";
15060 }
15061
15062 return false;
15063}
15064
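// Returns true if E has integer type and either its type or the type of the
// expression with implicit casts stripped is unsigned (e.g. an 'unsigned char'
// value promoted to 'int').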
15065static bool isKnownToHaveUnsignedValue(Expr *E) {
15066 return E->getType()->isIntegerType() &&
15067 (!E->getType()->isSignedIntegerType() ||
15068 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType());
15069}
15070
15071namespace {
15072/// The promoted range of values of a type. In general this has the
15073/// following structure:
15074///
15075/// |-----------| . . . |-----------|
15076/// ^ ^ ^ ^
15077/// Min HoleMin HoleMax Max
15078///
15079/// ... where there is only a hole if a signed type is promoted to unsigned
15080/// (in which case Min and Max are the smallest and largest representable
15081/// values).
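/// For example, an 8-bit signed range promoted to a 32-bit unsigned type
/// covers [0, 127] and [0xFFFFFF80, 0xFFFFFFFF], with a hole in between.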
15082struct PromotedRange {
15083 // Min, or HoleMax if there is a hole.
15084 llvm::APSInt PromotedMin;
15085 // Max, or HoleMin if there is a hole.
15086 llvm::APSInt PromotedMax;
15087
15088 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) {
15089 if (R.Width == 0)
15090 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned);
15091 else if (R.Width >= BitWidth && !Unsigned) {
15092 // Promotion made the type *narrower*. This happens when promoting
15093 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'.
15094 // Treat all values of 'signed int' as being in range for now.
15095 PromotedMin = llvm::APSInt::getMinValue(numBits: BitWidth, Unsigned);
15096 PromotedMax = llvm::APSInt::getMaxValue(numBits: BitWidth, Unsigned);
15097 } else {
15098 PromotedMin = llvm::APSInt::getMinValue(numBits: R.Width, Unsigned: R.NonNegative)
15099 .extOrTrunc(width: BitWidth);
15100 PromotedMin.setIsUnsigned(Unsigned);
15101
15102 PromotedMax = llvm::APSInt::getMaxValue(numBits: R.Width, Unsigned: R.NonNegative)
15103 .extOrTrunc(width: BitWidth);
15104 PromotedMax.setIsUnsigned(Unsigned);
15105 }
15106 }
15107
15108 // Determine whether this range is contiguous (has no hole).
15109 bool isContiguous() const { return PromotedMin <= PromotedMax; }
15110
15111 // Where a constant value is within the range.
15112 enum ComparisonResult {
15113 LT = 0x1,
15114 LE = 0x2,
15115 GT = 0x4,
15116 GE = 0x8,
15117 EQ = 0x10,
15118 NE = 0x20,
15119 InRangeFlag = 0x40,
15120
15121 Less = LE | LT | NE,
15122 Min = LE | InRangeFlag,
15123 InRange = InRangeFlag,
15124 Max = GE | InRangeFlag,
15125 Greater = GE | GT | NE,
15126
15127 OnlyValue = LE | GE | EQ | InRangeFlag,
15128 InHole = NE
15129 };
15130
15131 ComparisonResult compare(const llvm::APSInt &Value) const {
15132 assert(Value.getBitWidth() == PromotedMin.getBitWidth() &&
15133 Value.isUnsigned() == PromotedMin.isUnsigned());
15134 if (!isContiguous()) {
15135 assert(Value.isUnsigned() && "discontiguous range for signed compare");
15136 if (Value.isMinValue()) return Min;
15137 if (Value.isMaxValue()) return Max;
15138 if (Value >= PromotedMin) return InRange;
15139 if (Value <= PromotedMax) return InRange;
15140 return InHole;
15141 }
15142
15143 switch (llvm::APSInt::compareValues(I1: Value, I2: PromotedMin)) {
15144 case -1: return Less;
15145 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min;
15146 case 1:
15147 switch (llvm::APSInt::compareValues(I1: Value, I2: PromotedMax)) {
15148 case -1: return InRange;
15149 case 0: return Max;
15150 case 1: return Greater;
15151 }
15152 }
15153
15154 llvm_unreachable("impossible compare result");
15155 }
15156
15157 static std::optional<StringRef>
15158 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) {
15159 if (Op == BO_Cmp) {
15160 ComparisonResult LTFlag = LT, GTFlag = GT;
15161 if (ConstantOnRHS) std::swap(a&: LTFlag, b&: GTFlag);
15162
15163 if (R & EQ) return StringRef("'std::strong_ordering::equal'");
15164 if (R & LTFlag) return StringRef("'std::strong_ordering::less'");
15165 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'");
15166 return std::nullopt;
15167 }
15168
15169 ComparisonResult TrueFlag, FalseFlag;
15170 if (Op == BO_EQ) {
15171 TrueFlag = EQ;
15172 FalseFlag = NE;
15173 } else if (Op == BO_NE) {
15174 TrueFlag = NE;
15175 FalseFlag = EQ;
15176 } else {
15177 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) {
15178 TrueFlag = LT;
15179 FalseFlag = GE;
15180 } else {
15181 TrueFlag = GT;
15182 FalseFlag = LE;
15183 }
15184 if (Op == BO_GE || Op == BO_LE)
15185 std::swap(a&: TrueFlag, b&: FalseFlag);
15186 }
15187 if (R & TrueFlag)
15188 return StringRef("true");
15189 if (R & FalseFlag)
15190 return StringRef("false");
15191 return std::nullopt;
15192 }
15193};
15194}
15195
15196static bool HasEnumType(Expr *E) {
15197 // Strip off implicit integral promotions.
15198 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Val: E)) {
15199 if (ICE->getCastKind() != CK_IntegralCast &&
15200 ICE->getCastKind() != CK_NoOp)
15201 break;
15202 E = ICE->getSubExpr();
15203 }
15204
15205 return E->getType()->isEnumeralType();
15206}
15207
15208static int classifyConstantValue(Expr *Constant) {
15209 // The values of this enumeration are used in the diagnostics
15210 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare.
15211 enum ConstantValueKind {
15212 Miscellaneous = 0,
15213 LiteralTrue,
15214 LiteralFalse
15215 };
15216 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Val: Constant))
15217 return BL->getValue() ? ConstantValueKind::LiteralTrue
15218 : ConstantValueKind::LiteralFalse;
15219 return ConstantValueKind::Miscellaneous;
15220}
15221
15222static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
15223 Expr *Constant, Expr *Other,
15224 const llvm::APSInt &Value,
15225 bool RhsConstant) {
15226 if (S.inTemplateInstantiation())
15227 return false;
15228
15229 Expr *OriginalOther = Other;
15230
15231 Constant = Constant->IgnoreParenImpCasts();
15232 Other = Other->IgnoreParenImpCasts();
15233
15234 // Suppress warnings on tautological comparisons between values of the same
15235 // enumeration type. There are only two ways we could warn on this:
15236 // - If the constant is outside the range of representable values of
15237 // the enumeration. In such a case, we should warn about the cast
15238 // to enumeration type, not about the comparison.
15239 // - If the constant is the maximum / minimum in-range value. For an
15240   //    enumeration type, such comparisons can be meaningful and useful.
15241 if (Constant->getType()->isEnumeralType() &&
15242 S.Context.hasSameUnqualifiedType(T1: Constant->getType(), T2: Other->getType()))
15243 return false;
15244
15245 IntRange OtherValueRange = GetExprRange(
15246 C&: S.Context, E: Other, InConstantContext: S.isConstantEvaluatedContext(), /*Approximate=*/false);
15247
15248 QualType OtherT = Other->getType();
15249 if (const auto *AT = OtherT->getAs<AtomicType>())
15250 OtherT = AT->getValueType();
15251 IntRange OtherTypeRange = IntRange::forValueOfType(C&: S.Context, T: OtherT);
15252
15253   // Special case for ObjC BOOL on targets where it's a typedef for a signed
15254   // char (namely, macOS). FIXME: IntRange::forValueOfType should do this.
15255 bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
15256 S.NSAPIObj->isObjCBOOLType(T: OtherT) &&
15257 OtherT->isSpecificBuiltinType(K: BuiltinType::SChar);
15258
15259 // Whether we're treating Other as being a bool because of the form of
15260 // expression despite it having another type (typically 'int' in C).
15261 bool OtherIsBooleanDespiteType =
15262 !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
15263 if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
15264 OtherTypeRange = OtherValueRange = IntRange::forBoolType();
15265
15266 // Check if all values in the range of possible values of this expression
15267 // lead to the same comparison outcome.
15268 PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
15269 Value.isUnsigned());
15270 auto Cmp = OtherPromotedValueRange.compare(Value);
15271 auto Result = PromotedRange::constantValue(Op: E->getOpcode(), R: Cmp, ConstantOnRHS: RhsConstant);
15272 if (!Result)
15273 return false;
15274
15275 // Also consider the range determined by the type alone. This allows us to
15276 // classify the warning under the proper diagnostic group.
15277 bool TautologicalTypeCompare = false;
15278 {
15279 PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
15280 Value.isUnsigned());
15281 auto TypeCmp = OtherPromotedTypeRange.compare(Value);
15282 if (auto TypeResult = PromotedRange::constantValue(Op: E->getOpcode(), R: TypeCmp,
15283 ConstantOnRHS: RhsConstant)) {
15284 TautologicalTypeCompare = true;
15285 Cmp = TypeCmp;
15286 Result = TypeResult;
15287 }
15288 }
15289
15290 // Don't warn if the non-constant operand actually always evaluates to the
15291 // same value.
15292 if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
15293 return false;
15294
15295 // Suppress the diagnostic for an in-range comparison if the constant comes
15296 // from a macro or enumerator. We don't want to diagnose
15297 //
15298 // some_long_value <= INT_MAX
15299 //
15300 // when sizeof(int) == sizeof(long).
15301 bool InRange = Cmp & PromotedRange::InRangeFlag;
15302 if (InRange && IsEnumConstOrFromMacro(S, E: Constant))
15303 return false;
15304
15305 // A comparison of an unsigned bit-field against 0 is really a type problem,
15306 // even though at the type level the bit-field might promote to 'signed int'.
15307 if (Other->refersToBitField() && InRange && Value == 0 &&
15308 Other->getType()->isUnsignedIntegerOrEnumerationType())
15309 TautologicalTypeCompare = true;
15310
15311 // If this is a comparison to an enum constant, include that
15312 // constant in the diagnostic.
15313 const EnumConstantDecl *ED = nullptr;
15314 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Val: Constant))
15315 ED = dyn_cast<EnumConstantDecl>(Val: DR->getDecl());
15316
15317 // Should be enough for uint128 (39 decimal digits)
15318 SmallString<64> PrettySourceValue;
15319 llvm::raw_svector_ostream OS(PrettySourceValue);
15320 if (ED) {
15321 OS << '\'' << *ED << "' (" << Value << ")";
15322 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>(
15323 Val: Constant->IgnoreParenImpCasts())) {
15324 OS << (BL->getValue() ? "YES" : "NO");
15325 } else {
15326 OS << Value;
15327 }
15328
15329 if (!TautologicalTypeCompare) {
15330 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range)
15331 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative
15332 << E->getOpcodeStr() << OS.str() << *Result
15333 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
15334 return true;
15335 }
15336
15337 if (IsObjCSignedCharBool) {
15338 S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
15339 S.PDiag(diag::warn_tautological_compare_objc_bool)
15340 << OS.str() << *Result);
15341 return true;
15342 }
15343
15344 // FIXME: We use a somewhat different formatting for the in-range cases and
15345 // cases involving boolean values for historical reasons. We should pick a
15346 // consistent way of presenting these diagnostics.
15347 if (!InRange || Other->isKnownToHaveBooleanValue()) {
15348
15349 S.DiagRuntimeBehavior(
15350 E->getOperatorLoc(), E,
15351 S.PDiag(!InRange ? diag::warn_out_of_range_compare
15352 : diag::warn_tautological_bool_compare)
15353 << OS.str() << classifyConstantValue(Constant) << OtherT
15354 << OtherIsBooleanDespiteType << *Result
15355 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange());
15356 } else {
15357 bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy;
15358 unsigned Diag =
15359 (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0)
15360 ? (HasEnumType(OriginalOther)
15361 ? diag::warn_unsigned_enum_always_true_comparison
15362 : IsCharTy ? diag::warn_unsigned_char_always_true_comparison
15363 : diag::warn_unsigned_always_true_comparison)
15364 : diag::warn_tautological_constant_compare;
15365
15366 S.Diag(E->getOperatorLoc(), Diag)
15367 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result
15368 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
15369 }
15370
15371 return true;
15372}
15373
15374/// Analyze the operands of the given comparison. Implements the
15375/// fallback case from AnalyzeComparison.
15376static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
15377 AnalyzeImplicitConversions(S, E: E->getLHS(), CC: E->getOperatorLoc());
15378 AnalyzeImplicitConversions(S, E: E->getRHS(), CC: E->getOperatorLoc());
15379}
15380
15381/// Implements -Wsign-compare.
15382///
15383/// \param E the binary operator to check for warnings
15384static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
15385 // The type the comparison is being performed in.
15386 QualType T = E->getLHS()->getType();
15387
15388 // Only analyze comparison operators where both sides have been converted to
15389 // the same type.
15390 if (!S.Context.hasSameUnqualifiedType(T1: T, T2: E->getRHS()->getType()))
15391 return AnalyzeImpConvsInComparison(S, E);
15392
15393 // Don't analyze value-dependent comparisons directly.
15394 if (E->isValueDependent())
15395 return AnalyzeImpConvsInComparison(S, E);
15396
15397 Expr *LHS = E->getLHS();
15398 Expr *RHS = E->getRHS();
15399
15400 if (T->isIntegralType(Ctx: S.Context)) {
15401 std::optional<llvm::APSInt> RHSValue =
15402 RHS->getIntegerConstantExpr(Ctx: S.Context);
15403 std::optional<llvm::APSInt> LHSValue =
15404 LHS->getIntegerConstantExpr(Ctx: S.Context);
15405
15406 // We don't care about expressions whose result is a constant.
15407 if (RHSValue && LHSValue)
15408 return AnalyzeImpConvsInComparison(S, E);
15409
15410 // We only care about expressions where just one side is literal
15411 if ((bool)RHSValue ^ (bool)LHSValue) {
15412 // Is the constant on the RHS or LHS?
15413 const bool RhsConstant = (bool)RHSValue;
15414 Expr *Const = RhsConstant ? RHS : LHS;
15415 Expr *Other = RhsConstant ? LHS : RHS;
15416 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue;
15417
15418 // Check whether an integer constant comparison results in a value
15419 // of 'true' or 'false'.
15420 if (CheckTautologicalComparison(S, E, Constant: Const, Other, Value, RhsConstant))
15421 return AnalyzeImpConvsInComparison(S, E);
15422 }
15423 }
15424
15425 if (!T->hasUnsignedIntegerRepresentation()) {
15426 // We don't do anything special if this isn't an unsigned integral
15427 // comparison: we're only interested in integral comparisons, and
15428 // signed comparisons only happen in cases we don't care to warn about.
15429 return AnalyzeImpConvsInComparison(S, E);
15430 }
15431
15432 LHS = LHS->IgnoreParenImpCasts();
15433 RHS = RHS->IgnoreParenImpCasts();
15434
15435 if (!S.getLangOpts().CPlusPlus) {
15436 // Avoid warning about comparison of integers with different signs when
15437 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of
15438 // the type of `E`.
15439 if (const auto *TET = dyn_cast<TypeOfExprType>(Val: LHS->getType()))
15440 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
15441 if (const auto *TET = dyn_cast<TypeOfExprType>(Val: RHS->getType()))
15442 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
15443 }
15444
15445 // Check to see if one of the (unmodified) operands is of different
15446 // signedness.
15447 Expr *signedOperand, *unsignedOperand;
15448 if (LHS->getType()->hasSignedIntegerRepresentation()) {
15449 assert(!RHS->getType()->hasSignedIntegerRepresentation() &&
15450 "unsigned comparison between two signed integer expressions?");
15451 signedOperand = LHS;
15452 unsignedOperand = RHS;
15453 } else if (RHS->getType()->hasSignedIntegerRepresentation()) {
15454 signedOperand = RHS;
15455 unsignedOperand = LHS;
15456 } else {
15457 return AnalyzeImpConvsInComparison(S, E);
15458 }
15459
15460 // Otherwise, calculate the effective range of the signed operand.
15461 IntRange signedRange =
15462 GetExprRange(C&: S.Context, E: signedOperand, InConstantContext: S.isConstantEvaluatedContext(),
15463 /*Approximate=*/true);
15464
15465 // Go ahead and analyze implicit conversions in the operands. Note
15466 // that we skip the implicit conversions on both sides.
15467 AnalyzeImplicitConversions(S, E: LHS, CC: E->getOperatorLoc());
15468 AnalyzeImplicitConversions(S, E: RHS, CC: E->getOperatorLoc());
15469
15470 // If the signed range is non-negative, -Wsign-compare won't fire.
15471 if (signedRange.NonNegative)
15472 return;
15473
15474 // For (in)equality comparisons, if the unsigned operand is a
15475   // constant which cannot collide with an overflowed signed operand,
15476 // then reinterpreting the signed operand as unsigned will not
15477 // change the result of the comparison.
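  // For example, 'x == 5u' with a signed 'x' is safe: 5 cannot collide with
  // any negative value of 'x' reinterpreted as unsigned.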
15478 if (E->isEqualityOp()) {
15479 unsigned comparisonWidth = S.Context.getIntWidth(T);
15480 IntRange unsignedRange =
15481 GetExprRange(C&: S.Context, E: unsignedOperand, InConstantContext: S.isConstantEvaluatedContext(),
15482 /*Approximate=*/true);
15483
15484 // We should never be unable to prove that the unsigned operand is
15485 // non-negative.
15486 assert(unsignedRange.NonNegative && "unsigned range includes negative?");
15487
15488 if (unsignedRange.Width < comparisonWidth)
15489 return;
15490 }
15491
15492 S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
15493 S.PDiag(diag::warn_mixed_sign_comparison)
15494 << LHS->getType() << RHS->getType()
15495 << LHS->getSourceRange() << RHS->getSourceRange());
15496}
15497
15498/// Analyzes an attempt to assign the given value to a bitfield.
15499///
15500/// Returns true if there was something fishy about the attempt.
15501static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
15502 SourceLocation InitLoc) {
15503 assert(Bitfield->isBitField());
15504 if (Bitfield->isInvalidDecl())
15505 return false;
15506
15507 // White-list bool bitfields.
15508 QualType BitfieldType = Bitfield->getType();
15509 if (BitfieldType->isBooleanType())
15510 return false;
15511
15512 if (BitfieldType->isEnumeralType()) {
15513 EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl();
15514 // If the underlying enum type was not explicitly specified as an unsigned
15515     // type and the enum contains only positive values, MSVC++ will cause an
15516 // inconsistency by storing this as a signed type.
15517 if (S.getLangOpts().CPlusPlus11 &&
15518 !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
15519 BitfieldEnumDecl->getNumPositiveBits() > 0 &&
15520 BitfieldEnumDecl->getNumNegativeBits() == 0) {
15521 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
15522 << BitfieldEnumDecl;
15523 }
15524 }
15525
15526 // Ignore value- or type-dependent expressions.
15527 if (Bitfield->getBitWidth()->isValueDependent() ||
15528 Bitfield->getBitWidth()->isTypeDependent() ||
15529 Init->isValueDependent() ||
15530 Init->isTypeDependent())
15531 return false;
15532
15533 Expr *OriginalInit = Init->IgnoreParenImpCasts();
15534 unsigned FieldWidth = Bitfield->getBitWidthValue(Ctx: S.Context);
15535
15536 Expr::EvalResult Result;
15537 if (!OriginalInit->EvaluateAsInt(Result, Ctx: S.Context,
15538 AllowSideEffects: Expr::SE_AllowSideEffects)) {
15539 // The RHS is not constant. If the RHS has an enum type, make sure the
15540 // bitfield is wide enough to hold all the values of the enum without
15541 // truncation.
15542 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
15543 EnumDecl *ED = EnumTy->getDecl();
15544 bool SignedBitfield = BitfieldType->isSignedIntegerType();
15545
15546 // Enum types are implicitly signed on Windows, so check if there are any
15547 // negative enumerators to see if the enum was intended to be signed or
15548 // not.
15549 bool SignedEnum = ED->getNumNegativeBits() > 0;
15550
15551 // Check for surprising sign changes when assigning enum values to a
15552 // bitfield of different signedness. If the bitfield is signed and we
15553 // have exactly the right number of bits to store this unsigned enum,
15554 // suggest changing the enum to an unsigned type. This typically happens
15555 // on Windows where unfixed enums always use an underlying type of 'int'.
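      // For example, an unfixed enum whose largest enumerator is 15 stored
      // into 'int Field : 4' would flip values 8 through 15 to negative.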
15556 unsigned DiagID = 0;
15557 if (SignedEnum && !SignedBitfield) {
15558 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum;
15559 } else if (SignedBitfield && !SignedEnum &&
15560 ED->getNumPositiveBits() == FieldWidth) {
15561 DiagID = diag::warn_signed_bitfield_enum_conversion;
15562 }
15563
15564 if (DiagID) {
15565 S.Diag(InitLoc, DiagID) << Bitfield << ED;
15566 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo();
15567 SourceRange TypeRange =
15568 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange();
15569 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign)
15570 << SignedEnum << TypeRange;
15571 }
15572
15573 // Compute the required bitwidth. If the enum has negative values, we need
15574 // one more bit than the normal number of positive bits to represent the
15575 // sign bit.
15576 unsigned BitsNeeded = SignedEnum ? std::max(a: ED->getNumPositiveBits() + 1,
15577 b: ED->getNumNegativeBits())
15578 : ED->getNumPositiveBits();
15579
15580 // Check the bitwidth.
15581 if (BitsNeeded > FieldWidth) {
15582 Expr *WidthExpr = Bitfield->getBitWidth();
15583 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum)
15584 << Bitfield << ED;
15585 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield)
15586 << BitsNeeded << ED << WidthExpr->getSourceRange();
15587 }
15588 }
15589
15590 return false;
15591 }
15592
15593 llvm::APSInt Value = Result.Val.getInt();
15594
15595 unsigned OriginalWidth = Value.getBitWidth();
15596
15597   // In C, the macro 'true' from stdbool.h will evaluate to '1'. To reduce
15598   // false positives where the user is demonstrating they intend to use the
15599   // bit-field as a Boolean, when the value is 1 and we're assigning to a
15600   // one-bit bit-field, check whether the value came from a macro named 'true'.
15601 bool OneAssignedToOneBitBitfield = FieldWidth == 1 && Value == 1;
15602 if (OneAssignedToOneBitBitfield && !S.LangOpts.CPlusPlus) {
15603 SourceLocation MaybeMacroLoc = OriginalInit->getBeginLoc();
15604 if (S.SourceMgr.isInSystemMacro(loc: MaybeMacroLoc) &&
15605 S.findMacroSpelling(loc&: MaybeMacroLoc, name: "true"))
15606 return false;
15607 }
15608
15609 if (!Value.isSigned() || Value.isNegative())
15610 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(Val: OriginalInit))
15611 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not)
15612 OriginalWidth = Value.getSignificantBits();
15613
15614 if (OriginalWidth <= FieldWidth)
15615 return false;
15616
15617 // Compute the value which the bitfield will contain.
15618 llvm::APSInt TruncatedValue = Value.trunc(width: FieldWidth);
15619 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType());
15620
15621 // Check whether the stored value is equal to the original value.
15622 TruncatedValue = TruncatedValue.extend(width: OriginalWidth);
15623 if (llvm::APSInt::isSameValue(I1: Value, I2: TruncatedValue))
15624 return false;
15625
15626 std::string PrettyValue = toString(I: Value, Radix: 10);
15627 std::string PrettyTrunc = toString(I: TruncatedValue, Radix: 10);
15628
15629 S.Diag(InitLoc, OneAssignedToOneBitBitfield
15630 ? diag::warn_impcast_single_bit_bitield_precision_constant
15631 : diag::warn_impcast_bitfield_precision_constant)
15632 << PrettyValue << PrettyTrunc << OriginalInit->getType()
15633 << Init->getSourceRange();
15634
15635 return true;
15636}
15637
15638/// Analyze the given simple or compound assignment for warning-worthy
15639/// operations.
15640static void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
15641 // Just recurse on the LHS.
15642 AnalyzeImplicitConversions(S, E: E->getLHS(), CC: E->getOperatorLoc());
15643
15644 // We want to recurse on the RHS as normal unless we're assigning to
15645 // a bitfield.
15646 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) {
15647 if (AnalyzeBitFieldAssignment(S, Bitfield, Init: E->getRHS(),
15648 InitLoc: E->getOperatorLoc())) {
15649 // Recurse, ignoring any implicit conversions on the RHS.
15650 return AnalyzeImplicitConversions(S, E: E->getRHS()->IgnoreParenImpCasts(),
15651 CC: E->getOperatorLoc());
15652 }
15653 }
15654
15655 AnalyzeImplicitConversions(S, E: E->getRHS(), CC: E->getOperatorLoc());
15656
15657 // Diagnose implicitly sequentially-consistent atomic assignment.
15658 if (E->getLHS()->getType()->isAtomicType())
15659 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
15660}
15661
15662/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
15663static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T,
15664 SourceLocation CContext, unsigned diag,
15665 bool pruneControlFlow = false) {
15666 if (pruneControlFlow) {
15667 S.DiagRuntimeBehavior(E->getExprLoc(), E,
15668 S.PDiag(DiagID: diag)
15669 << SourceType << T << E->getSourceRange()
15670 << SourceRange(CContext));
15671 return;
15672 }
15673 S.Diag(E->getExprLoc(), diag)
15674 << SourceType << T << E->getSourceRange() << SourceRange(CContext);
15675}
15676
15677/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
15678static void DiagnoseImpCast(Sema &S, Expr *E, QualType T,
15679 SourceLocation CContext,
15680 unsigned diag, bool pruneControlFlow = false) {
15681 DiagnoseImpCast(S, E, SourceType: E->getType(), T, CContext, diag, pruneControlFlow);
15682}
15683
15684static bool isObjCSignedCharBool(Sema &S, QualType Ty) {
15685 return Ty->isSpecificBuiltinType(K: BuiltinType::SChar) &&
15686 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(T: Ty);
15687}
15688
15689static void adornObjCBoolConversionDiagWithTernaryFixit(
15690 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) {
15691 Expr *Ignored = SourceExpr->IgnoreImplicit();
15692 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Val: Ignored))
15693 Ignored = OVE->getSourceExpr();
15694 bool NeedsParens = isa<AbstractConditionalOperator>(Val: Ignored) ||
15695 isa<BinaryOperator>(Val: Ignored) ||
15696 isa<CXXOperatorCallExpr>(Val: Ignored);
15697 SourceLocation EndLoc = S.getLocForEndOfToken(Loc: SourceExpr->getEndLoc());
15698 if (NeedsParens)
15699 Builder << FixItHint::CreateInsertion(InsertionLoc: SourceExpr->getBeginLoc(), Code: "(")
15700 << FixItHint::CreateInsertion(InsertionLoc: EndLoc, Code: ")");
15701 Builder << FixItHint::CreateInsertion(InsertionLoc: EndLoc, Code: " ? YES : NO");
15702}
15703
15704/// Diagnose an implicit cast from a floating point value to an integer value.
15705static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
15706 SourceLocation CContext) {
15707 const bool IsBool = T->isSpecificBuiltinType(K: BuiltinType::Bool);
15708 const bool PruneWarnings = S.inTemplateInstantiation();
15709
15710 Expr *InnerE = E->IgnoreParenImpCasts();
15711 // We also want to warn on, e.g., "int i = -1.234"
15712 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(Val: InnerE))
15713 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus)
15714 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts();
15715
15716 const bool IsLiteral =
15717 isa<FloatingLiteral>(Val: E) || isa<FloatingLiteral>(Val: InnerE);
15718
15719 llvm::APFloat Value(0.0);
15720 bool IsConstant =
15721 E->EvaluateAsFloat(Result&: Value, Ctx: S.Context, AllowSideEffects: Expr::SE_AllowSideEffects);
15722 if (!IsConstant) {
15723 if (isObjCSignedCharBool(S, Ty: T)) {
15724 return adornObjCBoolConversionDiagWithTernaryFixit(
15725 S, E,
15726 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool)
15727 << E->getType());
15728 }
15729
15730 return DiagnoseImpCast(S, E, T, CContext,
15731 diag::warn_impcast_float_integer, PruneWarnings);
15732 }
15733
15734 bool isExact = false;
15735
15736 llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
15737 T->hasUnsignedIntegerRepresentation());
15738 llvm::APFloat::opStatus Result = Value.convertToInteger(
15739 Result&: IntegerValue, RM: llvm::APFloat::rmTowardZero, IsExact: &isExact);
15740
15741 // FIXME: Force the precision of the source value down so we don't print
15742 // digits which are usually useless (we don't really care here if we
15743 // truncate a digit by accident in edge cases). Ideally, APFloat::toString
15744 // would automatically print the shortest representation, but it's a bit
15745 // tricky to implement.
15746 SmallString<16> PrettySourceValue;
15747 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics());
15748 precision = (precision * 59 + 195) / 196;
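  // 59/196 is just under log10(2), so this converts bits of precision into an
  // (approximate) count of decimal digits, rounding up.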
15749 Value.toString(Str&: PrettySourceValue, FormatPrecision: precision);
15750
15751 if (isObjCSignedCharBool(S, Ty: T) && IntegerValue != 0 && IntegerValue != 1) {
15752 return adornObjCBoolConversionDiagWithTernaryFixit(
15753 S, E,
15754 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool)
15755 << PrettySourceValue);
15756 }
15757
15758 if (Result == llvm::APFloat::opOK && isExact) {
15759 if (IsLiteral) return;
15760 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer,
15761 PruneWarnings);
15762 }
15763
15764 // Conversion of a floating-point value to a non-bool integer where the
15765 // integral part cannot be represented by the integer type is undefined.
15766 if (!IsBool && Result == llvm::APFloat::opInvalidOp)
15767 return DiagnoseImpCast(
15768 S, E, T, CContext,
15769 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range
15770 : diag::warn_impcast_float_to_integer_out_of_range,
15771 PruneWarnings);
15772
15773 unsigned DiagID = 0;
15774 if (IsLiteral) {
15775 // Warn on floating point literal to integer.
15776 DiagID = diag::warn_impcast_literal_float_to_integer;
15777 } else if (IntegerValue == 0) {
15778 if (Value.isZero()) { // Skip -0.0 to 0 conversion.
15779 return DiagnoseImpCast(S, E, T, CContext,
15780 diag::warn_impcast_float_integer, PruneWarnings);
15781 }
15782 // Warn on non-zero to zero conversion.
15783 DiagID = diag::warn_impcast_float_to_integer_zero;
15784 } else {
15785 if (IntegerValue.isUnsigned()) {
15786 if (!IntegerValue.isMaxValue()) {
15787 return DiagnoseImpCast(S, E, T, CContext,
15788 diag::warn_impcast_float_integer, PruneWarnings);
15789 }
15790 } else { // IntegerValue.isSigned()
15791 if (!IntegerValue.isMaxSignedValue() &&
15792 !IntegerValue.isMinSignedValue()) {
15793 return DiagnoseImpCast(S, E, T, CContext,
15794 diag::warn_impcast_float_integer, PruneWarnings);
15795 }
15796 }
15797 // Warn on evaluatable floating point expression to integer conversion.
15798 DiagID = diag::warn_impcast_float_to_integer;
15799 }
15800
15801 SmallString<16> PrettyTargetValue;
15802 if (IsBool)
15803 PrettyTargetValue = Value.isZero() ? "false" : "true";
15804 else
15805 IntegerValue.toString(Str&: PrettyTargetValue);
15806
15807 if (PruneWarnings) {
15808 S.DiagRuntimeBehavior(E->getExprLoc(), E,
15809 S.PDiag(DiagID)
15810 << E->getType() << T.getUnqualifiedType()
15811 << PrettySourceValue << PrettyTargetValue
15812 << E->getSourceRange() << SourceRange(CContext));
15813 } else {
15814 S.Diag(E->getExprLoc(), DiagID)
15815 << E->getType() << T.getUnqualifiedType() << PrettySourceValue
15816 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext);
15817 }
15818}
15819
15820/// Analyze the given compound assignment for the possible losing of
15821/// floating-point precision.
15822static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) {
15823 assert(isa<CompoundAssignOperator>(E) &&
15824 "Must be compound assignment operation");
15825 // Recurse on the LHS and RHS in here
15826 AnalyzeImplicitConversions(S, E: E->getLHS(), CC: E->getOperatorLoc());
15827 AnalyzeImplicitConversions(S, E: E->getRHS(), CC: E->getOperatorLoc());
15828
15829 if (E->getLHS()->getType()->isAtomicType())
15830 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst);
15831
15832 // Now check the outermost expression
15833 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>();
15834 const auto *RBT = cast<CompoundAssignOperator>(Val: E)
15835 ->getComputationResultType()
15836 ->getAs<BuiltinType>();
15837
15838 // The below checks assume source is floating point.
15839 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return;
15840
15841 // If source is floating point but target is an integer.
15842 if (ResultBT->isInteger())
15843 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(),
15844 E->getExprLoc(), diag::warn_impcast_float_integer);
15845
15846 if (!ResultBT->isFloatingPoint())
15847 return;
15848
15849   // If both source and target are floating-point types, warn about losing precision.
15850 int Order = S.getASTContext().getFloatingTypeSemanticOrder(
15851 LHS: QualType(ResultBT, 0), RHS: QualType(RBT, 0));
15852 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
15853 // warn about dropping FP rank.
15854 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(),
15855 diag::warn_impcast_float_result_precision);
15856}
15857
15858static std::string PrettyPrintInRange(const llvm::APSInt &Value,
15859 IntRange Range) {
15860 if (!Range.Width) return "0";
15861
15862 llvm::APSInt ValueInRange = Value;
15863 ValueInRange.setIsSigned(!Range.NonNegative);
15864 ValueInRange = ValueInRange.trunc(width: Range.Width);
15865 return toString(I: ValueInRange, Radix: 10);
15866}
15867
15868static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) {
15869 if (!isa<ImplicitCastExpr>(Val: Ex))
15870 return false;
15871
15872 Expr *InnerE = Ex->IgnoreParenImpCasts();
15873 const Type *Target = S.Context.getCanonicalType(T: Ex->getType()).getTypePtr();
15874 const Type *Source =
15875 S.Context.getCanonicalType(T: InnerE->getType()).getTypePtr();
15876 if (Target->isDependentType())
15877 return false;
15878
15879 const BuiltinType *FloatCandidateBT =
15880 dyn_cast<BuiltinType>(Val: ToBool ? Source : Target);
15881 const Type *BoolCandidateType = ToBool ? Target : Source;
15882
15883 return (BoolCandidateType->isSpecificBuiltinType(K: BuiltinType::Bool) &&
15884 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint()));
15885}
15886
15887static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall,
15888 SourceLocation CC) {
15889 unsigned NumArgs = TheCall->getNumArgs();
15890 for (unsigned i = 0; i < NumArgs; ++i) {
15891 Expr *CurrA = TheCall->getArg(Arg: i);
15892 if (!IsImplicitBoolFloatConversion(S, Ex: CurrA, ToBool: true))
15893 continue;
15894
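    // If an adjacent argument undergoes the opposite (bool-to-float)
    // conversion, the two arguments were likely swapped at the call site.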
15895 bool IsSwapped = ((i > 0) &&
15896 IsImplicitBoolFloatConversion(S, Ex: TheCall->getArg(Arg: i - 1), ToBool: false));
15897 IsSwapped |= ((i < (NumArgs - 1)) &&
15898 IsImplicitBoolFloatConversion(S, Ex: TheCall->getArg(Arg: i + 1), ToBool: false));
15899 if (IsSwapped) {
15900 // Warn on this floating-point to bool conversion.
15901 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(),
15902 CurrA->getType(), CC,
15903 diag::warn_impcast_floating_point_to_bool);
15904 }
15905 }
15906}
15907
15908static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
15909 SourceLocation CC) {
15910 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer,
15911 E->getExprLoc()))
15912 return;
15913
15914 // Don't warn on functions which have return type nullptr_t.
15915 if (isa<CallExpr>(Val: E))
15916 return;
15917
15918 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr).
15919 const Expr *NewE = E->IgnoreParenImpCasts();
15920 bool IsGNUNullExpr = isa<GNUNullExpr>(Val: NewE);
15921 bool HasNullPtrType = NewE->getType()->isNullPtrType();
15922 if (!IsGNUNullExpr && !HasNullPtrType)
15923 return;
15924
15925 // Return if target type is a safe conversion.
15926 if (T->isAnyPointerType() || T->isBlockPointerType() ||
15927 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType())
15928 return;
15929
15930 SourceLocation Loc = E->getSourceRange().getBegin();
15931
15932 // Venture through the macro stacks to get to the source of macro arguments.
15933 // The new location is a better location than the complete location that was
15934 // passed in.
15935 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc);
15936 CC = S.SourceMgr.getTopMacroCallerLoc(Loc: CC);
15937
15938 // __null is usually wrapped in a macro. Go up a macro if that is the case.
15939 if (IsGNUNullExpr && Loc.isMacroID()) {
15940 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
15941 Loc, SM: S.SourceMgr, LangOpts: S.getLangOpts());
15942 if (MacroName == "NULL")
15943 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin();
15944 }
15945
15946   // Only warn if the null and context locations are in the same macro expansion.
15947 if (S.SourceMgr.getFileID(SpellingLoc: Loc) != S.SourceMgr.getFileID(SpellingLoc: CC))
15948 return;
15949
15950 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
15951 << HasNullPtrType << T << SourceRange(CC)
15952 << FixItHint::CreateReplacement(Loc,
15953 S.getFixItZeroLiteralForType(T, Loc));
15954}
15955
15956static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
15957 ObjCArrayLiteral *ArrayLiteral);
15958
15959static void
15960checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
15961 ObjCDictionaryLiteral *DictionaryLiteral);
15962
15963/// Check a single element within a collection literal against the
15964/// target element type.
15965static void checkObjCCollectionLiteralElement(Sema &S,
15966 QualType TargetElementType,
15967 Expr *Element,
15968 unsigned ElementKind) {
15969 // Skip a bitcast to 'id' or qualified 'id'.
15970 if (auto ICE = dyn_cast<ImplicitCastExpr>(Val: Element)) {
15971 if (ICE->getCastKind() == CK_BitCast &&
15972 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>())
15973 Element = ICE->getSubExpr();
15974 }
15975
15976 QualType ElementType = Element->getType();
15977 ExprResult ElementResult(Element);
15978 if (ElementType->getAs<ObjCObjectPointerType>() &&
15979 S.CheckSingleAssignmentConstraints(LHSType: TargetElementType,
15980 RHS&: ElementResult,
15981 Diagnose: false, DiagnoseCFAudited: false)
15982 != Sema::Compatible) {
15983 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element)
15984 << ElementType << ElementKind << TargetElementType
15985 << Element->getSourceRange();
15986 }
15987
15988 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Val: Element))
15989 checkObjCArrayLiteral(S, TargetType: TargetElementType, ArrayLiteral);
15990 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Val: Element))
15991 checkObjCDictionaryLiteral(S, TargetType: TargetElementType, DictionaryLiteral);
15992}
15993
15994/// Check an Objective-C array literal being converted to the given
15995/// target type.
15996static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
15997 ObjCArrayLiteral *ArrayLiteral) {
15998 if (!S.NSArrayDecl)
15999 return;
16000
16001 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
16002 if (!TargetObjCPtr)
16003 return;
16004
16005 if (TargetObjCPtr->isUnspecialized() ||
16006 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
16007 != S.NSArrayDecl->getCanonicalDecl())
16008 return;
16009
16010 auto TypeArgs = TargetObjCPtr->getTypeArgs();
16011 if (TypeArgs.size() != 1)
16012 return;
16013
16014 QualType TargetElementType = TypeArgs[0];
16015 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) {
16016 checkObjCCollectionLiteralElement(S, TargetElementType,
16017 Element: ArrayLiteral->getElement(Index: I),
16018 ElementKind: 0);
16019 }
16020}
16021
16022/// Check an Objective-C dictionary literal being converted to the given
16023/// target type.
16024static void
16025checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
16026 ObjCDictionaryLiteral *DictionaryLiteral) {
16027 if (!S.NSDictionaryDecl)
16028 return;
16029
16030 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
16031 if (!TargetObjCPtr)
16032 return;
16033
16034 if (TargetObjCPtr->isUnspecialized() ||
16035 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
16036 != S.NSDictionaryDecl->getCanonicalDecl())
16037 return;
16038
16039 auto TypeArgs = TargetObjCPtr->getTypeArgs();
16040 if (TypeArgs.size() != 2)
16041 return;
16042
16043 QualType TargetKeyType = TypeArgs[0];
16044 QualType TargetObjectType = TypeArgs[1];
16045 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
16046 auto Element = DictionaryLiteral->getKeyValueElement(Index: I);
16047 checkObjCCollectionLiteralElement(S, TargetElementType: TargetKeyType, Element: Element.Key, ElementKind: 1);
16048 checkObjCCollectionLiteralElement(S, TargetElementType: TargetObjectType, Element: Element.Value, ElementKind: 2);
16049 }
16050}
16051
16052// Helper function to filter out cases for constant width constant conversion.
16053// Don't warn on char array initialization or for non-decimal values.
16054static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
16055 SourceLocation CC) {
16056 // If initializing from a constant, and the constant starts with '0',
16057 // then it is a binary, octal, or hexadecimal. Allow these constants
16058 // to fill all the bits, even if there is a sign change.
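  // For example, 'char c = 0xFF;' is presumably a deliberate bit pattern
  // rather than an accidental overflow, so it is not flagged.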
16059 if (auto *IntLit = dyn_cast<IntegerLiteral>(Val: E->IgnoreParenImpCasts())) {
16060 const char FirstLiteralCharacter =
16061 S.getSourceManager().getCharacterData(SL: IntLit->getBeginLoc())[0];
16062 if (FirstLiteralCharacter == '0')
16063 return false;
16064 }
16065
16066 // If the CC location points to a '{', and the type is char, then assume
16067   // it is an array initialization.
16068 if (CC.isValid() && T->isCharType()) {
16069 const char FirstContextCharacter =
16070 S.getSourceManager().getCharacterData(SL: CC)[0];
16071 if (FirstContextCharacter == '{')
16072 return false;
16073 }
16074
16075 return true;
16076}
16077
16078static const IntegerLiteral *getIntegerLiteral(Expr *E) {
16079 const auto *IL = dyn_cast<IntegerLiteral>(Val: E);
16080 if (!IL) {
16081 if (auto *UO = dyn_cast<UnaryOperator>(Val: E)) {
16082 if (UO->getOpcode() == UO_Minus)
16083 return dyn_cast<IntegerLiteral>(Val: UO->getSubExpr());
16084 }
16085 }
16086
16087 return IL;
16088}
16089
16090static void DiagnoseIntInBoolContext(Sema &S, Expr *E) {
16091 E = E->IgnoreParenImpCasts();
16092 SourceLocation ExprLoc = E->getExprLoc();
16093
16094 if (const auto *BO = dyn_cast<BinaryOperator>(Val: E)) {
16095 BinaryOperator::Opcode Opc = BO->getOpcode();
16096 Expr::EvalResult Result;
16097 // Do not diagnose unsigned shifts.
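    // For example, '0 << n' used as a boolean is always false, and '1 << 3'
    // is always true.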
16098 if (Opc == BO_Shl) {
16099 const auto *LHS = getIntegerLiteral(E: BO->getLHS());
16100 const auto *RHS = getIntegerLiteral(E: BO->getRHS());
16101 if (LHS && LHS->getValue() == 0)
16102 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0;
16103 else if (!E->isValueDependent() && LHS && RHS &&
16104 RHS->getValue().isNonNegative() &&
16105 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects))
16106 S.Diag(ExprLoc, diag::warn_left_shift_always)
16107 << (Result.Val.getInt() != 0);
16108 else if (E->getType()->isSignedIntegerType())
16109 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E;
16110 }
16111 }
16112
16113 if (const auto *CO = dyn_cast<ConditionalOperator>(Val: E)) {
16114 const auto *LHS = getIntegerLiteral(E: CO->getTrueExpr());
16115 const auto *RHS = getIntegerLiteral(E: CO->getFalseExpr());
16116 if (!LHS || !RHS)
16117 return;
16118 if ((LHS->getValue() == 0 || LHS->getValue() == 1) &&
16119 (RHS->getValue() == 0 || RHS->getValue() == 1))
16120 // Do not diagnose common idioms.
16121 return;
16122 if (LHS->getValue() != 0 && RHS->getValue() != 0)
16123 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true);
16124 }
16125}
16126
16127static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
16128 SourceLocation CC,
16129 bool *ICContext = nullptr,
16130 bool IsListInit = false) {
16131 if (E->isTypeDependent() || E->isValueDependent()) return;
16132
16133 const Type *Source = S.Context.getCanonicalType(T: E->getType()).getTypePtr();
16134 const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
16135 if (Source == Target) return;
16136 if (Target->isDependentType()) return;
16137
16138   // If the conversion context location is invalid, don't complain. We also
16139 // don't want to emit a warning if the issue occurs from the expansion of
16140 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
16141 // delay this check as long as possible. Once we detect we are in that
16142 // scenario, we just return.
16143 if (CC.isInvalid())
16144 return;
16145
16146 if (Source->isAtomicType())
16147 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);
16148
16149 // Diagnose implicit casts to bool.
16150 if (Target->isSpecificBuiltinType(K: BuiltinType::Bool)) {
16151 if (isa<StringLiteral>(E))
16152       // Warn on string literal to bool. Checks for string literals in logical
16153       // AND expressions, for instance, assert(0 && "error here"), are
16154 // prevented by a check in AnalyzeImplicitConversions().
16155 return DiagnoseImpCast(S, E, T, CC,
16156 diag::warn_impcast_string_literal_to_bool);
16157 if (isa<ObjCStringLiteral>(Val: E) || isa<ObjCArrayLiteral>(Val: E) ||
16158 isa<ObjCDictionaryLiteral>(Val: E) || isa<ObjCBoxedExpr>(Val: E)) {
16159 // This covers the literal expressions that evaluate to Objective-C
16160 // objects.
16161 return DiagnoseImpCast(S, E, T, CC,
16162 diag::warn_impcast_objective_c_literal_to_bool);
16163 }
16164 if (Source->isPointerType() || Source->canDecayToPointerType()) {
16165 // Warn on pointer to bool conversion that is always true.
16166 S.DiagnoseAlwaysNonNullPointer(E, NullType: Expr::NPCK_NotNull, /*IsEqual*/ false,
16167 Range: SourceRange(CC));
16168 }
16169 }
16170
16171   // If we're converting a constant to an ObjC BOOL on a platform where BOOL
16172 // is a typedef for signed char (macOS), then that constant value has to be 1
16173 // or 0.
16174 if (isObjCSignedCharBool(S, Ty: T) && Source->isIntegralType(Ctx: S.Context)) {
16175 Expr::EvalResult Result;
16176 if (E->EvaluateAsInt(Result, Ctx: S.getASTContext(),
16177 AllowSideEffects: Expr::SE_AllowSideEffects)) {
16178 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) {
16179 adornObjCBoolConversionDiagWithTernaryFixit(
16180 S, E,
16181 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool)
16182 << toString(Result.Val.getInt(), 10));
16183 }
16184 return;
16185 }
16186 }
16187
16188 // Check implicit casts from Objective-C collection literals to specialized
16189 // collection types, e.g., NSArray<NSString *> *.
16190 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Val: E))
16191 checkObjCArrayLiteral(S, TargetType: QualType(Target, 0), ArrayLiteral);
16192 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Val: E))
16193 checkObjCDictionaryLiteral(S, TargetType: QualType(Target, 0), DictionaryLiteral);
16194
16195 // Strip vector types.
16196 if (isa<VectorType>(Val: Source)) {
16197 if (Target->isSveVLSBuiltinType() &&
16198 (S.Context.areCompatibleSveTypes(FirstType: QualType(Target, 0),
16199 SecondType: QualType(Source, 0)) ||
16200 S.Context.areLaxCompatibleSveTypes(FirstType: QualType(Target, 0),
16201 SecondType: QualType(Source, 0))))
16202 return;
16203
16204 if (Target->isRVVVLSBuiltinType() &&
16205 (S.Context.areCompatibleRVVTypes(FirstType: QualType(Target, 0),
16206 SecondType: QualType(Source, 0)) ||
16207 S.Context.areLaxCompatibleRVVTypes(FirstType: QualType(Target, 0),
16208 SecondType: QualType(Source, 0))))
16209 return;
16210
16211 if (!isa<VectorType>(Val: Target)) {
16212 if (S.SourceMgr.isInSystemMacro(loc: CC))
16213 return;
16214 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar);
16215 } else if (S.getLangOpts().HLSL &&
16216 Target->castAs<VectorType>()->getNumElements() <
16217 Source->castAs<VectorType>()->getNumElements()) {
16218 // Diagnose vector truncation but don't return. We may also want to
16219 // diagnose an element conversion.
16220 DiagnoseImpCast(S, E, T, CC, diag::warn_hlsl_impcast_vector_truncation);
16221 }
16222
16223     // If the vector cast is between two vectors of the same size, it is
16224 // a bitcast, not a conversion, except under HLSL where it is a conversion.
16225 if (!S.getLangOpts().HLSL &&
16226 S.Context.getTypeSize(T: Source) == S.Context.getTypeSize(T: Target))
16227 return;
16228
16229 Source = cast<VectorType>(Val: Source)->getElementType().getTypePtr();
16230 Target = cast<VectorType>(Val: Target)->getElementType().getTypePtr();
16231 }
16232 if (auto VecTy = dyn_cast<VectorType>(Val: Target))
16233 Target = VecTy->getElementType().getTypePtr();
16234
16235 // Strip complex types.
16236 if (isa<ComplexType>(Val: Source)) {
16237 if (!isa<ComplexType>(Val: Target)) {
16238 if (S.SourceMgr.isInSystemMacro(loc: CC) || Target->isBooleanType())
16239 return;
16240
16241 return DiagnoseImpCast(S, E, T, CC,
16242 S.getLangOpts().CPlusPlus
16243 ? diag::err_impcast_complex_scalar
16244 : diag::warn_impcast_complex_scalar);
16245 }
16246
16247 Source = cast<ComplexType>(Val: Source)->getElementType().getTypePtr();
16248 Target = cast<ComplexType>(Val: Target)->getElementType().getTypePtr();
16249 }
16250
16251 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Val: Source);
16252 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Val: Target);
16253
16254 // Strip SVE vector types
16255 if (SourceBT && SourceBT->isSveVLSBuiltinType()) {
16256 // Need the original target type for vector type checks
16257 const Type *OriginalTarget = S.Context.getCanonicalType(T).getTypePtr();
16258 // Handle conversion from scalable to fixed when msve-vector-bits is
16259 // specified
16260 if (S.Context.areCompatibleSveTypes(FirstType: QualType(OriginalTarget, 0),
16261 SecondType: QualType(Source, 0)) ||
16262 S.Context.areLaxCompatibleSveTypes(FirstType: QualType(OriginalTarget, 0),
16263 SecondType: QualType(Source, 0)))
16264 return;
16265
16266     // If the vector cast is between two vectors of the same size, it is
16267 // a bitcast, not a conversion.
16268 if (S.Context.getTypeSize(T: Source) == S.Context.getTypeSize(T: Target))
16269 return;
16270
16271 Source = SourceBT->getSveEltType(S.Context).getTypePtr();
16272 }
16273
16274 if (TargetBT && TargetBT->isSveVLSBuiltinType())
16275 Target = TargetBT->getSveEltType(S.Context).getTypePtr();
16276
16277 // If the source is floating point...
16278 if (SourceBT && SourceBT->isFloatingPoint()) {
16279 // ...and the target is floating point...
16280 if (TargetBT && TargetBT->isFloatingPoint()) {
16281 // ...then warn if we're dropping FP rank.
16282
16283 int Order = S.getASTContext().getFloatingTypeSemanticOrder(
16284 LHS: QualType(SourceBT, 0), RHS: QualType(TargetBT, 0));
16285 if (Order > 0) {
16286 // Don't warn about float constants that are precisely
16287 // representable in the target type.
16288 Expr::EvalResult result;
16289 if (E->EvaluateAsRValue(Result&: result, Ctx: S.Context)) {
16290 // Value might be a float, a float vector, or a float complex.
16291 if (IsSameFloatAfterCast(value: result.Val,
16292 Src: S.Context.getFloatTypeSemantics(T: QualType(TargetBT, 0)),
16293 Tgt: S.Context.getFloatTypeSemantics(T: QualType(SourceBT, 0))))
16294 return;
16295 }
16296
16297 if (S.SourceMgr.isInSystemMacro(loc: CC))
16298 return;
16299
16300 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
16301 }
16302 // ... or possibly if we're increasing rank, too
16303 else if (Order < 0) {
16304 if (S.SourceMgr.isInSystemMacro(loc: CC))
16305 return;
16306
16307 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion);
16308 }
16309 return;
16310 }
16311
16312 // If the target is integral, always warn.
16313 if (TargetBT && TargetBT->isInteger()) {
16314 if (S.SourceMgr.isInSystemMacro(loc: CC))
16315 return;
16316
16317 DiagnoseFloatingImpCast(S, E, T, CContext: CC);
16318 }
16319
16320     // Detect the case where a call result is converted from floating-point
16321     // to bool, and the final argument to the call is converted from bool, to
16322 // discover this typo:
16323 //
16324 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;"
16325 //
16326 // FIXME: This is an incredibly special case; is there some more general
16327 // way to detect this class of misplaced-parentheses bug?
16328 if (Target->isBooleanType() && isa<CallExpr>(Val: E)) {
16329 // Check last argument of function call to see if it is an
16330 // implicit cast from a type matching the type the result
16331 // is being cast to.
16332 CallExpr *CEx = cast<CallExpr>(Val: E);
16333 if (unsigned NumArgs = CEx->getNumArgs()) {
16334 Expr *LastA = CEx->getArg(Arg: NumArgs - 1);
16335 Expr *InnerE = LastA->IgnoreParenImpCasts();
16336 if (isa<ImplicitCastExpr>(Val: LastA) &&
16337 InnerE->getType()->isBooleanType()) {
16338 // Warn on this floating-point to bool conversion
16339 DiagnoseImpCast(S, E, T, CC,
16340 diag::warn_impcast_floating_point_to_bool);
16341 }
16342 }
16343 }
16344 return;
16345 }
16346
16347 // Valid casts involving fixed point types should be accounted for here.
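// Illustrative sketch (assumes -ffixed-point): converting a fixed-point
// constant that does not fit the destination, e.g. a large _Accum value
// stored into a short _Accum, reaches the range warning below.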
16348 if (Source->isFixedPointType()) {
16349 if (Target->isUnsaturatedFixedPointType()) {
16350 Expr::EvalResult Result;
16351 if (E->EvaluateAsFixedPoint(Result, Ctx: S.Context, AllowSideEffects: Expr::SE_AllowSideEffects,
16352 InConstantContext: S.isConstantEvaluatedContext())) {
16353 llvm::APFixedPoint Value = Result.Val.getFixedPoint();
16354 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(Ty: T);
16355 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(Ty: T);
16356 if (Value > MaxVal || Value < MinVal) {
16357 S.DiagRuntimeBehavior(E->getExprLoc(), E,
16358 S.PDiag(diag::warn_impcast_fixed_point_range)
16359 << Value.toString() << T
16360 << E->getSourceRange()
16361 << clang::SourceRange(CC));
16362 return;
16363 }
16364 }
16365 } else if (Target->isIntegerType()) {
16366 Expr::EvalResult Result;
16367 if (!S.isConstantEvaluatedContext() &&
16368 E->EvaluateAsFixedPoint(Result, Ctx: S.Context,
16369 AllowSideEffects: Expr::SE_AllowSideEffects)) {
16370 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint();
16371
16372 bool Overflowed;
16373 llvm::APSInt IntResult = FXResult.convertToInt(
16374 DstWidth: S.Context.getIntWidth(T),
16375 DstSign: Target->isSignedIntegerOrEnumerationType(), Overflow: &Overflowed);
16376
16377 if (Overflowed) {
16378 S.DiagRuntimeBehavior(E->getExprLoc(), E,
16379 S.PDiag(diag::warn_impcast_fixed_point_range)
16380 << FXResult.toString() << T
16381 << E->getSourceRange()
16382 << clang::SourceRange(CC));
16383 return;
16384 }
16385 }
16386 }
16387 } else if (Target->isUnsaturatedFixedPointType()) {
16388 if (Source->isIntegerType()) {
16389 Expr::EvalResult Result;
16390 if (!S.isConstantEvaluatedContext() &&
16391 E->EvaluateAsInt(Result, Ctx: S.Context, AllowSideEffects: Expr::SE_AllowSideEffects)) {
16392 llvm::APSInt Value = Result.Val.getInt();
16393
16394 bool Overflowed;
16395 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue(
16396 Value, DstFXSema: S.Context.getFixedPointSemantics(Ty: T), Overflow: &Overflowed);
16397
16398 if (Overflowed) {
16399 S.DiagRuntimeBehavior(E->getExprLoc(), E,
16400 S.PDiag(diag::warn_impcast_fixed_point_range)
16401 << toString(Value, /*Radix=*/10) << T
16402 << E->getSourceRange()
16403 << clang::SourceRange(CC));
16404 return;
16405 }
16406 }
16407 }
16408 }
16409
16410 // If we are casting an integer type to a floating point type without
16411 // initialization-list syntax, we might lose accuracy if the floating
16412 // point type has a narrower significand than the integer type.
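// Illustrative (not from this file): "float f = 16777217;" cannot be
// represented exactly in float's 24-bit significand and is diagnosed, while
// "float f = 16777216;" converts exactly and stays silent.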
16413 if (SourceBT && TargetBT && SourceBT->isIntegerType() &&
16414 TargetBT->isFloatingType() && !IsListInit) {
16415 // Determine the number of precision bits in the source integer type.
16416 IntRange SourceRange =
16417 GetExprRange(C&: S.Context, E, InConstantContext: S.isConstantEvaluatedContext(),
16418 /*Approximate=*/true);
16419 unsigned int SourcePrecision = SourceRange.Width;
16420
16421 // Determine the number of precision bits in the
16422 // target floating point type.
16423 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision(
16424 S.Context.getFloatTypeSemantics(T: QualType(TargetBT, 0)));
16425
16426 if (SourcePrecision > 0 && TargetPrecision > 0 &&
16427 SourcePrecision > TargetPrecision) {
16428
16429 if (std::optional<llvm::APSInt> SourceInt =
16430 E->getIntegerConstantExpr(Ctx: S.Context)) {
16431 // If the source integer is a constant, convert it to the target
// floating point type. Issue a warning if the value changes
// as a result of the conversion.
16434 llvm::APFloat TargetFloatValue(
16435 S.Context.getFloatTypeSemantics(T: QualType(TargetBT, 0)));
16436 llvm::APFloat::opStatus ConversionStatus =
16437 TargetFloatValue.convertFromAPInt(
16438 Input: *SourceInt, IsSigned: SourceBT->isSignedInteger(),
16439 RM: llvm::APFloat::rmNearestTiesToEven);
16440
16441 if (ConversionStatus != llvm::APFloat::opOK) {
16442 SmallString<32> PrettySourceValue;
16443 SourceInt->toString(Str&: PrettySourceValue, Radix: 10);
16444 SmallString<32> PrettyTargetValue;
16445 TargetFloatValue.toString(Str&: PrettyTargetValue, FormatPrecision: TargetPrecision);
16446
16447 S.DiagRuntimeBehavior(
16448 E->getExprLoc(), E,
16449 S.PDiag(diag::warn_impcast_integer_float_precision_constant)
16450 << PrettySourceValue << PrettyTargetValue << E->getType() << T
16451 << E->getSourceRange() << clang::SourceRange(CC));
16452 }
16453 } else {
16454 // Otherwise, the implicit conversion may lose precision.
16455 DiagnoseImpCast(S, E, T, CC,
16456 diag::warn_impcast_integer_float_precision);
16457 }
16458 }
16459 }
16460
16461 DiagnoseNullConversion(S, E, T, CC);
16462
16463 S.DiscardMisalignedMemberAddress(T: Target, E);
16464
16465 if (Target->isBooleanType())
16466 DiagnoseIntInBoolContext(S, E);
16467
16468 if (!Source->isIntegerType() || !Target->isIntegerType())
16469 return;
16470
16471 // TODO: remove this early return once the false positives for constant->bool
16472 // in templates, macros, etc, are reduced or removed.
16473 if (Target->isSpecificBuiltinType(K: BuiltinType::Bool))
16474 return;
16475
16476 if (isObjCSignedCharBool(S, Ty: T) && !Source->isCharType() &&
16477 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) {
16478 return adornObjCBoolConversionDiagWithTernaryFixit(
16479 S, E,
16480 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool)
16481 << E->getType());
16482 }
16483
16484 IntRange SourceTypeRange =
16485 IntRange::forTargetOfCanonicalType(C&: S.Context, T: Source);
16486 IntRange LikelySourceRange = GetExprRange(
16487 C&: S.Context, E, InConstantContext: S.isConstantEvaluatedContext(), /*Approximate=*/true);
16488 IntRange TargetRange = IntRange::forTargetOfCanonicalType(C&: S.Context, T: Target);
16489
16490 if (LikelySourceRange.Width > TargetRange.Width) {
16491 // If the source is a constant, use a default-on diagnostic.
16492 // TODO: this should happen for bitfield stores, too.
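// e.g. (illustrative) "char c = 300;" is a constant that cannot be
// represented in 'char', so the default-on constant-conversion diagnostic
// below prints both the original and the truncated value.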
16493 Expr::EvalResult Result;
16494 if (E->EvaluateAsInt(Result, Ctx: S.Context, AllowSideEffects: Expr::SE_AllowSideEffects,
16495 InConstantContext: S.isConstantEvaluatedContext())) {
16496 llvm::APSInt Value(32);
16497 Value = Result.Val.getInt();
16498
16499 if (S.SourceMgr.isInSystemMacro(loc: CC))
16500 return;
16501
16502 std::string PrettySourceValue = toString(I: Value, Radix: 10);
16503 std::string PrettyTargetValue = PrettyPrintInRange(Value, Range: TargetRange);
16504
16505 S.DiagRuntimeBehavior(
16506 E->getExprLoc(), E,
16507 S.PDiag(diag::warn_impcast_integer_precision_constant)
16508 << PrettySourceValue << PrettyTargetValue << E->getType() << T
16509 << E->getSourceRange() << SourceRange(CC));
16510 return;
16511 }
16512
16513 // People want to build with -Wshorten-64-to-32 and not -Wconversion.
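// e.g. (illustrative, LP64) "long l = ...; int i = l;" hits the dedicated
// 64-to-32 diagnostic rather than the generic precision warning.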
16514 if (S.SourceMgr.isInSystemMacro(loc: CC))
16515 return;
16516
16517 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
16518 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
16519 /* pruneControlFlow */ true);
16520 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
16521 }
16522
16523 if (TargetRange.Width > SourceTypeRange.Width) {
16524 if (auto *UO = dyn_cast<UnaryOperator>(Val: E))
16525 if (UO->getOpcode() == UO_Minus)
16526 if (Source->isUnsignedIntegerType()) {
16527 if (Target->isUnsignedIntegerType())
16528 return DiagnoseImpCast(S, E, T, CC,
16529 diag::warn_impcast_high_order_zero_bits);
16530 if (Target->isSignedIntegerType())
16531 return DiagnoseImpCast(S, E, T, CC,
16532 diag::warn_impcast_nonnegative_result);
16533 }
16534 }
16535
16536 if (TargetRange.Width == LikelySourceRange.Width &&
16537 !TargetRange.NonNegative && LikelySourceRange.NonNegative &&
16538 Source->isSignedIntegerType()) {
// Warn on a signed-to-signed conversion where the positive source value
// needs exactly as many bits as the target type is wide, which causes a
// negative value to be stored.
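// e.g. (illustrative) "short s = 32768;" is a positive constant exactly as
// wide as 'short'; storing it flips the sign, changing the value to -32768,
// which is diagnosed below.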
16542
16543 Expr::EvalResult Result;
16544 if (E->EvaluateAsInt(Result, Ctx: S.Context, AllowSideEffects: Expr::SE_AllowSideEffects) &&
16545 !S.SourceMgr.isInSystemMacro(loc: CC)) {
16546 llvm::APSInt Value = Result.Val.getInt();
16547 if (isSameWidthConstantConversion(S, E, T, CC)) {
16548 std::string PrettySourceValue = toString(I: Value, Radix: 10);
16549 std::string PrettyTargetValue = PrettyPrintInRange(Value, Range: TargetRange);
16550
16551 S.DiagRuntimeBehavior(
16552 E->getExprLoc(), E,
16553 S.PDiag(diag::warn_impcast_integer_precision_constant)
16554 << PrettySourceValue << PrettyTargetValue << E->getType() << T
16555 << E->getSourceRange() << SourceRange(CC));
16556 return;
16557 }
16558 }
16559
16560 // Fall through for non-constants to give a sign conversion warning.
16561 }
16562
16563 if ((!isa<EnumType>(Val: Target) || !isa<EnumType>(Val: Source)) &&
16564 ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) ||
16565 (!TargetRange.NonNegative && LikelySourceRange.NonNegative &&
16566 LikelySourceRange.Width == TargetRange.Width))) {
16567 if (S.SourceMgr.isInSystemMacro(loc: CC))
16568 return;
16569
16570 if (SourceBT && SourceBT->isInteger() && TargetBT &&
16571 TargetBT->isInteger() &&
16572 Source->isSignedIntegerType() == Target->isSignedIntegerType()) {
16573 return;
16574 }
16575
16576 unsigned DiagID = diag::warn_impcast_integer_sign;
16577
16578 // Traditionally, gcc has warned about this under -Wsign-compare.
16579 // We also want to warn about it in -Wconversion.
16580 // So if -Wconversion is off, use a completely identical diagnostic
16581 // in the sign-compare group.
// The conditional-checking code will use the ICContext flag set below to
// decide whether additional checks against the condition type are needed.
16583 if (ICContext) {
16584 DiagID = diag::warn_impcast_integer_sign_conditional;
16585 *ICContext = true;
16586 }
16587
16588 return DiagnoseImpCast(S, E, T, CContext: CC, diag: DiagID);
16589 }
16590
16591 // Diagnose conversions between different enumeration types.
16592 // In C, we pretend that the type of an EnumConstantDecl is its enumeration
16593 // type, to give us better diagnostics.
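// e.g. (illustrative, C) assigning a value of "enum Fruit" to a variable of
// "enum Color" triggers the different-enum-types warning below.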
16594 QualType SourceType = E->getEnumCoercedType(Ctx: S.Context);
16595 Source = S.Context.getCanonicalType(T: SourceType).getTypePtr();
16596
16597 if (const EnumType *SourceEnum = Source->getAs<EnumType>())
16598 if (const EnumType *TargetEnum = Target->getAs<EnumType>())
16599 if (SourceEnum->getDecl()->hasNameForLinkage() &&
16600 TargetEnum->getDecl()->hasNameForLinkage() &&
16601 SourceEnum != TargetEnum) {
16602 if (S.SourceMgr.isInSystemMacro(loc: CC))
16603 return;
16604
16605 return DiagnoseImpCast(S, E, SourceType, T, CC,
16606 diag::warn_impcast_different_enum_types);
16607 }
16608}
16609
16610static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
16611 SourceLocation CC, QualType T);
16612
16613static void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
16614 SourceLocation CC, bool &ICContext) {
16615 E = E->IgnoreParenImpCasts();
16616 // Diagnose incomplete type for second or third operand in C.
16617 if (!S.getLangOpts().CPlusPlus && E->getType()->isRecordType())
16618 S.RequireCompleteExprType(E, diag::err_incomplete_type);
16619
16620 if (auto *CO = dyn_cast<AbstractConditionalOperator>(Val: E))
16621 return CheckConditionalOperator(S, E: CO, CC, T);
16622
16623 AnalyzeImplicitConversions(S, E, CC);
16624 if (E->getType() != T)
16625 return CheckImplicitConversion(S, E, T, CC, ICContext: &ICContext);
16626}
16627
16628static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
16629 SourceLocation CC, QualType T) {
16630 AnalyzeImplicitConversions(S, E: E->getCond(), CC: E->getQuestionLoc());
16631
16632 Expr *TrueExpr = E->getTrueExpr();
16633 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(Val: E))
16634 TrueExpr = BCO->getCommon();
16635
16636 bool Suspicious = false;
16637 CheckConditionalOperand(S, E: TrueExpr, T, CC, ICContext&: Suspicious);
16638 CheckConditionalOperand(S, E: E->getFalseExpr(), T, CC, ICContext&: Suspicious);
16639
16640 if (T->isBooleanType())
16641 DiagnoseIntInBoolContext(S, E);
16642
16643 // If -Wconversion would have warned about either of the candidates
16644 // for a signedness conversion to the context type...
16645 if (!Suspicious) return;
16646
16647 // ...but it's currently ignored...
16648 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC))
16649 return;
16650
16651 // ...then check whether it would have warned about either of the
16652 // candidates for a signedness conversion to the condition type.
16653 if (E->getType() == T) return;
16654
16655 Suspicious = false;
16656 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(),
16657 E->getType(), CC, &Suspicious);
16658 if (!Suspicious)
16659 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(),
16660 E->getType(), CC, &Suspicious);
16661}
16662
16663/// Check conversion of given expression to boolean.
16664/// Input argument E is a logical expression.
16665static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
// Run the bool-like conversion checks only for C, where bool is still not
// used as the return type of "boolean" operators or as the operand type of
// conditional operators.
16669 if (S.getLangOpts().CPlusPlus)
16670 return;
16671 if (E->IgnoreParenImpCasts()->getType()->isAtomicType())
16672 return;
16673 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
16674}
16675
16676namespace {
16677struct AnalyzeImplicitConversionsWorkItem {
16678 Expr *E;
16679 SourceLocation CC;
16680 bool IsListInit;
16681};
16682}
16683
16684/// Data recursive variant of AnalyzeImplicitConversions. Subexpressions
16685/// that should be visited are added to WorkList.
16686static void AnalyzeImplicitConversions(
16687 Sema &S, AnalyzeImplicitConversionsWorkItem Item,
16688 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) {
16689 Expr *OrigE = Item.E;
16690 SourceLocation CC = Item.CC;
16691
16692 QualType T = OrigE->getType();
16693 Expr *E = OrigE->IgnoreParenImpCasts();
16694
16695 // Propagate whether we are in a C++ list initialization expression.
16696 // If so, we do not issue warnings for implicit int-float conversion
16697 // precision loss, because C++11 narrowing already handles it.
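// e.g. (illustrative) "float f{16777217};" is already diagnosed by C++11
// narrowing rules, so the int-to-float precision check is skipped for it.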
16698 bool IsListInit = Item.IsListInit ||
16699 (isa<InitListExpr>(Val: OrigE) && S.getLangOpts().CPlusPlus);
16700
16701 if (E->isTypeDependent() || E->isValueDependent())
16702 return;
16703
16704 Expr *SourceExpr = E;
16705 // Examine, but don't traverse into the source expression of an
16706 // OpaqueValueExpr, since it may have multiple parents and we don't want to
// emit duplicate diagnostics. It's fine to examine the form or attempt to
16708 // evaluate it in the context of checking the specific conversion to T though.
16709 if (auto *OVE = dyn_cast<OpaqueValueExpr>(Val: E))
16710 if (auto *Src = OVE->getSourceExpr())
16711 SourceExpr = Src;
16712
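// Diagnose bitwise negation of a value known to be boolean, where the user
// almost certainly meant logical negation, e.g. (illustrative)
// "bool b = ...; if (~b) ..." suggests "!b" instead.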
16713 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr))
16714 if (UO->getOpcode() == UO_Not &&
16715 UO->getSubExpr()->isKnownToHaveBooleanValue())
16716 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool)
16717 << OrigE->getSourceRange() << T->isBooleanType()
16718 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!");
16719
16720 if (const auto *BO = dyn_cast<BinaryOperator>(Val: SourceExpr))
16721 if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) &&
16722 BO->getLHS()->isKnownToHaveBooleanValue() &&
16723 BO->getRHS()->isKnownToHaveBooleanValue() &&
16724 BO->getLHS()->HasSideEffects(Ctx: S.Context) &&
16725 BO->getRHS()->HasSideEffects(Ctx: S.Context)) {
16726 SourceManager &SM = S.getSourceManager();
16727 const LangOptions &LO = S.getLangOpts();
16728 SourceLocation BLoc = BO->getOperatorLoc();
16729 SourceLocation ELoc = Lexer::getLocForEndOfToken(Loc: BLoc, Offset: 0, SM, LangOpts: LO);
16730 StringRef SR = clang::Lexer::getSourceText(
16731 Range: clang::CharSourceRange::getTokenRange(B: BLoc, E: ELoc), SM, LangOpts: LO);
16732 // To reduce false positives, only issue the diagnostic if the operator
16733 // is explicitly spelled as a punctuator. This suppresses the diagnostic
16734 // when using 'bitand' or 'bitor' either as keywords in C++ or as macros
16735 // in C, along with other macro spellings the user might invent.
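// e.g. (illustrative) "if (f() & g())" where both calls return bool and have
// side effects suggests "&&" here; "if (f() bitand g())" is left alone.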
16736 if (SR.str() == "&" || SR.str() == "|") {
16737
16738 S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical)
16739 << (BO->getOpcode() == BO_And ? "&" : "|")
16740 << OrigE->getSourceRange()
16741 << FixItHint::CreateReplacement(
16742 BO->getOperatorLoc(),
16743 (BO->getOpcode() == BO_And ? "&&" : "||"));
16744 S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int);
16745 }
16746 }
16747
16748 // For conditional operators, we analyze the arguments as if they
16749 // were being fed directly into the output.
16750 if (auto *CO = dyn_cast<AbstractConditionalOperator>(Val: SourceExpr)) {
16751 CheckConditionalOperator(S, E: CO, CC, T);
16752 return;
16753 }
16754
16755 // Check implicit argument conversions for function calls.
16756 if (CallExpr *Call = dyn_cast<CallExpr>(Val: SourceExpr))
16757 CheckImplicitArgumentConversions(S, TheCall: Call, CC);
16758
16759 // Go ahead and check any implicit conversions we might have skipped.
16760 // The non-canonical typecheck is just an optimization;
16761 // CheckImplicitConversion will filter out dead implicit conversions.
16762 if (SourceExpr->getType() != T)
16763 CheckImplicitConversion(S, E: SourceExpr, T, CC, ICContext: nullptr, IsListInit);
16764
16765 // Now continue drilling into this expression.
16766
16767 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Val: E)) {
16768 // The bound subexpressions in a PseudoObjectExpr are not reachable
16769 // as transitive children.
16770 // FIXME: Use a more uniform representation for this.
16771 for (auto *SE : POE->semantics())
16772 if (auto *OVE = dyn_cast<OpaqueValueExpr>(Val: SE))
16773 WorkList.push_back(Elt: {.E: OVE->getSourceExpr(), .CC: CC, .IsListInit: IsListInit});
16774 }
16775
16776 // Skip past explicit casts.
16777 if (auto *CE = dyn_cast<ExplicitCastExpr>(Val: E)) {
16778 E = CE->getSubExpr()->IgnoreParenImpCasts();
16779 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType())
16780 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
16781 WorkList.push_back(Elt: {.E: E, .CC: CC, .IsListInit: IsListInit});
16782 return;
16783 }
16784
16785 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Val: E)) {
16786 // Do a somewhat different check with comparison operators.
16787 if (BO->isComparisonOp())
16788 return AnalyzeComparison(S, E: BO);
16789
16790 // And with simple assignments.
16791 if (BO->getOpcode() == BO_Assign)
16792 return AnalyzeAssignment(S, E: BO);
16793 // And with compound assignments.
16794 if (BO->isAssignmentOp())
16795 return AnalyzeCompoundAssignment(S, E: BO);
16796 }
16797
16798 // These break the otherwise-useful invariant below. Fortunately,
16799 // we don't really need to recurse into them, because any internal
16800 // expressions should have been analyzed already when they were
16801 // built into statements.
16802 if (isa<StmtExpr>(Val: E)) return;
16803
16804 // Don't descend into unevaluated contexts.
16805 if (isa<UnaryExprOrTypeTraitExpr>(Val: E)) return;
16806
16807 // Now just recurse over the expression's children.
16808 CC = E->getExprLoc();
16809 BinaryOperator *BO = dyn_cast<BinaryOperator>(Val: E);
16810 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd;
16811 for (Stmt *SubStmt : E->children()) {
16812 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt);
16813 if (!ChildExpr)
16814 continue;
16815
16816 if (auto *CSE = dyn_cast<CoroutineSuspendExpr>(E))
16817 if (ChildExpr == CSE->getOperand())
16818 // Do not recurse over a CoroutineSuspendExpr's operand.
16819 // The operand is also a subexpression of getCommonExpr(), and
16820 // recursing into it directly would produce duplicate diagnostics.
16821 continue;
16822
16823 if (IsLogicalAndOperator &&
16824 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
16825 // Ignore checking string literals that are in logical and operators.
16826 // This is a common pattern for asserts.
16827 continue;
16828 WorkList.push_back({ChildExpr, CC, IsListInit});
16829 }
16830
16831 if (BO && BO->isLogicalOp()) {
16832 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
16833 if (!IsLogicalAndOperator || !isa<StringLiteral>(Val: SubExpr))
16834 ::CheckBoolLikeConversion(S, E: SubExpr, CC: BO->getExprLoc());
16835
16836 SubExpr = BO->getRHS()->IgnoreParenImpCasts();
16837 if (!IsLogicalAndOperator || !isa<StringLiteral>(Val: SubExpr))
16838 ::CheckBoolLikeConversion(S, E: SubExpr, CC: BO->getExprLoc());
16839 }
16840
16841 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(Val: E)) {
16842 if (U->getOpcode() == UO_LNot) {
16843 ::CheckBoolLikeConversion(S, E: U->getSubExpr(), CC);
16844 } else if (U->getOpcode() != UO_AddrOf) {
16845 if (U->getSubExpr()->getType()->isAtomicType())
16846 S.Diag(U->getSubExpr()->getBeginLoc(),
16847 diag::warn_atomic_implicit_seq_cst);
16848 }
16849 }
16850}
16851
16852/// AnalyzeImplicitConversions - Find and report any interesting
16853/// implicit conversions in the given expression. There are a couple
16854/// of competing diagnostics here, -Wconversion and -Wsign-compare.
16855static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
16856 bool IsListInit/*= false*/) {
16857 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
16858 WorkList.push_back(Elt: {.E: OrigE, .CC: CC, .IsListInit: IsListInit});
16859 while (!WorkList.empty())
16860 AnalyzeImplicitConversions(S, Item: WorkList.pop_back_val(), WorkList);
16861}
16862
/// Check that \p E has an integer type and diagnose any implicit conversion
/// needed to reach the required integer type \p IntT.
16864static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
16865 // Taking into account implicit conversions,
16866 // allow any integer.
16867 if (!E->getType()->isIntegerType()) {
16868 S.Diag(E->getBeginLoc(),
16869 diag::err_opencl_enqueue_kernel_invalid_local_size_type);
16870 return true;
16871 }
16872 // Potentially emit standard warnings for implicit conversions if enabled
16873 // using -Wconversion.
16874 CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
16875 return false;
16876}
16877
16878// Helper function for Sema::DiagnoseAlwaysNonNullPointer.
16879// Returns true when emitting a warning about taking the address of a reference.
16880static bool CheckForReference(Sema &SemaRef, const Expr *E,
16881 const PartialDiagnostic &PD) {
16882 E = E->IgnoreParenImpCasts();
16883
16884 const FunctionDecl *FD = nullptr;
16885
16886 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Val: E)) {
16887 if (!DRE->getDecl()->getType()->isReferenceType())
16888 return false;
16889 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(Val: E)) {
16890 if (!M->getMemberDecl()->getType()->isReferenceType())
16891 return false;
16892 } else if (const CallExpr *Call = dyn_cast<CallExpr>(Val: E)) {
16893 if (!Call->getCallReturnType(Ctx: SemaRef.Context)->isReferenceType())
16894 return false;
16895 FD = Call->getDirectCallee();
16896 } else {
16897 return false;
16898 }
16899
16900 SemaRef.Diag(E->getExprLoc(), PD);
16901
16902 // If possible, point to location of function.
16903 if (FD) {
16904 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
16905 }
16906
16907 return true;
16908}
16909
16910// Returns true if the SourceLocation is expanded from any macro body.
// Returns false if the SourceLocation is invalid, is not in a macro
// expansion, or is expanded from a top-level macro argument.
16913static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
16914 if (Loc.isInvalid())
16915 return false;
16916
16917 while (Loc.isMacroID()) {
16918 if (SM.isMacroBodyExpansion(Loc))
16919 return true;
16920 Loc = SM.getImmediateMacroCallerLoc(Loc);
16921 }
16922
16923 return false;
16924}
16925
16926/// Diagnose pointers that are always non-null.
16927/// \param E the expression containing the pointer
16928/// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
16929/// compared to a null pointer
16930/// \param IsEqual True when the comparison is equal to a null pointer
16931/// \param Range Extra SourceRange to highlight in the diagnostic
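///
/// Illustrative cases (not from this file): "if (this)", "if (&some_ref)"
/// where some_ref is a reference, "if (func_name)", and "if (array_name)"
/// are all always true and are diagnosed here.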
16932void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
16933 Expr::NullPointerConstantKind NullKind,
16934 bool IsEqual, SourceRange Range) {
16935 if (!E)
16936 return;
16937
16938 // Don't warn inside macros.
16939 if (E->getExprLoc().isMacroID()) {
16940 const SourceManager &SM = getSourceManager();
16941 if (IsInAnyMacroBody(SM, Loc: E->getExprLoc()) ||
16942 IsInAnyMacroBody(SM, Loc: Range.getBegin()))
16943 return;
16944 }
16945 E = E->IgnoreImpCasts();
16946
16947 const bool IsCompare = NullKind != Expr::NPCK_NotNull;
16948
16949 if (isa<CXXThisExpr>(Val: E)) {
16950 unsigned DiagID = IsCompare ? diag::warn_this_null_compare
16951 : diag::warn_this_bool_conversion;
16952 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual;
16953 return;
16954 }
16955
16956 bool IsAddressOf = false;
16957
16958 if (auto *UO = dyn_cast<UnaryOperator>(Val: E->IgnoreParens())) {
16959 if (UO->getOpcode() != UO_AddrOf)
16960 return;
16961 IsAddressOf = true;
16962 E = UO->getSubExpr();
16963 }
16964
16965 if (IsAddressOf) {
16966 unsigned DiagID = IsCompare
16967 ? diag::warn_address_of_reference_null_compare
16968 : diag::warn_address_of_reference_bool_conversion;
16969 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range
16970 << IsEqual;
16971 if (CheckForReference(SemaRef&: *this, E, PD)) {
16972 return;
16973 }
16974 }
16975
16976 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) {
16977 bool IsParam = isa<NonNullAttr>(NonnullAttr);
16978 std::string Str;
16979 llvm::raw_string_ostream S(Str);
16980 E->printPretty(S, nullptr, getPrintingPolicy());
16981 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare
16982 : diag::warn_cast_nonnull_to_bool;
16983 Diag(E->getExprLoc(), DiagID) << IsParam << S.str()
16984 << E->getSourceRange() << Range << IsEqual;
16985 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam;
16986 };
16987
16988 // If we have a CallExpr that is tagged with returns_nonnull, we can complain.
16989 if (auto *Call = dyn_cast<CallExpr>(Val: E->IgnoreParenImpCasts())) {
16990 if (auto *Callee = Call->getDirectCallee()) {
16991 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) {
16992 ComplainAboutNonnullParamOrCall(A);
16993 return;
16994 }
16995 }
16996 }
16997
16998 // Complain if we are converting a lambda expression to a boolean value
16999 // outside of instantiation.
17000 if (!inTemplateInstantiation()) {
17001 if (const auto *MCallExpr = dyn_cast<CXXMemberCallExpr>(Val: E)) {
17002 if (const auto *MRecordDecl = MCallExpr->getRecordDecl();
17003 MRecordDecl && MRecordDecl->isLambda()) {
17004 Diag(E->getExprLoc(), diag::warn_impcast_pointer_to_bool)
17005 << /*LambdaPointerConversionOperatorType=*/3
17006 << MRecordDecl->getSourceRange() << Range << IsEqual;
17007 return;
17008 }
17009 }
17010 }
17011
17012 // Expect to find a single Decl. Skip anything more complicated.
17013 ValueDecl *D = nullptr;
17014 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(Val: E)) {
17015 D = R->getDecl();
17016 } else if (MemberExpr *M = dyn_cast<MemberExpr>(Val: E)) {
17017 D = M->getMemberDecl();
17018 }
17019
17020 // Weak Decls can be null.
17021 if (!D || D->isWeak())
17022 return;
17023
17024 // Check for parameter decl with nonnull attribute
17025 if (const auto* PV = dyn_cast<ParmVarDecl>(Val: D)) {
17026 if (getCurFunction() &&
17027 !getCurFunction()->ModifiedNonNullParams.count(Ptr: PV)) {
17028 if (const Attr *A = PV->getAttr<NonNullAttr>()) {
17029 ComplainAboutNonnullParamOrCall(A);
17030 return;
17031 }
17032
17033 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
17034 // Skip function template not specialized yet.
17035 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
17036 return;
17037 auto ParamIter = llvm::find(FD->parameters(), PV);
17038 assert(ParamIter != FD->param_end());
17039 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
17040
17041 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
17042 if (!NonNull->args_size()) {
17043 ComplainAboutNonnullParamOrCall(NonNull);
17044 return;
17045 }
17046
17047 for (const ParamIdx &ArgNo : NonNull->args()) {
17048 if (ArgNo.getASTIndex() == ParamNo) {
17049 ComplainAboutNonnullParamOrCall(NonNull);
17050 return;
17051 }
17052 }
17053 }
17054 }
17055 }
17056 }
17057
17058 QualType T = D->getType();
17059 const bool IsArray = T->isArrayType();
17060 const bool IsFunction = T->isFunctionType();
17061
17062 // Address of function is used to silence the function warning.
17063 if (IsAddressOf && IsFunction) {
17064 return;
17065 }
17066
17067 // Found nothing.
17068 if (!IsAddressOf && !IsFunction && !IsArray)
17069 return;
17070
17071 // Pretty print the expression for the diagnostic.
17072 std::string Str;
17073 llvm::raw_string_ostream S(Str);
17074 E->printPretty(S, nullptr, getPrintingPolicy());
17075
17076 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
17077 : diag::warn_impcast_pointer_to_bool;
17078 enum {
17079 AddressOf,
17080 FunctionPointer,
17081 ArrayPointer
17082 } DiagType;
17083 if (IsAddressOf)
17084 DiagType = AddressOf;
17085 else if (IsFunction)
17086 DiagType = FunctionPointer;
17087 else if (IsArray)
17088 DiagType = ArrayPointer;
17089 else
17090 llvm_unreachable("Could not determine diagnostic.");
17091 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
17092 << Range << IsEqual;
17093
17094 if (!IsFunction)
17095 return;
17096
17097 // Suggest '&' to silence the function warning.
17098 Diag(E->getExprLoc(), diag::note_function_warning_silence)
17099 << FixItHint::CreateInsertion(E->getBeginLoc(), "&");
17100
17101 // Check to see if '()' fixit should be emitted.
17102 QualType ReturnType;
17103 UnresolvedSet<4> NonTemplateOverloads;
17104 tryExprAsCall(E&: *E, ZeroArgCallReturnTy&: ReturnType, NonTemplateOverloads);
17105 if (ReturnType.isNull())
17106 return;
17107
17108 if (IsCompare) {
// There are two cases here. If there is a null constant, only suggest the
// fixit for a pointer return type. If the null is 0, then suggest it if the
// return type is a pointer or an integer type.
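// e.g. (illustrative) for "int f();", comparing "f" against a literal 0
// still gets the "()" fixit since the return type is an integer, while
// comparing it against nullptr does not.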
17112 if (!ReturnType->isPointerType()) {
17113 if (NullKind == Expr::NPCK_ZeroExpression ||
17114 NullKind == Expr::NPCK_ZeroLiteral) {
17115 if (!ReturnType->isIntegerType())
17116 return;
17117 } else {
17118 return;
17119 }
17120 }
17121 } else { // !IsCompare
17122 // For function to bool, only suggest if the function pointer has bool
17123 // return type.
17124 if (!ReturnType->isSpecificBuiltinType(K: BuiltinType::Bool))
17125 return;
17126 }
17127 Diag(E->getExprLoc(), diag::note_function_to_function_call)
17128 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
17129}
17130
17131/// Diagnoses "dangerous" implicit conversions within the given
17132/// expression (which is a full expression). Implements -Wconversion
17133/// and -Wsign-compare.
17134///
17135/// \param CC the "context" location of the implicit conversion, i.e.
/// the location of the syntactic entity requiring the implicit
17137/// conversion
17138void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
17139 // Don't diagnose in unevaluated contexts.
17140 if (isUnevaluatedContext())
17141 return;
17142
17143 // Don't diagnose for value- or type-dependent expressions.
17144 if (E->isTypeDependent() || E->isValueDependent())
17145 return;
17146
17147 // Check for array bounds violations in cases where the check isn't triggered
17148 // elsewhere for other Expr types (like BinaryOperators), e.g. when an
17149 // ArraySubscriptExpr is on the RHS of a variable initialization.
17150 CheckArrayAccess(E);
17151
17152 // This is not the right CC for (e.g.) a variable initialization.
17153 AnalyzeImplicitConversions(S&: *this, OrigE: E, CC);
17154}
17155
17156/// CheckBoolLikeConversion - Check conversion of given expression to boolean.
17157/// Input argument E is a logical expression.
17158void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
17159 ::CheckBoolLikeConversion(S&: *this, E, CC);
17160}
17161
/// Diagnose when an expression is an integer constant expression and its
/// evaluation results in integer overflow.
17164void Sema::CheckForIntOverflow (const Expr *E) {
17165 // Use a work list to deal with nested struct initializers.
17166 SmallVector<const Expr *, 2> Exprs(1, E);
17167
17168 do {
17169 const Expr *OriginalE = Exprs.pop_back_val();
17170 const Expr *E = OriginalE->IgnoreParenCasts();
17171
17172 if (isa<BinaryOperator, UnaryOperator>(Val: E)) {
17173 E->EvaluateForOverflow(Ctx: Context);
17174 continue;
17175 }
17176
17177 if (const auto *InitList = dyn_cast<InitListExpr>(Val: OriginalE))
17178 Exprs.append(in_start: InitList->inits().begin(), in_end: InitList->inits().end());
17179 else if (isa<ObjCBoxedExpr>(Val: OriginalE))
17180 E->EvaluateForOverflow(Ctx: Context);
17181 else if (const auto *Call = dyn_cast<CallExpr>(Val: E))
17182 Exprs.append(Call->arg_begin(), Call->arg_end());
17183 else if (const auto *Message = dyn_cast<ObjCMessageExpr>(Val: E))
17184 Exprs.append(Message->arg_begin(), Message->arg_end());
17185 else if (const auto *Construct = dyn_cast<CXXConstructExpr>(Val: E))
17186 Exprs.append(Construct->arg_begin(), Construct->arg_end());
17187 else if (const auto *Temporary = dyn_cast<CXXBindTemporaryExpr>(Val: E))
17188 Exprs.push_back(Elt: Temporary->getSubExpr());
17189 else if (const auto *Array = dyn_cast<ArraySubscriptExpr>(Val: E))
17190 Exprs.push_back(Elt: Array->getIdx());
17191 else if (const auto *Compound = dyn_cast<CompoundLiteralExpr>(Val: E))
17192 Exprs.push_back(Elt: Compound->getInitializer());
17193 else if (const auto *New = dyn_cast<CXXNewExpr>(Val: E);
17194 New && New->isArray()) {
17195 if (auto ArraySize = New->getArraySize())
17196 Exprs.push_back(Elt: *ArraySize);
17197 }
17198 } while (!Exprs.empty());
17199}
17200
17201namespace {
17202
17203/// Visitor for expressions which looks for unsequenced operations on the
17204/// same object.
17205class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
17206 using Base = ConstEvaluatedExprVisitor<SequenceChecker>;
17207
17208 /// A tree of sequenced regions within an expression. Two regions are
/// unsequenced if one is an ancestor or a descendant of the other. When we
17210 /// finish processing an expression with sequencing, such as a comma
17211 /// expression, we fold its tree nodes into its parent, since they are
17212 /// unsequenced with respect to nodes we will visit later.
17213 class SequenceTree {
17214 struct Value {
17215 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {}
17216 unsigned Parent : 31;
17217 LLVM_PREFERRED_TYPE(bool)
17218 unsigned Merged : 1;
17219 };
17220 SmallVector<Value, 8> Values;
17221
17222 public:
17223 /// A region within an expression which may be sequenced with respect
17224 /// to some other region.
17225 class Seq {
17226 friend class SequenceTree;
17227
17228 unsigned Index;
17229
17230 explicit Seq(unsigned N) : Index(N) {}
17231
17232 public:
17233 Seq() : Index(0) {}
17234 };
17235
17236 SequenceTree() { Values.push_back(Elt: Value(0)); }
17237 Seq root() const { return Seq(0); }
17238
17239 /// Create a new sequence of operations, which is an unsequenced
17240 /// subset of \p Parent. This sequence of operations is sequenced with
17241 /// respect to other children of \p Parent.
17242 Seq allocate(Seq Parent) {
17243 Values.push_back(Elt: Value(Parent.Index));
17244 return Seq(Values.size() - 1);
17245 }
17246
17247 /// Merge a sequence of operations into its parent.
17248 void merge(Seq S) {
17249 Values[S.Index].Merged = true;
17250 }
17251
17252 /// Determine whether two operations are unsequenced. This operation
17253 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old
17254 /// should have been merged into its parent as appropriate.
17255 bool isUnsequenced(Seq Cur, Seq Old) {
17256 unsigned C = representative(K: Cur.Index);
17257 unsigned Target = representative(K: Old.Index);
17258 while (C >= Target) {
17259 if (C == Target)
17260 return true;
17261 C = Values[C].Parent;
17262 }
17263 return false;
17264 }
17265
17266 private:
17267 /// Pick a representative for a sequence.
17268 unsigned representative(unsigned K) {
17269 if (Values[K].Merged)
17270 // Perform path compression as we go.
17271 return Values[K].Parent = representative(K: Values[K].Parent);
17272 return K;
17273 }
17274 };
17275
17276 /// An object for which we can track unsequenced uses.
17277 using Object = const NamedDecl *;
17278
17279 /// Different flavors of object usage which we track. We only track the
17280 /// least-sequenced usage of each kind.
17281 enum UsageKind {
17282 /// A read of an object. Multiple unsequenced reads are OK.
17283 UK_Use,
17284
17285 /// A modification of an object which is sequenced before the value
17286 /// computation of the expression, such as ++n in C++.
17287 UK_ModAsValue,
17288
17289 /// A modification of an object which is not sequenced before the value
17290 /// computation of the expression, such as n++.
17291 UK_ModAsSideEffect,
17292
17293 UK_Count = UK_ModAsSideEffect + 1
17294 };
17295
17296 /// Bundle together a sequencing region and the expression corresponding
17297 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo.
17298 struct Usage {
17299 const Expr *UsageExpr = nullptr;
17300 SequenceTree::Seq Seq;
17301
17302 Usage() = default;
17303 };
17304
17305 struct UsageInfo {
17306 Usage Uses[UK_Count];
17307
17308 /// Have we issued a diagnostic for this object already?
17309 bool Diagnosed = false;
17310
17311 UsageInfo();
17312 };
17313 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>;
17314
17315 Sema &SemaRef;
17316
17317 /// Sequenced regions within the expression.
17318 SequenceTree Tree;
17319
17320 /// Declaration modifications and references which we have seen.
17321 UsageInfoMap UsageMap;
17322
17323 /// The region we are currently within.
17324 SequenceTree::Seq Region;
17325
17326 /// Filled in with declarations which were modified as a side-effect
17327 /// (that is, post-increment operations).
17328 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr;
17329
17330 /// Expressions to check later. We defer checking these to reduce
17331 /// stack usage.
17332 SmallVectorImpl<const Expr *> &WorkList;
17333
17334 /// RAII object wrapping the visitation of a sequenced subexpression of an
17335 /// expression. At the end of this process, the side-effects of the evaluation
17336 /// become sequenced with respect to the value computation of the result, so
17337 /// we downgrade any UK_ModAsSideEffect within the evaluation to
17338 /// UK_ModAsValue.
17339 struct SequencedSubexpression {
17340 SequencedSubexpression(SequenceChecker &Self)
17341 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) {
17342 Self.ModAsSideEffect = &ModAsSideEffect;
17343 }
17344
17345 ~SequencedSubexpression() {
17346 for (const std::pair<Object, Usage> &M : llvm::reverse(C&: ModAsSideEffect)) {
17347 // Add a new usage with usage kind UK_ModAsValue, and then restore
17348 // the previous usage with UK_ModAsSideEffect (thus clearing it if
17349 // the previous one was empty).
17350 UsageInfo &UI = Self.UsageMap[M.first];
17351 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect];
17352 Self.addUsage(O: M.first, UI, UsageExpr: SideEffectUsage.UsageExpr, UK: UK_ModAsValue);
17353 SideEffectUsage = M.second;
17354 }
17355 Self.ModAsSideEffect = OldModAsSideEffect;
17356 }
17357
17358 SequenceChecker &Self;
17359 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect;
17360 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect;
17361 };
17362
17363 /// RAII object wrapping the visitation of a subexpression which we might
17364 /// choose to evaluate as a constant. If any subexpression is evaluated and
17365 /// found to be non-constant, this allows us to suppress the evaluation of
17366 /// the outer expression.
17367 class EvaluationTracker {
17368 public:
17369 EvaluationTracker(SequenceChecker &Self)
17370 : Self(Self), Prev(Self.EvalTracker) {
17371 Self.EvalTracker = this;
17372 }
17373
17374 ~EvaluationTracker() {
17375 Self.EvalTracker = Prev;
17376 if (Prev)
17377 Prev->EvalOK &= EvalOK;
17378 }
17379
17380 bool evaluate(const Expr *E, bool &Result) {
17381 if (!EvalOK || E->isValueDependent())
17382 return false;
17383 EvalOK = E->EvaluateAsBooleanCondition(
17384 Result, Ctx: Self.SemaRef.Context,
17385 InConstantContext: Self.SemaRef.isConstantEvaluatedContext());
17386 return EvalOK;
17387 }
17388
17389 private:
17390 SequenceChecker &Self;
17391 EvaluationTracker *Prev;
17392 bool EvalOK = true;
17393 } *EvalTracker = nullptr;
17394
17395 /// Find the object which is produced by the specified expression,
17396 /// if any.
17397 Object getObject(const Expr *E, bool Mod) const {
17398 E = E->IgnoreParenCasts();
17399 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Val: E)) {
17400 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec))
17401 return getObject(E: UO->getSubExpr(), Mod);
17402 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(Val: E)) {
17403 if (BO->getOpcode() == BO_Comma)
17404 return getObject(E: BO->getRHS(), Mod);
17405 if (Mod && BO->isAssignmentOp())
17406 return getObject(E: BO->getLHS(), Mod);
17407 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(Val: E)) {
17408 // FIXME: Check for more interesting cases, like "x.n = ++x.n".
17409 if (isa<CXXThisExpr>(Val: ME->getBase()->IgnoreParenCasts()))
17410 return ME->getMemberDecl();
17411 } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Val: E))
17412 // FIXME: If this is a reference, map through to its value.
17413 return DRE->getDecl();
17414 return nullptr;
17415 }
17416
17417 /// Note that an object \p O was modified or used by an expression
17418 /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for
17419 /// the object \p O as obtained via the \p UsageMap.
17420 void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) {
17421 // Get the old usage for the given object and usage kind.
17422 Usage &U = UI.Uses[UK];
17423 if (!U.UsageExpr || !Tree.isUnsequenced(Cur: Region, Old: U.Seq)) {
17424 // If we have a modification as side effect and are in a sequenced
17425 // subexpression, save the old Usage so that we can restore it later
17426 // in SequencedSubexpression::~SequencedSubexpression.
17427 if (UK == UK_ModAsSideEffect && ModAsSideEffect)
17428 ModAsSideEffect->push_back(Elt: std::make_pair(x&: O, y&: U));
17429 // Then record the new usage with the current sequencing region.
17430 U.UsageExpr = UsageExpr;
17431 U.Seq = Region;
17432 }
17433 }
17434
17435 /// Check whether a modification or use of an object \p O in an expression
17436 /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
17437 /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
17438 /// \p IsModMod is true when we are checking for a mod-mod unsequenced
/// usage and false when we are checking for a mod-use unsequenced usage.
17440 void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
17441 UsageKind OtherKind, bool IsModMod) {
17442 if (UI.Diagnosed)
17443 return;
17444
17445 const Usage &U = UI.Uses[OtherKind];
17446 if (!U.UsageExpr || !Tree.isUnsequenced(Cur: Region, Old: U.Seq))
17447 return;
17448
17449 const Expr *Mod = U.UsageExpr;
17450 const Expr *ModOrUse = UsageExpr;
17451 if (OtherKind == UK_Use)
17452 std::swap(a&: Mod, b&: ModOrUse);
17453
17454 SemaRef.DiagRuntimeBehavior(
17455 Mod->getExprLoc(), {Mod, ModOrUse},
17456 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
17457 : diag::warn_unsequenced_mod_use)
17458 << O << SourceRange(ModOrUse->getExprLoc()));
17459 UI.Diagnosed = true;
17460 }
17461
17462 // A note on note{Pre, Post}{Use, Mod}:
17463 //
17464 // (It helps to follow the algorithm with an expression such as
17465 // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
17466 // operations before C++17 and both are well-defined in C++17).
17467 //
// When visiting a node which uses/modifies an object we first call notePreUse
17469 // or notePreMod before visiting its sub-expression(s). At this point the
17470 // children of the current node have not yet been visited and so the eventual
17471 // uses/modifications resulting from the children of the current node have not
17472 // been recorded yet.
17473 //
17474 // We then visit the children of the current node. After that notePostUse or
17475 // notePostMod is called. These will 1) detect an unsequenced modification
17476 // as side effect (as in "k++ + k") and 2) add a new usage with the
17477 // appropriate usage kind.
17478 //
// We also have to be careful because some operations sequence modifications
// made as side effects (for example: || or ,). To account for this we wrap
// the visitation of such a sub-expression (for example: the LHS of || or ,)
// with SequencedSubexpression. SequencedSubexpression is an RAII object
// which records usages which are modifications as side effect, and then
// downgrades them (or more accurately restores the previous usage which was a
17485 // modification as side effect) when exiting the scope of the sequenced
17486 // subexpression.
17487
17488 void notePreUse(Object O, const Expr *UseExpr) {
17489 UsageInfo &UI = UsageMap[O];
17490 // Uses conflict with other modifications.
17491 checkUsage(O, UI, UsageExpr: UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false);
17492 }
17493
17494 void notePostUse(Object O, const Expr *UseExpr) {
17495 UsageInfo &UI = UsageMap[O];
17496 checkUsage(O, UI, UsageExpr: UseExpr, /*OtherKind=*/UK_ModAsSideEffect,
17497 /*IsModMod=*/false);
17498 addUsage(O, UI, UsageExpr: UseExpr, /*UsageKind=*/UK: UK_Use);
17499 }
17500
17501 void notePreMod(Object O, const Expr *ModExpr) {
17502 UsageInfo &UI = UsageMap[O];
17503 // Modifications conflict with other modifications and with uses.
17504 checkUsage(O, UI, UsageExpr: ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true);
17505 checkUsage(O, UI, UsageExpr: ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false);
17506 }
17507
17508 void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) {
17509 UsageInfo &UI = UsageMap[O];
17510 checkUsage(O, UI, UsageExpr: ModExpr, /*OtherKind=*/UK_ModAsSideEffect,
17511 /*IsModMod=*/true);
17512 addUsage(O, UI, UsageExpr: ModExpr, /*UsageKind=*/UK);
17513 }
17514
17515public:
17516 SequenceChecker(Sema &S, const Expr *E,
17517 SmallVectorImpl<const Expr *> &WorkList)
17518 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) {
17519 Visit(E);
17520 // Silence a -Wunused-private-field since WorkList is now unused.
17521 // TODO: Evaluate if it can be used, and if not remove it.
17522 (void)this->WorkList;
17523 }
17524
17525 void VisitStmt(const Stmt *S) {
17526 // Skip all statements which aren't expressions for now.
17527 }
17528
17529 void VisitExpr(const Expr *E) {
17530 // By default, just recurse to evaluated subexpressions.
17531 Base::VisitStmt(E);
17532 }
17533
17534 void VisitCoroutineSuspendExpr(const CoroutineSuspendExpr *CSE) {
17535 for (auto *Sub : CSE->children()) {
17536 const Expr *ChildExpr = dyn_cast_or_null<Expr>(Val: Sub);
17537 if (!ChildExpr)
17538 continue;
17539
17540 if (ChildExpr == CSE->getOperand())
17541 // Do not recurse over a CoroutineSuspendExpr's operand.
17542 // The operand is also a subexpression of getCommonExpr(), and
// recursing into it directly would confuse the object tracking used for
// sequence checking.
17545 continue;
17546
17547 Visit(S: Sub);
17548 }
17549 }
17550
17551 void VisitCastExpr(const CastExpr *E) {
17552 Object O = Object();
17553 if (E->getCastKind() == CK_LValueToRValue)
17554 O = getObject(E: E->getSubExpr(), Mod: false);
17555
17556 if (O)
17557 notePreUse(O, E);
17558 VisitExpr(E);
17559 if (O)
17560 notePostUse(O, E);
17561 }
17562
17563 void VisitSequencedExpressions(const Expr *SequencedBefore,
17564 const Expr *SequencedAfter) {
17565 SequenceTree::Seq BeforeRegion = Tree.allocate(Parent: Region);
17566 SequenceTree::Seq AfterRegion = Tree.allocate(Parent: Region);
17567 SequenceTree::Seq OldRegion = Region;
17568
17569 {
17570 SequencedSubexpression SeqBefore(*this);
17571 Region = BeforeRegion;
17572 Visit(SequencedBefore);
17573 }
17574
17575 Region = AfterRegion;
17576 Visit(SequencedAfter);
17577
17578 Region = OldRegion;
17579
17580 Tree.merge(S: BeforeRegion);
17581 Tree.merge(S: AfterRegion);
17582 }
17583
17584 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) {
17585 // C++17 [expr.sub]p1:
17586 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The
17587 // expression E1 is sequenced before the expression E2.
17588 if (SemaRef.getLangOpts().CPlusPlus17)
17589 VisitSequencedExpressions(SequencedBefore: ASE->getLHS(), SequencedAfter: ASE->getRHS());
17590 else {
17591 Visit(ASE->getLHS());
17592 Visit(ASE->getRHS());
17593 }
17594 }
17595
17596 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
17597 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
17598 void VisitBinPtrMem(const BinaryOperator *BO) {
17599 // C++17 [expr.mptr.oper]p4:
17600 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...]
17601 // the expression E1 is sequenced before the expression E2.
17602 if (SemaRef.getLangOpts().CPlusPlus17)
17603 VisitSequencedExpressions(SequencedBefore: BO->getLHS(), SequencedAfter: BO->getRHS());
17604 else {
17605 Visit(BO->getLHS());
17606 Visit(BO->getRHS());
17607 }
17608 }
17609
17610 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); }
17611 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); }
17612 void VisitBinShlShr(const BinaryOperator *BO) {
17613 // C++17 [expr.shift]p4:
17614 // The expression E1 is sequenced before the expression E2.
17615 if (SemaRef.getLangOpts().CPlusPlus17)
17616 VisitSequencedExpressions(SequencedBefore: BO->getLHS(), SequencedAfter: BO->getRHS());
17617 else {
17618 Visit(BO->getLHS());
17619 Visit(BO->getRHS());
17620 }
17621 }
17622
17623 void VisitBinComma(const BinaryOperator *BO) {
17624 // C++11 [expr.comma]p1:
17625 // Every value computation and side effect associated with the left
17626 // expression is sequenced before every value computation and side
17627 // effect associated with the right expression.
17628 VisitSequencedExpressions(SequencedBefore: BO->getLHS(), SequencedAfter: BO->getRHS());
17629 }
17630
17631 void VisitBinAssign(const BinaryOperator *BO) {
17632 SequenceTree::Seq RHSRegion;
17633 SequenceTree::Seq LHSRegion;
17634 if (SemaRef.getLangOpts().CPlusPlus17) {
17635 RHSRegion = Tree.allocate(Parent: Region);
17636 LHSRegion = Tree.allocate(Parent: Region);
17637 } else {
17638 RHSRegion = Region;
17639 LHSRegion = Region;
17640 }
17641 SequenceTree::Seq OldRegion = Region;
17642
17643 // C++11 [expr.ass]p1:
17644 // [...] the assignment is sequenced after the value computation
17645 // of the right and left operands, [...]
17646 //
17647 // so check it before inspecting the operands and update the
17648 // map afterwards.
17649 Object O = getObject(E: BO->getLHS(), /*Mod=*/true);
17650 if (O)
17651 notePreMod(O, BO);
17652
17653 if (SemaRef.getLangOpts().CPlusPlus17) {
17654 // C++17 [expr.ass]p1:
17655 // [...] The right operand is sequenced before the left operand. [...]
17656 {
17657 SequencedSubexpression SeqBefore(*this);
17658 Region = RHSRegion;
17659 Visit(BO->getRHS());
17660 }
17661
17662 Region = LHSRegion;
17663 Visit(BO->getLHS());
17664
17665 if (O && isa<CompoundAssignOperator>(Val: BO))
17666 notePostUse(O, BO);
17667
17668 } else {
17669 // C++11 does not specify any sequencing between the LHS and RHS.
17670 Region = LHSRegion;
17671 Visit(BO->getLHS());
17672
17673 if (O && isa<CompoundAssignOperator>(Val: BO))
17674 notePostUse(O, BO);
17675
17676 Region = RHSRegion;
17677 Visit(BO->getRHS());
17678 }
17679
17680 // C++11 [expr.ass]p1:
17681 // the assignment is sequenced [...] before the value computation of the
17682 // assignment expression.
17683 // C11 6.5.16/3 has no such rule.
17684 Region = OldRegion;
17685 if (O)
17686 notePostMod(O, BO,
17687 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
17688 : UK_ModAsSideEffect);
17689 if (SemaRef.getLangOpts().CPlusPlus17) {
17690 Tree.merge(S: RHSRegion);
17691 Tree.merge(S: LHSRegion);
17692 }
17693 }
17694
17695 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) {
17696 VisitBinAssign(CAO);
17697 }
17698
17699 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
17700 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
17701 void VisitUnaryPreIncDec(const UnaryOperator *UO) {
17702 Object O = getObject(E: UO->getSubExpr(), Mod: true);
17703 if (!O)
17704 return VisitExpr(UO);
17705
17706 notePreMod(O, UO);
17707 Visit(UO->getSubExpr());
17708 // C++11 [expr.pre.incr]p1:
17709 // the expression ++x is equivalent to x+=1
17710 notePostMod(O, UO,
17711 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
17712 : UK_ModAsSideEffect);
17713 }
17714
17715 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
17716 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
17717 void VisitUnaryPostIncDec(const UnaryOperator *UO) {
17718 Object O = getObject(E: UO->getSubExpr(), Mod: true);
17719 if (!O)
17720 return VisitExpr(UO);
17721
17722 notePreMod(O, UO);
17723 Visit(UO->getSubExpr());
17724 notePostMod(O, UO, UK_ModAsSideEffect);
17725 }
17726
17727 void VisitBinLOr(const BinaryOperator *BO) {
17728 // C++11 [expr.log.or]p2:
17729 // If the second expression is evaluated, every value computation and
17730 // side effect associated with the first expression is sequenced before
17731 // every value computation and side effect associated with the
17732 // second expression.
17733 SequenceTree::Seq LHSRegion = Tree.allocate(Parent: Region);
17734 SequenceTree::Seq RHSRegion = Tree.allocate(Parent: Region);
17735 SequenceTree::Seq OldRegion = Region;
17736
17737 EvaluationTracker Eval(*this);
17738 {
17739 SequencedSubexpression Sequenced(*this);
17740 Region = LHSRegion;
17741 Visit(BO->getLHS());
17742 }
17743
17744 // C++11 [expr.log.or]p1:
17745 // [...] the second operand is not evaluated if the first operand
17746 // evaluates to true.
17747 bool EvalResult = false;
17748 bool EvalOK = Eval.evaluate(E: BO->getLHS(), Result&: EvalResult);
17749 bool ShouldVisitRHS = !EvalOK || !EvalResult;
17750 if (ShouldVisitRHS) {
17751 Region = RHSRegion;
17752 Visit(BO->getRHS());
17753 }
17754
17755 Region = OldRegion;
17756 Tree.merge(S: LHSRegion);
17757 Tree.merge(S: RHSRegion);
17758 }
17759
17760 void VisitBinLAnd(const BinaryOperator *BO) {
17761 // C++11 [expr.log.and]p2:
17762 // If the second expression is evaluated, every value computation and
17763 // side effect associated with the first expression is sequenced before
17764 // every value computation and side effect associated with the
17765 // second expression.
17766 SequenceTree::Seq LHSRegion = Tree.allocate(Parent: Region);
17767 SequenceTree::Seq RHSRegion = Tree.allocate(Parent: Region);
17768 SequenceTree::Seq OldRegion = Region;
17769
17770 EvaluationTracker Eval(*this);
17771 {
17772 SequencedSubexpression Sequenced(*this);
17773 Region = LHSRegion;
17774 Visit(BO->getLHS());
17775 }
17776
17777 // C++11 [expr.log.and]p1:
17778 // [...] the second operand is not evaluated if the first operand is false.
17779 bool EvalResult = false;
17780 bool EvalOK = Eval.evaluate(E: BO->getLHS(), Result&: EvalResult);
17781 bool ShouldVisitRHS = !EvalOK || EvalResult;
17782 if (ShouldVisitRHS) {
17783 Region = RHSRegion;
17784 Visit(BO->getRHS());
17785 }
17786
17787 Region = OldRegion;
17788 Tree.merge(S: LHSRegion);
17789 Tree.merge(S: RHSRegion);
17790 }
17791
17792 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
17793 // C++11 [expr.cond]p1:
17794 // [...] Every value computation and side effect associated with the first
17795 // expression is sequenced before every value computation and side effect
17796 // associated with the second or third expression.
17797 SequenceTree::Seq ConditionRegion = Tree.allocate(Parent: Region);
17798
    // No sequencing is specified between the true and false expression.
    // However, since exactly one of the two is going to be evaluated we can
    // consider them to be sequenced. This is needed to avoid warning on
    // something like "x ? y += 1 : y += 2;" in the case where we will visit
    // both the true and false expressions because we can't evaluate x.
    // This will still allow us to detect an expression like (pre C++17)
    // "(x ? y += 1 : y += 2) = y".
    //
    // We don't wrap the visitation of the true and false expressions with
    // SequencedSubexpression because we don't want to downgrade modifications
    // to side effects in the true and false expressions after the visitation
    // is done. (For example, in the expression "(x ? y++ : y++) + y" we
    // should not warn between the two "y++", but we should warn between the
    // "y++" and the "y".)
17813 SequenceTree::Seq TrueRegion = Tree.allocate(Parent: Region);
17814 SequenceTree::Seq FalseRegion = Tree.allocate(Parent: Region);
17815 SequenceTree::Seq OldRegion = Region;
17816
17817 EvaluationTracker Eval(*this);
17818 {
17819 SequencedSubexpression Sequenced(*this);
17820 Region = ConditionRegion;
17821 Visit(CO->getCond());
17822 }
17823
17824 // C++11 [expr.cond]p1:
17825 // [...] The first expression is contextually converted to bool (Clause 4).
17826 // It is evaluated and if it is true, the result of the conditional
17827 // expression is the value of the second expression, otherwise that of the
17828 // third expression. Only one of the second and third expressions is
17829 // evaluated. [...]
17830 bool EvalResult = false;
17831 bool EvalOK = Eval.evaluate(E: CO->getCond(), Result&: EvalResult);
17832 bool ShouldVisitTrueExpr = !EvalOK || EvalResult;
17833 bool ShouldVisitFalseExpr = !EvalOK || !EvalResult;
17834 if (ShouldVisitTrueExpr) {
17835 Region = TrueRegion;
17836 Visit(CO->getTrueExpr());
17837 }
17838 if (ShouldVisitFalseExpr) {
17839 Region = FalseRegion;
17840 Visit(CO->getFalseExpr());
17841 }
17842
17843 Region = OldRegion;
17844 Tree.merge(S: ConditionRegion);
17845 Tree.merge(S: TrueRegion);
17846 Tree.merge(S: FalseRegion);
17847 }
17848
17849 void VisitCallExpr(const CallExpr *CE) {
17850 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
17851
17852 if (CE->isUnevaluatedBuiltinCall(Ctx: Context))
17853 return;
17854
17855 // C++11 [intro.execution]p15:
17856 // When calling a function [...], every value computation and side effect
17857 // associated with any argument expression, or with the postfix expression
17858 // designating the called function, is sequenced before execution of every
17859 // expression or statement in the body of the function [and thus before
17860 // the value computation of its result].
17861 SequencedSubexpression Sequenced(*this);
17862 SemaRef.runWithSufficientStackSpace(Loc: CE->getExprLoc(), Fn: [&] {
17863 // C++17 [expr.call]p5
17864 // The postfix-expression is sequenced before each expression in the
17865 // expression-list and any default argument. [...]
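      // For example, in "g(i)(i++)" the callee "g(i)" and the argument "i++"
      // are unsequenced before C++17 and may be diagnosed, whereas in C++17
      // the callee region is sequenced before the argument region and no
      // warning is needed.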
17866 SequenceTree::Seq CalleeRegion;
17867 SequenceTree::Seq OtherRegion;
17868 if (SemaRef.getLangOpts().CPlusPlus17) {
17869 CalleeRegion = Tree.allocate(Parent: Region);
17870 OtherRegion = Tree.allocate(Parent: Region);
17871 } else {
17872 CalleeRegion = Region;
17873 OtherRegion = Region;
17874 }
17875 SequenceTree::Seq OldRegion = Region;
17876
17877 // Visit the callee expression first.
17878 Region = CalleeRegion;
17879 if (SemaRef.getLangOpts().CPlusPlus17) {
17880 SequencedSubexpression Sequenced(*this);
17881 Visit(CE->getCallee());
17882 } else {
17883 Visit(CE->getCallee());
17884 }
17885
17886 // Then visit the argument expressions.
17887 Region = OtherRegion;
17888 for (const Expr *Argument : CE->arguments())
17889 Visit(Argument);
17890
17891 Region = OldRegion;
17892 if (SemaRef.getLangOpts().CPlusPlus17) {
17893 Tree.merge(S: CalleeRegion);
17894 Tree.merge(S: OtherRegion);
17895 }
17896 });
17897 }
17898
17899 void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) {
17900 // C++17 [over.match.oper]p2:
17901 // [...] the operator notation is first transformed to the equivalent
17902 // function-call notation as summarized in Table 12 (where @ denotes one
17903 // of the operators covered in the specified subclause). However, the
17904 // operands are sequenced in the order prescribed for the built-in
17905 // operator (Clause 8).
17906 //
17907 // From the above only overloaded binary operators and overloaded call
17908 // operators have sequencing rules in C++17 that we need to handle
17909 // separately.
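    // For example, with a user-defined "operator<<", the operands of
    // "out << i << i++" are sequenced left to right in C++17, just as for
    // the built-in operator, whereas before C++17 they are unsequenced and
    // may be diagnosed.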
17910 if (!SemaRef.getLangOpts().CPlusPlus17 ||
17911 (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call))
17912 return VisitCallExpr(CXXOCE);
17913
17914 enum {
17915 NoSequencing,
17916 LHSBeforeRHS,
17917 RHSBeforeLHS,
17918 LHSBeforeRest
17919 } SequencingKind;
17920 switch (CXXOCE->getOperator()) {
17921 case OO_Equal:
17922 case OO_PlusEqual:
17923 case OO_MinusEqual:
17924 case OO_StarEqual:
17925 case OO_SlashEqual:
17926 case OO_PercentEqual:
17927 case OO_CaretEqual:
17928 case OO_AmpEqual:
17929 case OO_PipeEqual:
17930 case OO_LessLessEqual:
17931 case OO_GreaterGreaterEqual:
17932 SequencingKind = RHSBeforeLHS;
17933 break;
17934
17935 case OO_LessLess:
17936 case OO_GreaterGreater:
17937 case OO_AmpAmp:
17938 case OO_PipePipe:
17939 case OO_Comma:
17940 case OO_ArrowStar:
17941 case OO_Subscript:
17942 SequencingKind = LHSBeforeRHS;
17943 break;
17944
17945 case OO_Call:
17946 SequencingKind = LHSBeforeRest;
17947 break;
17948
17949 default:
17950 SequencingKind = NoSequencing;
17951 break;
17952 }
17953
17954 if (SequencingKind == NoSequencing)
17955 return VisitCallExpr(CXXOCE);
17956
17957 // This is a call, so all subexpressions are sequenced before the result.
17958 SequencedSubexpression Sequenced(*this);
17959
17960 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] {
      assert(SemaRef.getLangOpts().CPlusPlus17 &&
             "Should only get here with C++17 and above!");
      assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) &&
             "Should only get here with an overloaded binary operator"
             " or an overloaded call operator!");
17966
17967 if (SequencingKind == LHSBeforeRest) {
17968 assert(CXXOCE->getOperator() == OO_Call &&
17969 "We should only have an overloaded call operator here!");
17970
17971 // This is very similar to VisitCallExpr, except that we only have the
17972 // C++17 case. The postfix-expression is the first argument of the
17973 // CXXOperatorCallExpr. The expressions in the expression-list, if any,
17974 // are in the following arguments.
17975 //
17976 // Note that we intentionally do not visit the callee expression since
17977 // it is just a decayed reference to a function.
17978 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Parent: Region);
17979 SequenceTree::Seq ArgsRegion = Tree.allocate(Parent: Region);
17980 SequenceTree::Seq OldRegion = Region;
17981
17982 assert(CXXOCE->getNumArgs() >= 1 &&
17983 "An overloaded call operator must have at least one argument"
17984 " for the postfix-expression!");
17985 const Expr *PostfixExpr = CXXOCE->getArgs()[0];
17986 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1,
17987 CXXOCE->getNumArgs() - 1);
17988
17989 // Visit the postfix-expression first.
17990 {
17991 Region = PostfixExprRegion;
17992 SequencedSubexpression Sequenced(*this);
17993 Visit(PostfixExpr);
17994 }
17995
17996 // Then visit the argument expressions.
17997 Region = ArgsRegion;
17998 for (const Expr *Arg : Args)
17999 Visit(Arg);
18000
18001 Region = OldRegion;
18002 Tree.merge(S: PostfixExprRegion);
18003 Tree.merge(S: ArgsRegion);
18004 } else {
18005 assert(CXXOCE->getNumArgs() == 2 &&
18006 "Should only have two arguments here!");
18007 assert((SequencingKind == LHSBeforeRHS ||
18008 SequencingKind == RHSBeforeLHS) &&
18009 "Unexpected sequencing kind!");
18010
18011 // We do not visit the callee expression since it is just a decayed
18012 // reference to a function.
18013 const Expr *E1 = CXXOCE->getArg(0);
18014 const Expr *E2 = CXXOCE->getArg(1);
18015 if (SequencingKind == RHSBeforeLHS)
18016 std::swap(a&: E1, b&: E2);
18017
18018 return VisitSequencedExpressions(E1, E2);
18019 }
18020 });
18021 }
18022
18023 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) {
18024 // This is a call, so all subexpressions are sequenced before the result.
18025 SequencedSubexpression Sequenced(*this);
18026
18027 if (!CCE->isListInitialization())
18028 return VisitExpr(CCE);
18029
18030 // In C++11, list initializations are sequenced.
18031 SequenceExpressionsInOrder(
18032 ExpressionList: llvm::ArrayRef(CCE->getArgs(), CCE->getNumArgs()));
18033 }
18034
18035 void VisitInitListExpr(const InitListExpr *ILE) {
18036 if (!SemaRef.getLangOpts().CPlusPlus11)
18037 return VisitExpr(ILE);
18038
18039 // In C++11, list initializations are sequenced.
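    // For example, "int a[2] = {i++, i++};" is well-defined in C++11 because
    // the initializers in a braced-init-list are evaluated in order, so no
    // -Wunsequenced warning should be emitted for it.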
18040 SequenceExpressionsInOrder(ExpressionList: ILE->inits());
18041 }
18042
18043 void VisitCXXParenListInitExpr(const CXXParenListInitExpr *PLIE) {
18044 // C++20 parenthesized list initializations are sequenced. See C++20
18045 // [decl.init.general]p16.5 and [decl.init.general]p16.6.2.2.
18046 SequenceExpressionsInOrder(ExpressionList: PLIE->getInitExprs());
18047 }
18048
18049private:
18050 void SequenceExpressionsInOrder(ArrayRef<const Expr *> ExpressionList) {
18051 SmallVector<SequenceTree::Seq, 32> Elts;
18052 SequenceTree::Seq Parent = Region;
18053 for (const Expr *E : ExpressionList) {
18054 if (!E)
18055 continue;
18056 Region = Tree.allocate(Parent);
18057 Elts.push_back(Elt: Region);
18058 Visit(E);
18059 }
18060
18061 // Forget that the initializers are sequenced.
18062 Region = Parent;
18063 for (unsigned I = 0; I < Elts.size(); ++I)
18064 Tree.merge(S: Elts[I]);
18065 }
18066};
18067
18068SequenceChecker::UsageInfo::UsageInfo() = default;
18069
18070} // namespace
18071
18072void Sema::CheckUnsequencedOperations(const Expr *E) {
18073 SmallVector<const Expr *, 8> WorkList;
18074 WorkList.push_back(Elt: E);
18075 while (!WorkList.empty()) {
18076 const Expr *Item = WorkList.pop_back_val();
18077 SequenceChecker(*this, Item, WorkList);
18078 }
18079}
18080
18081void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc,
18082 bool IsConstexpr) {
18083 llvm::SaveAndRestore ConstantContext(isConstantEvaluatedOverride,
18084 IsConstexpr || isa<ConstantExpr>(Val: E));
18085 CheckImplicitConversions(E, CC: CheckLoc);
18086 if (!E->isInstantiationDependent())
18087 CheckUnsequencedOperations(E);
18088 if (!IsConstexpr && !E->isValueDependent())
18089 CheckForIntOverflow(E);
18090 DiagnoseMisalignedMembers();
18091}
18092
18093void Sema::CheckBitFieldInitialization(SourceLocation InitLoc,
18094 FieldDecl *BitField,
18095 Expr *Init) {
18096 (void) AnalyzeBitFieldAssignment(S&: *this, Bitfield: BitField, Init, InitLoc);
18097}
18098
18099static void diagnoseArrayStarInParamType(Sema &S, QualType PType,
18100 SourceLocation Loc) {
18101 if (!PType->isVariablyModifiedType())
18102 return;
18103 if (const auto *PointerTy = dyn_cast<PointerType>(Val&: PType)) {
18104 diagnoseArrayStarInParamType(S, PType: PointerTy->getPointeeType(), Loc);
18105 return;
18106 }
18107 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(Val&: PType)) {
18108 diagnoseArrayStarInParamType(S, PType: ReferenceTy->getPointeeType(), Loc);
18109 return;
18110 }
18111 if (const auto *ParenTy = dyn_cast<ParenType>(Val&: PType)) {
18112 diagnoseArrayStarInParamType(S, PType: ParenTy->getInnerType(), Loc);
18113 return;
18114 }
18115
18116 const ArrayType *AT = S.Context.getAsArrayType(T: PType);
18117 if (!AT)
18118 return;
18119
18120 if (AT->getSizeModifier() != ArraySizeModifier::Star) {
18121 diagnoseArrayStarInParamType(S, PType: AT->getElementType(), Loc);
18122 return;
18123 }
18124
18125 S.Diag(Loc, diag::err_array_star_in_function_definition);
18126}
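// For example, "void f(int a[*]);" is fine as a declaration, but spelling
// the same parameter in a function definition, "void f(int a[*]) {}", is
// diagnosed here because the [*] bound must be specified in a definition.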
18127
18128/// CheckParmsForFunctionDef - Check that the parameters of the given
18129/// function are appropriate for the definition of a function. This
18130/// takes care of any checks that cannot be performed on the
18131/// declaration itself, e.g., that the types of each of the function
18132/// parameters are complete.
18133bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
18134 bool CheckParameterNames) {
18135 bool HasInvalidParm = false;
18136 for (ParmVarDecl *Param : Parameters) {
18137 assert(Param && "null in a parameter list");
18138 // C99 6.7.5.3p4: the parameters in a parameter type list in a
18139 // function declarator that is part of a function definition of
18140 // that function shall not have incomplete type.
18141 //
18142 // C++23 [dcl.fct.def.general]/p2
18143 // The type of a parameter [...] for a function definition
18144 // shall not be a (possibly cv-qualified) class type that is incomplete
18145 // or abstract within the function body unless the function is deleted.
18146 if (!Param->isInvalidDecl() &&
18147 (RequireCompleteType(Param->getLocation(), Param->getType(),
18148 diag::err_typecheck_decl_incomplete_type) ||
18149 RequireNonAbstractType(Param->getBeginLoc(), Param->getOriginalType(),
18150 diag::err_abstract_type_in_decl,
18151 AbstractParamType))) {
18152 Param->setInvalidDecl();
18153 HasInvalidParm = true;
18154 }
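    // For example, "struct S; void f(struct S s) {}" is rejected here
    // because the parameter has an incomplete class type at the point of
    // definition.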
18155
18156 // C99 6.9.1p5: If the declarator includes a parameter type list, the
18157 // declaration of each parameter shall include an identifier.
18158 if (CheckParameterNames && Param->getIdentifier() == nullptr &&
18159 !Param->isImplicit() && !getLangOpts().CPlusPlus) {
18160 // Diagnose this as an extension in C17 and earlier.
18161 if (!getLangOpts().C23)
18162 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c23);
18163 }
18164
18165 // C99 6.7.5.3p12:
18166 // If the function declarator is not part of a definition of that
18167 // function, parameters may have incomplete type and may use the [*]
18168 // notation in their sequences of declarator specifiers to specify
18169 // variable length array types.
18170 QualType PType = Param->getOriginalType();
    // FIXME: This diagnostic should point at the '[*]' once source-location
    // information is added for it.
18173 diagnoseArrayStarInParamType(*this, PType, Param->getLocation());
18174
    // If the parameter is a C++ class type and it has to be destructed in the
18176 // callee function, declare the destructor so that it can be called by the
18177 // callee function. Do not perform any direct access check on the dtor here.
18178 if (!Param->isInvalidDecl()) {
18179 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) {
18180 if (!ClassDecl->isInvalidDecl() &&
18181 !ClassDecl->hasIrrelevantDestructor() &&
18182 !ClassDecl->isDependentContext() &&
18183 ClassDecl->isParamDestroyedInCallee()) {
18184 CXXDestructorDecl *Destructor = LookupDestructor(Class: ClassDecl);
18185 MarkFunctionReferenced(Loc: Param->getLocation(), Func: Destructor);
18186 DiagnoseUseOfDecl(D: Destructor, Locs: Param->getLocation());
18187 }
18188 }
18189 }
18190
18191 // Parameters with the pass_object_size attribute only need to be marked
18192 // constant at function definitions. Because we lack information about
18193 // whether we're on a declaration or definition when we're instantiating the
18194 // attribute, we need to check for constness here.
18195 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>())
18196 if (!Param->getType().isConstQualified())
18197 Diag(Param->getLocation(), diag::err_attribute_pointers_only)
18198 << Attr->getSpelling() << 1;
18199
18200 // Check for parameter names shadowing fields from the class.
18201 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) {
18202 // The owning context for the parameter should be the function, but we
18203 // want to see if this function's declaration context is a record.
18204 DeclContext *DC = Param->getDeclContext();
18205 if (DC && DC->isFunctionOrMethod()) {
18206 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent()))
18207 CheckShadowInheritedFields(Loc: Param->getLocation(), FieldName: Param->getDeclName(),
18208 RD: RD, /*DeclIsField*/ false);
18209 }
18210 }
18211
18212 if (!Param->isInvalidDecl() &&
18213 Param->getOriginalType()->isWebAssemblyTableType()) {
18214 Param->setInvalidDecl();
18215 HasInvalidParm = true;
18216 Diag(Param->getLocation(), diag::err_wasm_table_as_function_parameter);
18217 }
18218 }
18219
18220 return HasInvalidParm;
18221}
18222
static std::optional<std::pair<CharUnits, CharUnits>>
getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx);
18228
18229/// Compute the alignment and offset of the base class object given the
18230/// derived-to-base cast expression and the alignment and offset of the derived
18231/// class object.
18232static std::pair<CharUnits, CharUnits>
18233getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType,
18234 CharUnits BaseAlignment, CharUnits Offset,
18235 ASTContext &Ctx) {
18236 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE;
18237 ++PathI) {
18238 const CXXBaseSpecifier *Base = *PathI;
18239 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();
18240 if (Base->isVirtual()) {
18241 // The complete object may have a lower alignment than the non-virtual
18242 // alignment of the base, in which case the base may be misaligned. Choose
18243 // the smaller of the non-virtual alignment and BaseAlignment, which is a
18244 // conservative lower bound of the complete object alignment.
18245 CharUnits NonVirtualAlignment =
18246 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment();
18247 BaseAlignment = std::min(a: BaseAlignment, b: NonVirtualAlignment);
18248 Offset = CharUnits::Zero();
18249 } else {
18250 const ASTRecordLayout &RL =
18251 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl());
18252 Offset += RL.getBaseClassOffset(Base: BaseDecl);
18253 }
18254 DerivedType = Base->getType();
18255 }
18256
18257 return std::make_pair(x&: BaseAlignment, y&: Offset);
18258}
18259
18260/// Compute the alignment and offset of a binary additive operator.
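/// For example, if the pointer operand is known to be 8-byte aligned and the
/// element size is 4 bytes, then "p + n" for an unknown "n" can still be
/// assumed to be at least 4-byte aligned.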
18261static std::optional<std::pair<CharUnits, CharUnits>>
18262getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE,
18263 bool IsSub, ASTContext &Ctx) {
18264 QualType PointeeType = PtrE->getType()->getPointeeType();
18265
18266 if (!PointeeType->isConstantSizeType())
18267 return std::nullopt;
18268
18269 auto P = getBaseAlignmentAndOffsetFromPtr(E: PtrE, Ctx);
18270
18271 if (!P)
18272 return std::nullopt;
18273
18274 CharUnits EltSize = Ctx.getTypeSizeInChars(T: PointeeType);
18275 if (std::optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) {
18276 CharUnits Offset = EltSize * IdxRes->getExtValue();
18277 if (IsSub)
18278 Offset = -Offset;
18279 return std::make_pair(x&: P->first, y: P->second + Offset);
18280 }
18281
18282 // If the integer expression isn't a constant expression, compute the lower
18283 // bound of the alignment using the alignment and offset of the pointer
18284 // expression and the element size.
18285 return std::make_pair(
18286 x: P->first.alignmentAtOffset(offset: P->second).alignmentAtOffset(offset: EltSize),
18287 y: CharUnits::Zero());
18288}
18289
18290/// This helper function takes an lvalue expression and returns the alignment of
18291/// a VarDecl and a constant offset from the VarDecl.
static std::optional<std::pair<CharUnits, CharUnits>>
getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) {
18296 E = E->IgnoreParens();
18297 switch (E->getStmtClass()) {
18298 default:
18299 break;
18300 case Stmt::CStyleCastExprClass:
18301 case Stmt::CXXStaticCastExprClass:
18302 case Stmt::ImplicitCastExprClass: {
18303 auto *CE = cast<CastExpr>(Val: E);
18304 const Expr *From = CE->getSubExpr();
18305 switch (CE->getCastKind()) {
18306 default:
18307 break;
18308 case CK_NoOp:
18309 return getBaseAlignmentAndOffsetFromLValue(E: From, Ctx);
18310 case CK_UncheckedDerivedToBase:
18311 case CK_DerivedToBase: {
18312 auto P = getBaseAlignmentAndOffsetFromLValue(E: From, Ctx);
18313 if (!P)
18314 break;
18315 return getDerivedToBaseAlignmentAndOffset(CE, DerivedType: From->getType(), BaseAlignment: P->first,
18316 Offset: P->second, Ctx);
18317 }
18318 }
18319 break;
18320 }
18321 case Stmt::ArraySubscriptExprClass: {
18322 auto *ASE = cast<ArraySubscriptExpr>(Val: E);
18323 return getAlignmentAndOffsetFromBinAddOrSub(PtrE: ASE->getBase(), IntE: ASE->getIdx(),
18324 IsSub: false, Ctx);
18325 }
18326 case Stmt::DeclRefExprClass: {
18327 if (auto *VD = dyn_cast<VarDecl>(Val: cast<DeclRefExpr>(Val: E)->getDecl())) {
18328 // FIXME: If VD is captured by copy or is an escaping __block variable,
18329 // use the alignment of VD's type.
18330 if (!VD->getType()->isReferenceType()) {
18331 // Dependent alignment cannot be resolved -> bail out.
18332 if (VD->hasDependentAlignment())
18333 break;
18334 return std::make_pair(x: Ctx.getDeclAlign(VD), y: CharUnits::Zero());
18335 }
18336 if (VD->hasInit())
18337 return getBaseAlignmentAndOffsetFromLValue(E: VD->getInit(), Ctx);
18338 }
18339 break;
18340 }
18341 case Stmt::MemberExprClass: {
18342 auto *ME = cast<MemberExpr>(Val: E);
18343 auto *FD = dyn_cast<FieldDecl>(Val: ME->getMemberDecl());
18344 if (!FD || FD->getType()->isReferenceType() ||
18345 FD->getParent()->isInvalidDecl())
18346 break;
18347 std::optional<std::pair<CharUnits, CharUnits>> P;
18348 if (ME->isArrow())
18349 P = getBaseAlignmentAndOffsetFromPtr(E: ME->getBase(), Ctx);
18350 else
18351 P = getBaseAlignmentAndOffsetFromLValue(E: ME->getBase(), Ctx);
18352 if (!P)
18353 break;
18354 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(D: FD->getParent());
18355 uint64_t Offset = Layout.getFieldOffset(FieldNo: FD->getFieldIndex());
18356 return std::make_pair(x&: P->first,
18357 y: P->second + CharUnits::fromQuantity(Quantity: Offset));
18358 }
18359 case Stmt::UnaryOperatorClass: {
18360 auto *UO = cast<UnaryOperator>(Val: E);
18361 switch (UO->getOpcode()) {
18362 default:
18363 break;
18364 case UO_Deref:
18365 return getBaseAlignmentAndOffsetFromPtr(E: UO->getSubExpr(), Ctx);
18366 }
18367 break;
18368 }
18369 case Stmt::BinaryOperatorClass: {
18370 auto *BO = cast<BinaryOperator>(Val: E);
18371 auto Opcode = BO->getOpcode();
18372 switch (Opcode) {
18373 default:
18374 break;
18375 case BO_Comma:
18376 return getBaseAlignmentAndOffsetFromLValue(E: BO->getRHS(), Ctx);
18377 }
18378 break;
18379 }
18380 }
18381 return std::nullopt;
18382}
18383
18384/// This helper function takes a pointer expression and returns the alignment of
18385/// a VarDecl and a constant offset from the VarDecl.
static std::optional<std::pair<CharUnits, CharUnits>>
getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) {
18391 E = E->IgnoreParens();
18392 switch (E->getStmtClass()) {
18393 default:
18394 break;
18395 case Stmt::CStyleCastExprClass:
18396 case Stmt::CXXStaticCastExprClass:
18397 case Stmt::ImplicitCastExprClass: {
18398 auto *CE = cast<CastExpr>(Val: E);
18399 const Expr *From = CE->getSubExpr();
18400 switch (CE->getCastKind()) {
18401 default:
18402 break;
18403 case CK_NoOp:
18404 return getBaseAlignmentAndOffsetFromPtr(E: From, Ctx);
18405 case CK_ArrayToPointerDecay:
18406 return getBaseAlignmentAndOffsetFromLValue(E: From, Ctx);
18407 case CK_UncheckedDerivedToBase:
18408 case CK_DerivedToBase: {
18409 auto P = getBaseAlignmentAndOffsetFromPtr(E: From, Ctx);
18410 if (!P)
18411 break;
18412 return getDerivedToBaseAlignmentAndOffset(
18413 CE, DerivedType: From->getType()->getPointeeType(), BaseAlignment: P->first, Offset: P->second, Ctx);
18414 }
18415 }
18416 break;
18417 }
18418 case Stmt::CXXThisExprClass: {
18419 auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl();
18420 CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment();
18421 return std::make_pair(x&: Alignment, y: CharUnits::Zero());
18422 }
18423 case Stmt::UnaryOperatorClass: {
18424 auto *UO = cast<UnaryOperator>(Val: E);
18425 if (UO->getOpcode() == UO_AddrOf)
18426 return getBaseAlignmentAndOffsetFromLValue(E: UO->getSubExpr(), Ctx);
18427 break;
18428 }
18429 case Stmt::BinaryOperatorClass: {
18430 auto *BO = cast<BinaryOperator>(Val: E);
18431 auto Opcode = BO->getOpcode();
18432 switch (Opcode) {
18433 default:
18434 break;
18435 case BO_Add:
18436 case BO_Sub: {
18437 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS();
18438 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType())
18439 std::swap(a&: LHS, b&: RHS);
18440 return getAlignmentAndOffsetFromBinAddOrSub(PtrE: LHS, IntE: RHS, IsSub: Opcode == BO_Sub,
18441 Ctx);
18442 }
18443 case BO_Comma:
18444 return getBaseAlignmentAndOffsetFromPtr(E: BO->getRHS(), Ctx);
18445 }
18446 break;
18447 }
18448 }
18449 return std::nullopt;
18450}
18451
18452static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) {
18453 // See if we can compute the alignment of a VarDecl and an offset from it.
18454 std::optional<std::pair<CharUnits, CharUnits>> P =
18455 getBaseAlignmentAndOffsetFromPtr(E, Ctx&: S.Context);
18456
18457 if (P)
18458 return P->first.alignmentAtOffset(offset: P->second);
18459
18460 // If that failed, return the type's alignment.
18461 return S.Context.getTypeAlignInChars(T: E->getType()->getPointeeType());
18462}
18463
18464/// CheckCastAlign - Implements -Wcast-align, which warns when a
18465/// pointer cast increases the alignment requirements.
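/// For example, casting a 'char *' buffer to 'int *' typically warns, since
/// the required alignment increases from 1 to 4 on most targets, while
/// casting to 'void *' or 'char *' never does.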
18466void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
18467 // This is actually a lot of work to potentially be doing on every
  // cast; don't do it if we're ignoring -Wcast-align (as is the default).
18469 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin()))
18470 return;
18471
18472 // Ignore dependent types.
18473 if (T->isDependentType() || Op->getType()->isDependentType())
18474 return;
18475
18476 // Require that the destination be a pointer type.
18477 const PointerType *DestPtr = T->getAs<PointerType>();
18478 if (!DestPtr) return;
18479
18480 // If the destination has alignment 1, we're done.
18481 QualType DestPointee = DestPtr->getPointeeType();
18482 if (DestPointee->isIncompleteType()) return;
18483 CharUnits DestAlign = Context.getTypeAlignInChars(T: DestPointee);
18484 if (DestAlign.isOne()) return;
18485
18486 // Require that the source be a pointer type.
18487 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>();
18488 if (!SrcPtr) return;
18489 QualType SrcPointee = SrcPtr->getPointeeType();
18490
18491 // Explicitly allow casts from cv void*. We already implicitly
18492 // allowed casts to cv void*, since they have alignment 1.
18493 // Also allow casts involving incomplete types, which implicitly
18494 // includes 'void'.
18495 if (SrcPointee->isIncompleteType()) return;
18496
18497 CharUnits SrcAlign = getPresumedAlignmentOfPointer(E: Op, S&: *this);
18498
18499 if (SrcAlign >= DestAlign) return;
18500
18501 Diag(TRange.getBegin(), diag::warn_cast_align)
18502 << Op->getType() << T
18503 << static_cast<unsigned>(SrcAlign.getQuantity())
18504 << static_cast<unsigned>(DestAlign.getQuantity())
18505 << TRange << Op->getSourceRange();
18506}
18507
18508void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
18509 const ArraySubscriptExpr *ASE,
18510 bool AllowOnePastEnd, bool IndexNegated) {
18511 // Already diagnosed by the constant evaluator.
18512 if (isConstantEvaluatedContext())
18513 return;
18514
18515 IndexExpr = IndexExpr->IgnoreParenImpCasts();
18516 if (IndexExpr->isValueDependent())
18517 return;
18518
18519 const Type *EffectiveType =
18520 BaseExpr->getType()->getPointeeOrArrayElementType();
18521 BaseExpr = BaseExpr->IgnoreParenCasts();
18522 const ConstantArrayType *ArrayTy =
18523 Context.getAsConstantArrayType(T: BaseExpr->getType());
18524
18525 LangOptions::StrictFlexArraysLevelKind
18526 StrictFlexArraysLevel = getLangOpts().getStrictFlexArraysLevel();
18527
18528 const Type *BaseType =
18529 ArrayTy == nullptr ? nullptr : ArrayTy->getElementType().getTypePtr();
18530 bool IsUnboundedArray =
18531 BaseType == nullptr || BaseExpr->isFlexibleArrayMemberLike(
18532 Context, StrictFlexArraysLevel,
18533 /*IgnoreTemplateOrMacroSubstitution=*/true);
18534 if (EffectiveType->isDependentType() ||
18535 (!IsUnboundedArray && BaseType->isDependentType()))
18536 return;
18537
18538 Expr::EvalResult Result;
18539 if (!IndexExpr->EvaluateAsInt(Result, Ctx: Context, AllowSideEffects: Expr::SE_AllowSideEffects))
18540 return;
18541
18542 llvm::APSInt index = Result.Val.getInt();
18543 if (IndexNegated) {
18544 index.setIsUnsigned(false);
18545 index = -index;
18546 }
18547
18548 if (IsUnboundedArray) {
18549 if (EffectiveType->isFunctionType())
18550 return;
18551 if (index.isUnsigned() || !index.isNegative()) {
18552 const auto &ASTC = getASTContext();
18553 unsigned AddrBits = ASTC.getTargetInfo().getPointerWidth(
18554 AddrSpace: EffectiveType->getCanonicalTypeInternal().getAddressSpace());
18555 if (index.getBitWidth() < AddrBits)
18556 index = index.zext(width: AddrBits);
18557 std::optional<CharUnits> ElemCharUnits =
18558 ASTC.getTypeSizeInCharsIfKnown(Ty: EffectiveType);
18559 // PR50741 - If EffectiveType has unknown size (e.g., if it's a void
18560 // pointer) bounds-checking isn't meaningful.
18561 if (!ElemCharUnits || ElemCharUnits->isZero())
18562 return;
18563 llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity());
      // If the index has more active bits than the address space, we already
      // know we have a bounds violation to warn about. Otherwise, compute the
      // address of the (index + 1)th element, and warn about a bounds
      // violation only if that address exceeds the address space.
18568 if (index.getActiveBits() <= AddrBits) {
18569 bool Overflow;
18570 llvm::APInt Product(index);
18571 Product += 1;
18572 Product = Product.umul_ov(RHS: ElemBytes, Overflow);
18573 if (!Overflow && Product.getActiveBits() <= AddrBits)
18574 return;
18575 }
18576
      // We need to compute the maximum number of elements that fit in the
      // address space, since that is included in the diagnostic message.
18579 llvm::APInt MaxElems = llvm::APInt::getMaxValue(numBits: AddrBits);
18580 MaxElems = MaxElems.zext(width: std::max(a: AddrBits + 1, b: ElemBytes.getBitWidth()));
18581 MaxElems += 1;
18582 ElemBytes = ElemBytes.zextOrTrunc(width: MaxElems.getBitWidth());
18583 MaxElems = MaxElems.udiv(RHS: ElemBytes);
18584
18585 unsigned DiagID =
18586 ASE ? diag::warn_array_index_exceeds_max_addressable_bounds
18587 : diag::warn_ptr_arith_exceeds_max_addressable_bounds;
18588
      // The diagnostic message shows the element size in bits and in "bytes"
      // (platform-dependent CharUnits).
18591 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
18592 PDiag(DiagID)
18593 << toString(I: index, Radix: 10, Signed: true) << AddrBits
18594 << (unsigned)ASTC.toBits(CharSize: *ElemCharUnits)
18595 << toString(I: ElemBytes, Radix: 10, Signed: false)
18596 << toString(I: MaxElems, Radix: 10, Signed: false)
18597 << (unsigned)MaxElems.getLimitedValue(Limit: ~0U)
18598 << IndexExpr->getSourceRange());
18599
18600 const NamedDecl *ND = nullptr;
18601 // Try harder to find a NamedDecl to point at in the note.
18602 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Val: BaseExpr))
18603 BaseExpr = ASE->getBase()->IgnoreParenCasts();
18604 if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: BaseExpr))
18605 ND = DRE->getDecl();
18606 if (const auto *ME = dyn_cast<MemberExpr>(Val: BaseExpr))
18607 ND = ME->getMemberDecl();
18608
18609 if (ND)
18610 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
18611 PDiag(diag::note_array_declared_here) << ND);
18612 }
18613 return;
18614 }
18615
18616 if (index.isUnsigned() || !index.isNegative()) {
18617 // It is possible that the type of the base expression after
18618 // IgnoreParenCasts is incomplete, even though the type of the base
18619 // expression before IgnoreParenCasts is complete (see PR39746 for an
18620 // example). In this case we have no information about whether the array
18621 // access exceeds the array bounds. However we can still diagnose an array
18622 // access which precedes the array bounds.
18623 if (BaseType->isIncompleteType())
18624 return;
18625
18626 llvm::APInt size = ArrayTy->getSize();
18627
18628 if (BaseType != EffectiveType) {
18629 // Make sure we're comparing apples to apples when comparing index to
18630 // size.
18631 uint64_t ptrarith_typesize = Context.getTypeSize(T: EffectiveType);
18632 uint64_t array_typesize = Context.getTypeSize(T: BaseType);
18633
18634 // Handle ptrarith_typesize being zero, such as when casting to void*.
18635 // Use the size in bits (what "getTypeSize()" returns) rather than bytes.
18636 if (!ptrarith_typesize)
18637 ptrarith_typesize = Context.getCharWidth();
18638
18639 if (ptrarith_typesize != array_typesize) {
18640 // There's a cast to a different size type involved.
18641 uint64_t ratio = array_typesize / ptrarith_typesize;
18642
18643 // TODO: Be smarter about handling cases where array_typesize is not a
18644 // multiple of ptrarith_typesize.
18645 if (ptrarith_typesize * ratio == array_typesize)
18646 size *= llvm::APInt(size.getBitWidth(), ratio);
18647 }
18648 }
18649
18650 if (size.getBitWidth() > index.getBitWidth())
18651 index = index.zext(width: size.getBitWidth());
18652 else if (size.getBitWidth() < index.getBitWidth())
18653 size = size.zext(width: index.getBitWidth());
18654
18655 // For array subscripting the index must be less than size, but for pointer
18656 // arithmetic also allow the index (offset) to be equal to size since
18657 // computing the next address after the end of the array is legal and
18658 // commonly done e.g. in C++ iterators and range-based for loops.
18659 if (AllowOnePastEnd ? index.ule(RHS: size) : index.ult(RHS: size))
18660 return;
18661
18662 // Suppress the warning if the subscript expression (as identified by the
18663 // ']' location) and the index expression are both from macro expansions
18664 // within a system header.
18665 if (ASE) {
18666 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc(
18667 Loc: ASE->getRBracketLoc());
18668 if (SourceMgr.isInSystemHeader(Loc: RBracketLoc)) {
18669 SourceLocation IndexLoc =
18670 SourceMgr.getSpellingLoc(Loc: IndexExpr->getBeginLoc());
18671 if (SourceMgr.isWrittenInSameFile(Loc1: RBracketLoc, Loc2: IndexLoc))
18672 return;
18673 }
18674 }
18675
18676 unsigned DiagID = ASE ? diag::warn_array_index_exceeds_bounds
18677 : diag::warn_ptr_arith_exceeds_bounds;
18678 unsigned CastMsg = (!ASE || BaseType == EffectiveType) ? 0 : 1;
18679 QualType CastMsgTy = ASE ? ASE->getLHS()->getType() : QualType();
18680
18681 DiagRuntimeBehavior(
18682 BaseExpr->getBeginLoc(), BaseExpr,
18683 PDiag(DiagID) << toString(I: index, Radix: 10, Signed: true) << ArrayTy->desugar()
18684 << CastMsg << CastMsgTy << IndexExpr->getSourceRange());
18685 } else {
18686 unsigned DiagID = diag::warn_array_index_precedes_bounds;
18687 if (!ASE) {
18688 DiagID = diag::warn_ptr_arith_precedes_bounds;
18689 if (index.isNegative()) index = -index;
18690 }
18691
18692 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
18693 PDiag(DiagID) << toString(I: index, Radix: 10, Signed: true)
18694 << IndexExpr->getSourceRange());
18695 }
18696
18697 const NamedDecl *ND = nullptr;
18698 // Try harder to find a NamedDecl to point at in the note.
18699 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Val: BaseExpr))
18700 BaseExpr = ASE->getBase()->IgnoreParenCasts();
18701 if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: BaseExpr))
18702 ND = DRE->getDecl();
18703 if (const auto *ME = dyn_cast<MemberExpr>(Val: BaseExpr))
18704 ND = ME->getMemberDecl();
18705
18706 if (ND)
18707 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
18708 PDiag(diag::note_array_declared_here) << ND);
18709}
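// For example, with -Warray-bounds:
//   int a[4];
//   a[4] = 0;       // warns: the index is past the end of the array
//   int *p = &a[4]; // no warning: a one-past-the-end address is allowed
//                   // because the subscript is an operand of '&'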
18710
18711void Sema::CheckArrayAccess(const Expr *expr) {
18712 int AllowOnePastEnd = 0;
18713 while (expr) {
18714 expr = expr->IgnoreParenImpCasts();
18715 switch (expr->getStmtClass()) {
18716 case Stmt::ArraySubscriptExprClass: {
18717 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(Val: expr);
18718 CheckArrayAccess(BaseExpr: ASE->getBase(), IndexExpr: ASE->getIdx(), ASE,
18719 AllowOnePastEnd: AllowOnePastEnd > 0);
18720 expr = ASE->getBase();
18721 break;
18722 }
18723 case Stmt::MemberExprClass: {
18724 expr = cast<MemberExpr>(Val: expr)->getBase();
18725 break;
18726 }
18727 case Stmt::OMPArraySectionExprClass: {
18728 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(Val: expr);
18729 if (ASE->getLowerBound())
18730 CheckArrayAccess(BaseExpr: ASE->getBase(), IndexExpr: ASE->getLowerBound(),
18731 /*ASE=*/nullptr, AllowOnePastEnd: AllowOnePastEnd > 0);
18732 return;
18733 }
18734 case Stmt::UnaryOperatorClass: {
18735 // Only unwrap the * and & unary operators
18736 const UnaryOperator *UO = cast<UnaryOperator>(Val: expr);
18737 expr = UO->getSubExpr();
18738 switch (UO->getOpcode()) {
18739 case UO_AddrOf:
18740 AllowOnePastEnd++;
18741 break;
18742 case UO_Deref:
18743 AllowOnePastEnd--;
18744 break;
18745 default:
18746 return;
18747 }
18748 break;
18749 }
18750 case Stmt::ConditionalOperatorClass: {
18751 const ConditionalOperator *cond = cast<ConditionalOperator>(Val: expr);
18752 if (const Expr *lhs = cond->getLHS())
18753 CheckArrayAccess(expr: lhs);
18754 if (const Expr *rhs = cond->getRHS())
18755 CheckArrayAccess(expr: rhs);
18756 return;
18757 }
18758 case Stmt::CXXOperatorCallExprClass: {
18759 const auto *OCE = cast<CXXOperatorCallExpr>(Val: expr);
18760 for (const auto *Arg : OCE->arguments())
18761 CheckArrayAccess(Arg);
18762 return;
18763 }
18764 default:
18765 return;
18766 }
18767 }
18768}
18769
18770//===--- CHECK: Objective-C retain cycles ----------------------------------//
18771
18772namespace {
18773
18774struct RetainCycleOwner {
18775 VarDecl *Variable = nullptr;
18776 SourceRange Range;
18777 SourceLocation Loc;
18778 bool Indirect = false;
18779
18780 RetainCycleOwner() = default;
18781
18782 void setLocsFrom(Expr *e) {
18783 Loc = e->getExprLoc();
18784 Range = e->getSourceRange();
18785 }
18786};
18787
18788} // namespace
18789
18790/// Consider whether capturing the given variable can possibly lead to
18791/// a retain cycle.
18792static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
18793 // In ARC, it's captured strongly iff the variable has __strong
18794 // lifetime. In MRR, it's captured strongly if the variable is
18795 // __block and has an appropriate type.
18796 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
18797 return false;
18798
18799 owner.Variable = var;
18800 if (ref)
18801 owner.setLocsFrom(ref);
18802 return true;
18803}
18804
18805static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
18806 while (true) {
18807 e = e->IgnoreParens();
18808 if (CastExpr *cast = dyn_cast<CastExpr>(Val: e)) {
18809 switch (cast->getCastKind()) {
18810 case CK_BitCast:
18811 case CK_LValueBitCast:
18812 case CK_LValueToRValue:
18813 case CK_ARCReclaimReturnedObject:
18814 e = cast->getSubExpr();
18815 continue;
18816
18817 default:
18818 return false;
18819 }
18820 }
18821
18822 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(Val: e)) {
18823 ObjCIvarDecl *ivar = ref->getDecl();
18824 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
18825 return false;
18826
18827 // Try to find a retain cycle in the base.
18828 if (!findRetainCycleOwner(S, e: ref->getBase(), owner))
18829 return false;
18830
18831 if (ref->isFreeIvar()) owner.setLocsFrom(ref);
18832 owner.Indirect = true;
18833 return true;
18834 }
18835
18836 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(Val: e)) {
18837 VarDecl *var = dyn_cast<VarDecl>(Val: ref->getDecl());
18838 if (!var) return false;
18839 return considerVariable(var, ref, owner);
18840 }
18841
18842 if (MemberExpr *member = dyn_cast<MemberExpr>(Val: e)) {
18843 if (member->isArrow()) return false;
18844
18845 // Don't count this as an indirect ownership.
18846 e = member->getBase();
18847 continue;
18848 }
18849
18850 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(Val: e)) {
18851 // Only pay attention to pseudo-objects on property references.
18852 ObjCPropertyRefExpr *pre
18853 = dyn_cast<ObjCPropertyRefExpr>(Val: pseudo->getSyntacticForm()
18854 ->IgnoreParens());
18855 if (!pre) return false;
18856 if (pre->isImplicitProperty()) return false;
18857 ObjCPropertyDecl *property = pre->getExplicitProperty();
18858 if (!property->isRetaining() &&
18859 !(property->getPropertyIvarDecl() &&
18860 property->getPropertyIvarDecl()->getType()
18861 .getObjCLifetime() == Qualifiers::OCL_Strong))
18862 return false;
18863
18864 owner.Indirect = true;
18865 if (pre->isSuperReceiver()) {
18866 owner.Variable = S.getCurMethodDecl()->getSelfDecl();
18867 if (!owner.Variable)
18868 return false;
18869 owner.Loc = pre->getLocation();
18870 owner.Range = pre->getSourceRange();
18871 return true;
18872 }
18873 e = const_cast<Expr*>(cast<OpaqueValueExpr>(Val: pre->getBase())
18874 ->getSourceExpr());
18875 continue;
18876 }
18877
18878 // Array ivars?
18879
18880 return false;
18881 }
18882}
18883
18884namespace {
18885
18886 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
18887 VarDecl *Variable;
18888 Expr *Capturer = nullptr;
18889 bool VarWillBeReased = false;
18890
18891 FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
18892 : EvaluatedExprVisitor<FindCaptureVisitor>(Context),
18893 Variable(variable) {}
18894
18895 void VisitDeclRefExpr(DeclRefExpr *ref) {
18896 if (ref->getDecl() == Variable && !Capturer)
18897 Capturer = ref;
18898 }
18899
18900 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
18901 if (Capturer) return;
18902 Visit(ref->getBase());
18903 if (Capturer && ref->isFreeIvar())
18904 Capturer = ref;
18905 }
18906
18907 void VisitBlockExpr(BlockExpr *block) {
18908 // Look inside nested blocks
18909 if (block->getBlockDecl()->capturesVariable(var: Variable))
18910 Visit(S: block->getBlockDecl()->getBody());
18911 }
18912
18913 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) {
18914 if (Capturer) return;
18915 if (OVE->getSourceExpr())
18916 Visit(OVE->getSourceExpr());
18917 }
18918
18919 void VisitBinaryOperator(BinaryOperator *BinOp) {
18920 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign)
18921 return;
18922 Expr *LHS = BinOp->getLHS();
18923 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(Val: LHS)) {
18924 if (DRE->getDecl() != Variable)
18925 return;
18926 if (Expr *RHS = BinOp->getRHS()) {
18927 RHS = RHS->IgnoreParenCasts();
18928 std::optional<llvm::APSInt> Value;
18929 VarWillBeReased =
18930 (RHS && (Value = RHS->getIntegerConstantExpr(Ctx: Context)) &&
18931 *Value == 0);
18932 }
18933 }
18934 }
18935 };
18936
18937} // namespace
18938
18939/// Check whether the given argument is a block which captures a
18940/// variable.
18941static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) {
18942 assert(owner.Variable && owner.Loc.isValid());
18943
18944 e = e->IgnoreParenCasts();
18945
18946 // Look through [^{...} copy] and Block_copy(^{...}).
18947 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(Val: e)) {
18948 Selector Cmd = ME->getSelector();
18949 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(argIndex: 0) == "copy") {
18950 e = ME->getInstanceReceiver();
18951 if (!e)
18952 return nullptr;
18953 e = e->IgnoreParenCasts();
18954 }
18955 } else if (CallExpr *CE = dyn_cast<CallExpr>(Val: e)) {
18956 if (CE->getNumArgs() == 1) {
18957 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(Val: CE->getCalleeDecl());
18958 if (Fn) {
18959 const IdentifierInfo *FnI = Fn->getIdentifier();
18960 if (FnI && FnI->isStr(Str: "_Block_copy")) {
18961 e = CE->getArg(Arg: 0)->IgnoreParenCasts();
18962 }
18963 }
18964 }
18965 }
18966
18967 BlockExpr *block = dyn_cast<BlockExpr>(Val: e);
18968 if (!block || !block->getBlockDecl()->capturesVariable(var: owner.Variable))
18969 return nullptr;
18970
18971 FindCaptureVisitor visitor(S.Context, owner.Variable);
18972 visitor.Visit(S: block->getBlockDecl()->getBody());
18973 return visitor.VarWillBeReased ? nullptr : visitor.Capturer;
18974}
18975
18976static void diagnoseRetainCycle(Sema &S, Expr *capturer,
18977 RetainCycleOwner &owner) {
18978 assert(capturer);
18979 assert(owner.Variable && owner.Loc.isValid());
18980
18981 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle)
18982 << owner.Variable << capturer->getSourceRange();
18983 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner)
18984 << owner.Indirect << owner.Range;
18985}
18986
18987/// Check for a keyword selector that starts with the word 'add' or
18988/// 'set'.
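/// For example, "setDelegate:" and "addObject:" are setter-like, while
/// "setup:" and "address:" are not (the name continues in lowercase after
/// "set"/"add"); "addOperationWithBlock:" is explicitly exempted below.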
18989static bool isSetterLikeSelector(Selector sel) {
18990 if (sel.isUnarySelector()) return false;
18991
18992 StringRef str = sel.getNameForSlot(argIndex: 0);
18993 str = str.ltrim(Char: '_');
18994 if (str.starts_with(Prefix: "set"))
18995 str = str.substr(Start: 3);
18996 else if (str.starts_with(Prefix: "add")) {
18997 // Specially allow 'addOperationWithBlock:'.
18998 if (sel.getNumArgs() == 1 && str.starts_with(Prefix: "addOperationWithBlock"))
18999 return false;
19000 str = str.substr(Start: 3);
19001 } else
19002 return false;
19003
19004 if (str.empty()) return true;
19005 return !isLowercase(c: str.front());
19006}
19007
19008static std::optional<int>
19009GetNSMutableArrayArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
19010 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass(
19011 InterfaceDecl: Message->getReceiverInterface(),
19012 NSClassKind: NSAPI::ClassId_NSMutableArray);
19013 if (!IsMutableArray) {
19014 return std::nullopt;
19015 }
19016
19017 Selector Sel = Message->getSelector();
19018
19019 std::optional<NSAPI::NSArrayMethodKind> MKOpt =
19020 S.NSAPIObj->getNSArrayMethodKind(Sel);
19021 if (!MKOpt) {
19022 return std::nullopt;
19023 }
19024
19025 NSAPI::NSArrayMethodKind MK = *MKOpt;
19026
19027 switch (MK) {
19028 case NSAPI::NSMutableArr_addObject:
19029 case NSAPI::NSMutableArr_insertObjectAtIndex:
19030 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript:
19031 return 0;
19032 case NSAPI::NSMutableArr_replaceObjectAtIndex:
19033 return 1;
19034
19035 default:
19036 return std::nullopt;
19037 }
19038
19039 return std::nullopt;
19040}
19041
19042static std::optional<int>
19043GetNSMutableDictionaryArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
19044 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass(
19045 InterfaceDecl: Message->getReceiverInterface(),
19046 NSClassKind: NSAPI::ClassId_NSMutableDictionary);
19047 if (!IsMutableDictionary) {
19048 return std::nullopt;
19049 }
19050
19051 Selector Sel = Message->getSelector();
19052
19053 std::optional<NSAPI::NSDictionaryMethodKind> MKOpt =
19054 S.NSAPIObj->getNSDictionaryMethodKind(Sel);
19055 if (!MKOpt) {
19056 return std::nullopt;
19057 }
19058
19059 NSAPI::NSDictionaryMethodKind MK = *MKOpt;
19060
19061 switch (MK) {
19062 case NSAPI::NSMutableDict_setObjectForKey:
19063 case NSAPI::NSMutableDict_setValueForKey:
19064 case NSAPI::NSMutableDict_setObjectForKeyedSubscript:
19065 return 0;
19066
19067 default:
19068 return std::nullopt;
19069 }
19070
19071 return std::nullopt;
19072}
19073
19074static std::optional<int> GetNSSetArgumentIndex(Sema &S,
19075 ObjCMessageExpr *Message) {
19076 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass(
19077 InterfaceDecl: Message->getReceiverInterface(),
19078 NSClassKind: NSAPI::ClassId_NSMutableSet);
19079
19080 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass(
19081 InterfaceDecl: Message->getReceiverInterface(),
19082 NSClassKind: NSAPI::ClassId_NSMutableOrderedSet);
19083 if (!IsMutableSet && !IsMutableOrderedSet) {
19084 return std::nullopt;
19085 }
19086
19087 Selector Sel = Message->getSelector();
19088
19089 std::optional<NSAPI::NSSetMethodKind> MKOpt =
19090 S.NSAPIObj->getNSSetMethodKind(Sel);
19091 if (!MKOpt) {
19092 return std::nullopt;
19093 }
19094
19095 NSAPI::NSSetMethodKind MK = *MKOpt;
19096
19097 switch (MK) {
19098 case NSAPI::NSMutableSet_addObject:
19099 case NSAPI::NSOrderedSet_setObjectAtIndex:
19100 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript:
19101 case NSAPI::NSOrderedSet_insertObjectAtIndex:
19102 return 0;
19103 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject:
19104 return 1;
19105 }
19106
19107 return std::nullopt;
19108}
19109
19110void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
19111 if (!Message->isInstanceMessage()) {
19112 return;
19113 }
19114
19115 std::optional<int> ArgOpt;
19116
19117 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(S&: *this, Message)) &&
19118 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(S&: *this, Message)) &&
19119 !(ArgOpt = GetNSSetArgumentIndex(S&: *this, Message))) {
19120 return;
19121 }
19122
19123 int ArgIndex = *ArgOpt;
19124
19125 Expr *Arg = Message->getArg(Arg: ArgIndex)->IgnoreImpCasts();
19126 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Val: Arg)) {
19127 Arg = OE->getSourceExpr()->IgnoreImpCasts();
19128 }
19129
19130 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
19131 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Val: Arg)) {
19132 if (ArgRE->isObjCSelfExpr()) {
19133 Diag(Message->getSourceRange().getBegin(),
19134 diag::warn_objc_circular_container)
19135 << ArgRE->getDecl() << StringRef("'super'");
19136 }
19137 }
19138 } else {
19139 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts();
19140
19141 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Val: Receiver)) {
19142 Receiver = OE->getSourceExpr()->IgnoreImpCasts();
19143 }
19144
19145 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Val: Receiver)) {
19146 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Val: Arg)) {
19147 if (ReceiverRE->getDecl() == ArgRE->getDecl()) {
19148 ValueDecl *Decl = ReceiverRE->getDecl();
19149 Diag(Message->getSourceRange().getBegin(),
19150 diag::warn_objc_circular_container)
19151 << Decl << Decl;
19152 if (!ArgRE->isObjCSelfExpr()) {
19153 Diag(Decl->getLocation(),
19154 diag::note_objc_circular_container_declared_here)
19155 << Decl;
19156 }
19157 }
19158 }
19159 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Val: Receiver)) {
19160 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Val: Arg)) {
19161 if (IvarRE->getDecl() == IvarArgRE->getDecl()) {
19162 ObjCIvarDecl *Decl = IvarRE->getDecl();
19163 Diag(Message->getSourceRange().getBegin(),
19164 diag::warn_objc_circular_container)
19165 << Decl << Decl;
19166 Diag(Decl->getLocation(),
19167 diag::note_objc_circular_container_declared_here)
19168 << Decl;
19169 }
19170 }
19171 }
19172 }
19173}
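// For example, "[array addObject:array];" on an NSMutableArray is diagnosed
// because inserting a mutable collection into itself creates a circular
// container.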
19174
19175/// Check a message send to see if it's likely to cause a retain cycle.
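/// For example, under ARC, "[self setHandler:^{ [self doSomething]; }];"
/// typically warns: the receiver retains the block via the setter, and the
/// block captures "self" strongly, forming a cycle.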
19176void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
19177 // Only check instance methods whose selector looks like a setter.
19178 if (!msg->isInstanceMessage() || !isSetterLikeSelector(sel: msg->getSelector()))
19179 return;
19180
19181 // Try to find a variable that the receiver is strongly owned by.
19182 RetainCycleOwner owner;
19183 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
19184 if (!findRetainCycleOwner(S&: *this, e: msg->getInstanceReceiver(), owner))
19185 return;
19186 } else {
19187 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
19188 owner.Variable = getCurMethodDecl()->getSelfDecl();
19189 owner.Loc = msg->getSuperLoc();
19190 owner.Range = msg->getSuperLoc();
19191 }
19192
19193 // Check whether the receiver is captured by any of the arguments.
19194 const ObjCMethodDecl *MD = msg->getMethodDecl();
19195 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) {
19196 if (Expr *capturer = findCapturingExpr(S&: *this, e: msg->getArg(Arg: i), owner)) {
19197 // noescape blocks should not be retained by the method.
19198 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>())
19199 continue;
19200 return diagnoseRetainCycle(S&: *this, capturer, owner);
19201 }
19202 }
19203}
19204
19205/// Check a property assign to see if it's likely to cause a retain cycle.
19206void Sema::checkRetainCycles(Expr *receiver, Expr *argument) {
19207 RetainCycleOwner owner;
19208 if (!findRetainCycleOwner(S&: *this, e: receiver, owner))
19209 return;
19210
19211 if (Expr *capturer = findCapturingExpr(S&: *this, e: argument, owner))
19212 diagnoseRetainCycle(S&: *this, capturer, owner);
19213}
19214
19215void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) {
19216 RetainCycleOwner Owner;
19217 if (!considerVariable(var: Var, /*DeclRefExpr=*/ref: nullptr, owner&: Owner))
19218 return;
19219
19220 // Because we don't have an expression for the variable, we have to set the
19221 // location explicitly here.
19222 Owner.Loc = Var->getLocation();
19223 Owner.Range = Var->getSourceRange();
19224
19225 if (Expr *Capturer = findCapturingExpr(S&: *this, e: Init, owner&: Owner))
19226 diagnoseRetainCycle(S&: *this, capturer: Capturer, owner&: Owner);
19227}
19228
19229static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc,
19230 Expr *RHS, bool isProperty) {
  // Check if RHS is an Objective-C object literal, which can also get
  // immediately zapped in a weak reference. Note that we explicitly
19233 // allow ObjCStringLiterals, since those are designed to never really die.
19234 RHS = RHS->IgnoreParenImpCasts();
19235
  // This enum needs to match the 'select' in warn_objc_arc_literal_assign
  // (off by 1).
19238 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(FromE: RHS);
19239 if (Kind == Sema::LK_String || Kind == Sema::LK_None)
19240 return false;
19241
19242 S.Diag(Loc, diag::warn_arc_literal_assign)
19243 << (unsigned) Kind
19244 << (isProperty ? 0 : 1)
19245 << RHS->getSourceRange();
19246
19247 return true;
19248}
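// For example, under ARC, assigning an array literal to a __weak variable,
// as in "weakObj = @[ @1 ];", is diagnosed because the object may be
// released immediately after the assignment; assigning an NSString literal
// is not, since string literals are effectively immortal.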
19249
19250static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc,
19251 Qualifiers::ObjCLifetime LT,
19252 Expr *RHS, bool isProperty) {
  // Strip off implicit casts until we reach the ARC-specific one.
19254 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(Val: RHS)) {
19255 if (cast->getCastKind() == CK_ARCConsumeObject) {
19256 S.Diag(Loc, diag::warn_arc_retained_assign)
19257 << (LT == Qualifiers::OCL_ExplicitNone)
19258 << (isProperty ? 0 : 1)
19259 << RHS->getSourceRange();
19260 return true;
19261 }
19262 RHS = cast->getSubExpr();
19263 }
19264
19265 if (LT == Qualifiers::OCL_Weak &&
19266 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty))
19267 return true;
19268
19269 return false;
19270}
19271
19272bool Sema::checkUnsafeAssigns(SourceLocation Loc,
19273 QualType LHS, Expr *RHS) {
19274 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime();
19275
19276 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone)
19277 return false;
19278
19279 if (checkUnsafeAssignObject(S&: *this, Loc, LT, RHS, isProperty: false))
19280 return true;
19281
19282 return false;
19283}
19284
19285void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
19286 Expr *LHS, Expr *RHS) {
19287 QualType LHSType;
  // The type of a PropertyRef on the LHS has to be obtained directly from
  // its declaration, since the expression itself has a pseudo-object type.
19290 ObjCPropertyRefExpr *PRE
19291 = dyn_cast<ObjCPropertyRefExpr>(Val: LHS->IgnoreParens());
19292 if (PRE && !PRE->isImplicitProperty()) {
19293 const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
19294 if (PD)
19295 LHSType = PD->getType();
19296 }
19297
19298 if (LHSType.isNull())
19299 LHSType = LHS->getType();
19300
19301 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime();
19302
19303 if (LT == Qualifiers::OCL_Weak) {
19304 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc))
19305 getCurFunction()->markSafeWeakUse(E: LHS);
19306 }
19307
19308 if (checkUnsafeAssigns(Loc, LHS: LHSType, RHS))
19309 return;
19310
  // FIXME: Check for other lifetimes.
19312 if (LT != Qualifiers::OCL_None)
19313 return;
19314
19315 if (PRE) {
19316 if (PRE->isImplicitProperty())
19317 return;
19318 const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
19319 if (!PD)
19320 return;
19321
19322 unsigned Attributes = PD->getPropertyAttributes();
19323 if (Attributes & ObjCPropertyAttribute::kind_assign) {
      // When the 'assign' attribute was not explicitly specified by the user,
      // ignore it and rely on the property type itself for lifetime info.
19327 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten();
19328 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) &&
19329 LHSType->isObjCRetainableType())
19330 return;
19331
19332 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(Val: RHS)) {
19333 if (cast->getCastKind() == CK_ARCConsumeObject) {
19334 Diag(Loc, diag::warn_arc_retained_property_assign)
19335 << RHS->getSourceRange();
19336 return;
19337 }
19338 RHS = cast->getSubExpr();
19339 }
19340 } else if (Attributes & ObjCPropertyAttribute::kind_weak) {
19341 if (checkUnsafeAssignObject(S&: *this, Loc, LT: Qualifiers::OCL_Weak, RHS, isProperty: true))
19342 return;
19343 }
19344 }
19345}
19346
19347//===--- CHECK: Empty statement body (-Wempty-body) ---------------------===//
19348
19349static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr,
19350 SourceLocation StmtLoc,
19351 const NullStmt *Body) {
19352 // Do not warn if the body is a macro that expands to nothing, e.g:
19353 //
19354 // #define CALL(x)
19355 // if (condition)
19356 // CALL(0);
19357 if (Body->hasLeadingEmptyMacro())
19358 return false;
19359
19360 // Get line numbers of statement and body.
19361 bool StmtLineInvalid;
19362 unsigned StmtLine = SourceMgr.getPresumedLineNumber(Loc: StmtLoc,
19363 Invalid: &StmtLineInvalid);
19364 if (StmtLineInvalid)
19365 return false;
19366
19367 bool BodyLineInvalid;
19368 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Loc: Body->getSemiLoc(),
19369 Invalid: &BodyLineInvalid);
19370 if (BodyLineInvalid)
19371 return false;
19372
19373 // Warn if null statement and body are on the same line.
19374 if (StmtLine != BodyLine)
19375 return false;
19376
19377 return true;
19378}
19379
19380void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
19381 const Stmt *Body,
19382 unsigned DiagID) {
19383 // Since this is a syntactic check, don't emit diagnostic for template
19384 // instantiations, this just adds noise.
19385 if (CurrentInstantiationScope)
19386 return;
19387
19388 // The body should be a null statement.
19389 const NullStmt *NBody = dyn_cast<NullStmt>(Val: Body);
19390 if (!NBody)
19391 return;
19392
19393 // Do the usual checks.
19394 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, Body: NBody))
19395 return;
19396
19397 Diag(NBody->getSemiLoc(), DiagID);
19398 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
19399}
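
// Illustrative sketch (hypothetical user code): the null-statement body is
// only diagnosed when the semicolon sits on the same line as the statement it
// terminates, e.g.:
//
//   if (cond);        // warning: if statement has empty body
//     do_something(); // note suggests putting the semicolon on its own line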
19400
19401void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
19402 const Stmt *PossibleBody) {
19403 assert(!CurrentInstantiationScope); // Ensured by caller
19404
19405 SourceLocation StmtLoc;
19406 const Stmt *Body;
19407 unsigned DiagID;
19408 if (const ForStmt *FS = dyn_cast<ForStmt>(Val: S)) {
19409 StmtLoc = FS->getRParenLoc();
19410 Body = FS->getBody();
19411 DiagID = diag::warn_empty_for_body;
19412 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(Val: S)) {
19413 StmtLoc = WS->getRParenLoc();
19414 Body = WS->getBody();
19415 DiagID = diag::warn_empty_while_body;
19416 } else
19417 return; // Neither `for' nor `while'.
19418
19419 // The body should be a null statement.
19420 const NullStmt *NBody = dyn_cast<NullStmt>(Val: Body);
19421 if (!NBody)
19422 return;
19423
19424 // Skip expensive checks if diagnostic is disabled.
19425 if (Diags.isIgnored(DiagID, Loc: NBody->getSemiLoc()))
19426 return;
19427
19428 // Do the usual checks.
19429 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, Body: NBody))
19430 return;
19431
19432 // `for(...);' and `while(...);' are popular idioms, so in order to keep
19433 // noise level low, emit diagnostics only if for/while is followed by a
19434 // CompoundStmt, e.g.:
19435 // for (int i = 0; i < n; i++);
19436 // {
19437 // a(i);
19438 // }
19439 // or if for/while is followed by a statement with more indentation
19440 // than for/while itself:
19441 // for (int i = 0; i < n; i++);
19442 // a(i);
19443 bool ProbableTypo = isa<CompoundStmt>(Val: PossibleBody);
19444 if (!ProbableTypo) {
19445 bool BodyColInvalid;
19446 unsigned BodyCol = SourceMgr.getPresumedColumnNumber(
19447 Loc: PossibleBody->getBeginLoc(), Invalid: &BodyColInvalid);
19448 if (BodyColInvalid)
19449 return;
19450
19451 bool StmtColInvalid;
19452 unsigned StmtCol =
19453 SourceMgr.getPresumedColumnNumber(Loc: S->getBeginLoc(), Invalid: &StmtColInvalid);
19454 if (StmtColInvalid)
19455 return;
19456
19457 if (BodyCol > StmtCol)
19458 ProbableTypo = true;
19459 }
19460
19461 if (ProbableTypo) {
19462 Diag(NBody->getSemiLoc(), DiagID);
19463 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
19464 }
19465}
19466
19467//===--- CHECK: Warn on self move with std::move. -------------------------===//
19468
19469/// DiagnoseSelfMove - Emits a warning if a value is moved to itself.
19470void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
19471 SourceLocation OpLoc) {
  if (Diags.isIgnored(diag::warn_self_move, OpLoc))
19473 return;
19474
19475 if (inTemplateInstantiation())
19476 return;
19477
19478 // Strip parens and casts away.
19479 LHSExpr = LHSExpr->IgnoreParenImpCasts();
19480 RHSExpr = RHSExpr->IgnoreParenImpCasts();
19481
19482 // Check for a call to std::move or for a static_cast<T&&>(..) to an xvalue
19483 // which we can treat as an inlined std::move
19484 if (const auto *CE = dyn_cast<CallExpr>(Val: RHSExpr);
19485 CE && CE->getNumArgs() == 1 && CE->isCallToStdMove())
19486 RHSExpr = CE->getArg(Arg: 0);
19487 else if (const auto *CXXSCE = dyn_cast<CXXStaticCastExpr>(Val: RHSExpr);
19488 CXXSCE && CXXSCE->isXValue())
19489 RHSExpr = CXXSCE->getSubExpr();
19490 else
19491 return;
19492
19493 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(Val: LHSExpr);
19494 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(Val: RHSExpr);
19495
19496 // Two DeclRefExpr's, check that the decls are the same.
19497 if (LHSDeclRef && RHSDeclRef) {
19498 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
19499 return;
19500 if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
19501 RHSDeclRef->getDecl()->getCanonicalDecl())
19502 return;
19503
19504 auto D = Diag(OpLoc, diag::warn_self_move)
19505 << LHSExpr->getType() << LHSExpr->getSourceRange()
19506 << RHSExpr->getSourceRange();
19507 if (const FieldDecl *F =
19508 getSelfAssignmentClassMemberCandidate(SelfAssigned: RHSDeclRef->getDecl()))
19509 D << 1 << F
19510 << FixItHint::CreateInsertion(InsertionLoc: LHSDeclRef->getBeginLoc(), Code: "this->");
19511 else
19512 D << 0;
19513 return;
19514 }
19515
  // Member variables require a different approach to check for self moves.
  // MemberExprs are the same if every nested MemberExpr refers to the same
  // Decl, and the base Exprs are either DeclRefExprs with the same Decl or
  // CXXThisExprs.
19520 const Expr *LHSBase = LHSExpr;
19521 const Expr *RHSBase = RHSExpr;
19522 const MemberExpr *LHSME = dyn_cast<MemberExpr>(Val: LHSExpr);
19523 const MemberExpr *RHSME = dyn_cast<MemberExpr>(Val: RHSExpr);
19524 if (!LHSME || !RHSME)
19525 return;
19526
19527 while (LHSME && RHSME) {
19528 if (LHSME->getMemberDecl()->getCanonicalDecl() !=
19529 RHSME->getMemberDecl()->getCanonicalDecl())
19530 return;
19531
19532 LHSBase = LHSME->getBase();
19533 RHSBase = RHSME->getBase();
19534 LHSME = dyn_cast<MemberExpr>(Val: LHSBase);
19535 RHSME = dyn_cast<MemberExpr>(Val: RHSBase);
19536 }
19537
19538 LHSDeclRef = dyn_cast<DeclRefExpr>(Val: LHSBase);
19539 RHSDeclRef = dyn_cast<DeclRefExpr>(Val: RHSBase);
19540 if (LHSDeclRef && RHSDeclRef) {
19541 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
19542 return;
19543 if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
19544 RHSDeclRef->getDecl()->getCanonicalDecl())
19545 return;
19546
19547 Diag(OpLoc, diag::warn_self_move)
19548 << LHSExpr->getType() << 0 << LHSExpr->getSourceRange()
19549 << RHSExpr->getSourceRange();
19550 return;
19551 }
19552
19553 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
19554 Diag(OpLoc, diag::warn_self_move)
19555 << LHSExpr->getType() << 0 << LHSExpr->getSourceRange()
19556 << RHSExpr->getSourceRange();
19557}
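
// Illustrative sketch (hypothetical user code): both the std::move call and
// the static_cast-to-xvalue form handled above reduce to moving a value into
// itself, e.g.:
//
//   std::vector<int> v{1, 2, 3};
//   v = std::move(v); // warning: explicitly moving variable of type
//                     // 'std::vector<int>' to itself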
19558
19559//===--- Layout compatibility ----------------------------------------------//
19560
19561static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);
19562
19563/// Check if two enumeration types are layout-compatible.
19564static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
19565 // C++11 [dcl.enum] p8:
19566 // Two enumeration types are layout-compatible if they have the same
19567 // underlying type.
19568 return ED1->isComplete() && ED2->isComplete() &&
19569 C.hasSameType(T1: ED1->getIntegerType(), T2: ED2->getIntegerType());
19570}
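
// For illustration (hypothetical user code): these two enumerations are
// layout-compatible because both are complete and share the underlying type
// 'int':
//
//   enum A : int { X };
//   enum B : int { Y };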
19571
19572/// Check if two fields are layout-compatible.
19573/// Can be used on union members, which are exempt from alignment requirement
19574/// of common initial sequence.
19575static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1,
19576 FieldDecl *Field2,
19577 bool AreUnionMembers = false) {
19578 [[maybe_unused]] const Type *Field1Parent =
19579 Field1->getParent()->getTypeForDecl();
19580 [[maybe_unused]] const Type *Field2Parent =
19581 Field2->getParent()->getTypeForDecl();
19582 assert(((Field1Parent->isStructureOrClassType() &&
19583 Field2Parent->isStructureOrClassType()) ||
19584 (Field1Parent->isUnionType() && Field2Parent->isUnionType())) &&
19585 "Can't evaluate layout compatibility between a struct field and a "
19586 "union field.");
19587 assert(((!AreUnionMembers && Field1Parent->isStructureOrClassType()) ||
19588 (AreUnionMembers && Field1Parent->isUnionType())) &&
19589 "AreUnionMembers should be 'true' for union fields (only).");
19590
19591 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType()))
19592 return false;
19593
19594 if (Field1->isBitField() != Field2->isBitField())
19595 return false;
19596
19597 if (Field1->isBitField()) {
19598 // Make sure that the bit-fields are the same length.
19599 unsigned Bits1 = Field1->getBitWidthValue(Ctx: C);
19600 unsigned Bits2 = Field2->getBitWidthValue(Ctx: C);
19601
19602 if (Bits1 != Bits2)
19603 return false;
19604 }
19605
19606 if (Field1->hasAttr<clang::NoUniqueAddressAttr>() ||
19607 Field2->hasAttr<clang::NoUniqueAddressAttr>())
19608 return false;
19609
19610 if (!AreUnionMembers &&
19611 Field1->getMaxAlignment() != Field2->getMaxAlignment())
19612 return false;
19613
19614 return true;
19615}
19616
19617/// Check if two standard-layout structs are layout-compatible.
19618/// (C++11 [class.mem] p17)
19619static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1,
19620 RecordDecl *RD2) {
19621 // If both records are C++ classes, check that base classes match.
19622 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(Val: RD1)) {
    // If one of the records is a CXXRecordDecl, we are in C++ mode,
    // so the other one is a CXXRecordDecl too.
19625 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(Val: RD2);
19626 // Check number of base classes.
19627 if (D1CXX->getNumBases() != D2CXX->getNumBases())
19628 return false;
19629
19630 // Check the base classes.
19631 for (CXXRecordDecl::base_class_const_iterator
19632 Base1 = D1CXX->bases_begin(),
19633 BaseEnd1 = D1CXX->bases_end(),
19634 Base2 = D2CXX->bases_begin();
19635 Base1 != BaseEnd1;
19636 ++Base1, ++Base2) {
19637 if (!isLayoutCompatible(C, T1: Base1->getType(), T2: Base2->getType()))
19638 return false;
19639 }
19640 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(Val: RD2)) {
19641 // If only RD2 is a C++ class, it should have zero base classes.
19642 if (D2CXX->getNumBases() > 0)
19643 return false;
19644 }
19645
19646 // Check the fields.
19647 RecordDecl::field_iterator Field2 = RD2->field_begin(),
19648 Field2End = RD2->field_end(),
19649 Field1 = RD1->field_begin(),
19650 Field1End = RD1->field_end();
19651 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) {
19652 if (!isLayoutCompatible(C, Field1: *Field1, Field2: *Field2))
19653 return false;
19654 }
19655 if (Field1 != Field1End || Field2 != Field2End)
19656 return false;
19657
19658 return true;
19659}
19660
19661/// Check if two standard-layout unions are layout-compatible.
19662/// (C++11 [class.mem] p18)
19663static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1,
19664 RecordDecl *RD2) {
19665 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields;
19666 for (auto *Field2 : RD2->fields())
19667 UnmatchedFields.insert(Ptr: Field2);
19668
19669 for (auto *Field1 : RD1->fields()) {
19670 llvm::SmallPtrSet<FieldDecl *, 8>::iterator
19671 I = UnmatchedFields.begin(),
19672 E = UnmatchedFields.end();
19673
19674 for ( ; I != E; ++I) {
      if (isLayoutCompatible(C, Field1, *I, /*AreUnionMembers=*/true)) {
19676 bool Result = UnmatchedFields.erase(Ptr: *I);
19677 (void) Result;
19678 assert(Result);
19679 break;
19680 }
19681 }
19682 if (I == E)
19683 return false;
19684 }
19685
19686 return UnmatchedFields.empty();
19687}
19688
19689static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1,
19690 RecordDecl *RD2) {
19691 if (RD1->isUnion() != RD2->isUnion())
19692 return false;
19693
19694 if (RD1->isUnion())
19695 return isLayoutCompatibleUnion(C, RD1, RD2);
19696 else
19697 return isLayoutCompatibleStruct(C, RD1, RD2);
19698}
19699
19700/// Check if two types are layout-compatible in C++11 sense.
19701static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
19702 if (T1.isNull() || T2.isNull())
19703 return false;
19704
19705 // C++20 [basic.types] p11:
19706 // Two types cv1 T1 and cv2 T2 are layout-compatible types
19707 // if T1 and T2 are the same type, layout-compatible enumerations (9.7.1),
19708 // or layout-compatible standard-layout class types (11.4).
19709 T1 = T1.getCanonicalType().getUnqualifiedType();
19710 T2 = T2.getCanonicalType().getUnqualifiedType();
19711
19712 if (C.hasSameType(T1, T2))
19713 return true;
19714
19715 const Type::TypeClass TC1 = T1->getTypeClass();
19716 const Type::TypeClass TC2 = T2->getTypeClass();
19717
19718 if (TC1 != TC2)
19719 return false;
19720
19721 if (TC1 == Type::Enum) {
19722 return isLayoutCompatible(C,
19723 ED1: cast<EnumType>(Val&: T1)->getDecl(),
19724 ED2: cast<EnumType>(Val&: T2)->getDecl());
19725 } else if (TC1 == Type::Record) {
19726 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
19727 return false;
19728
19729 return isLayoutCompatible(C,
19730 RD1: cast<RecordType>(Val&: T1)->getDecl(),
19731 RD2: cast<RecordType>(Val&: T2)->getDecl());
19732 }
19733
19734 return false;
19735}
19736
19737bool Sema::IsLayoutCompatible(QualType T1, QualType T2) const {
19738 return isLayoutCompatible(C&: getASTContext(), T1, T2);
19739}
19740
19741//===-------------- Pointer interconvertibility ----------------------------//
19742
19743bool Sema::IsPointerInterconvertibleBaseOf(const TypeSourceInfo *Base,
19744 const TypeSourceInfo *Derived) {
19745 QualType BaseT = Base->getType()->getCanonicalTypeUnqualified();
19746 QualType DerivedT = Derived->getType()->getCanonicalTypeUnqualified();
19747
19748 if (BaseT->isStructureOrClassType() && DerivedT->isStructureOrClassType() &&
19749 getASTContext().hasSameType(T1: BaseT, T2: DerivedT))
19750 return true;
19751
19752 if (!IsDerivedFrom(Loc: Derived->getTypeLoc().getBeginLoc(), Derived: DerivedT, Base: BaseT))
19753 return false;
19754
19755 // Per [basic.compound]/4.3, containing object has to be standard-layout.
19756 if (DerivedT->getAsCXXRecordDecl()->isStandardLayout())
19757 return true;
19758
19759 return false;
19760}
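
// Illustrative sketch (hypothetical user code): for a standard-layout derived
// class, the derived object is pointer-interconvertible with its base-class
// subobject, so the __is_pointer_interconvertible_base_of trait implemented
// above yields true for:
//
//   struct B { int i; };
//   struct D : B { void f(); }; // standard-layout: no extra data members
//   static_assert(__is_pointer_interconvertible_base_of(B, D));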
19761
19762//===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//
19763
19764/// Given a type tag expression find the type tag itself.
19765///
19766/// \param TypeExpr Type tag expression, as it appears in user's code.
19767///
19768/// \param VD Declaration of an identifier that appears in a type tag.
19769///
19770/// \param MagicValue Type tag magic value.
19771///
/// \param isConstantEvaluated whether the evaluation should be performed in
/// constant context.
19775static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
19776 const ValueDecl **VD, uint64_t *MagicValue,
19777 bool isConstantEvaluated) {
  while (true) {
19779 if (!TypeExpr)
19780 return false;
19781
19782 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();
19783
19784 switch (TypeExpr->getStmtClass()) {
19785 case Stmt::UnaryOperatorClass: {
19786 const UnaryOperator *UO = cast<UnaryOperator>(Val: TypeExpr);
19787 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
19788 TypeExpr = UO->getSubExpr();
19789 continue;
19790 }
19791 return false;
19792 }
19793
19794 case Stmt::DeclRefExprClass: {
19795 const DeclRefExpr *DRE = cast<DeclRefExpr>(Val: TypeExpr);
19796 *VD = DRE->getDecl();
19797 return true;
19798 }
19799
19800 case Stmt::IntegerLiteralClass: {
19801 const IntegerLiteral *IL = cast<IntegerLiteral>(Val: TypeExpr);
19802 llvm::APInt MagicValueAPInt = IL->getValue();
19803 if (MagicValueAPInt.getActiveBits() <= 64) {
19804 *MagicValue = MagicValueAPInt.getZExtValue();
19805 return true;
19806 } else
19807 return false;
19808 }
19809
19810 case Stmt::BinaryConditionalOperatorClass:
19811 case Stmt::ConditionalOperatorClass: {
19812 const AbstractConditionalOperator *ACO =
19813 cast<AbstractConditionalOperator>(Val: TypeExpr);
19814 bool Result;
19815 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx,
19816 InConstantContext: isConstantEvaluated)) {
19817 if (Result)
19818 TypeExpr = ACO->getTrueExpr();
19819 else
19820 TypeExpr = ACO->getFalseExpr();
19821 continue;
19822 }
19823 return false;
19824 }
19825
19826 case Stmt::BinaryOperatorClass: {
19827 const BinaryOperator *BO = cast<BinaryOperator>(Val: TypeExpr);
19828 if (BO->getOpcode() == BO_Comma) {
19829 TypeExpr = BO->getRHS();
19830 continue;
19831 }
19832 return false;
19833 }
19834
19835 default:
19836 return false;
19837 }
19838 }
19839}
19840
19841/// Retrieve the C type corresponding to type tag TypeExpr.
19842///
19843/// \param TypeExpr Expression that specifies a type tag.
19844///
19845/// \param MagicValues Registered magic values.
19846///
19847/// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
19848/// kind.
19849///
19850/// \param TypeInfo Information about the corresponding C type.
19851///
/// \param isConstantEvaluated whether the evaluation should be performed in
19853/// constant context.
19854///
19855/// \returns true if the corresponding C type was found.
19856static bool GetMatchingCType(
19857 const IdentifierInfo *ArgumentKind, const Expr *TypeExpr,
19858 const ASTContext &Ctx,
19859 const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>
19860 *MagicValues,
19861 bool &FoundWrongKind, Sema::TypeTagData &TypeInfo,
19862 bool isConstantEvaluated) {
19863 FoundWrongKind = false;
19864
19865 // Variable declaration that has type_tag_for_datatype attribute.
19866 const ValueDecl *VD = nullptr;
19867
19868 uint64_t MagicValue;
19869
19870 if (!FindTypeTagExpr(TypeExpr, Ctx, VD: &VD, MagicValue: &MagicValue, isConstantEvaluated))
19871 return false;
19872
19873 if (VD) {
19874 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
19875 if (I->getArgumentKind() != ArgumentKind) {
19876 FoundWrongKind = true;
19877 return false;
19878 }
19879 TypeInfo.Type = I->getMatchingCType();
19880 TypeInfo.LayoutCompatible = I->getLayoutCompatible();
19881 TypeInfo.MustBeNull = I->getMustBeNull();
19882 return true;
19883 }
19884 return false;
19885 }
19886
19887 if (!MagicValues)
19888 return false;
19889
19890 llvm::DenseMap<Sema::TypeTagMagicValue,
19891 Sema::TypeTagData>::const_iterator I =
19892 MagicValues->find(Val: std::make_pair(x&: ArgumentKind, y&: MagicValue));
19893 if (I == MagicValues->end())
19894 return false;
19895
19896 TypeInfo = I->second;
19897 return true;
19898}
19899
19900void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
19901 uint64_t MagicValue, QualType Type,
19902 bool LayoutCompatible,
19903 bool MustBeNull) {
19904 if (!TypeTagForDatatypeMagicValues)
19905 TypeTagForDatatypeMagicValues.reset(
19906 p: new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);
19907
19908 TypeTagMagicValue Magic(ArgumentKind, MagicValue);
19909 (*TypeTagForDatatypeMagicValues)[Magic] =
19910 TypeTagData(Type, LayoutCompatible, MustBeNull);
19911}
19912
19913static bool IsSameCharType(QualType T1, QualType T2) {
19914 const BuiltinType *BT1 = T1->getAs<BuiltinType>();
19915 if (!BT1)
19916 return false;
19917
19918 const BuiltinType *BT2 = T2->getAs<BuiltinType>();
19919 if (!BT2)
19920 return false;
19921
19922 BuiltinType::Kind T1Kind = BT1->getKind();
19923 BuiltinType::Kind T2Kind = BT2->getKind();
19924
19925 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
19926 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
19927 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
19928 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
19929}
19930
19931void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
19932 const ArrayRef<const Expr *> ExprArgs,
19933 SourceLocation CallSiteLoc) {
19934 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
19935 bool IsPointerAttr = Attr->getIsPointer();
19936
19937 // Retrieve the argument representing the 'type_tag'.
19938 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex();
19939 if (TypeTagIdxAST >= ExprArgs.size()) {
19940 Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
19941 << 0 << Attr->getTypeTagIdx().getSourceIndex();
19942 return;
19943 }
19944 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST];
19945 bool FoundWrongKind;
19946 TypeTagData TypeInfo;
19947 if (!GetMatchingCType(ArgumentKind, TypeExpr: TypeTagExpr, Ctx: Context,
19948 MagicValues: TypeTagForDatatypeMagicValues.get(), FoundWrongKind,
19949 TypeInfo, isConstantEvaluated: isConstantEvaluatedContext())) {
19950 if (FoundWrongKind)
19951 Diag(TypeTagExpr->getExprLoc(),
19952 diag::warn_type_tag_for_datatype_wrong_kind)
19953 << TypeTagExpr->getSourceRange();
19954 return;
19955 }
19956
19957 // Retrieve the argument representing the 'arg_idx'.
19958 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex();
19959 if (ArgumentIdxAST >= ExprArgs.size()) {
19960 Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
19961 << 1 << Attr->getArgumentIdx().getSourceIndex();
19962 return;
19963 }
19964 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST];
19965 if (IsPointerAttr) {
19966 // Skip implicit cast of pointer to `void *' (as a function argument).
19967 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Val: ArgumentExpr))
19968 if (ICE->getType()->isVoidPointerType() &&
19969 ICE->getCastKind() == CK_BitCast)
19970 ArgumentExpr = ICE->getSubExpr();
19971 }
19972 QualType ArgumentType = ArgumentExpr->getType();
19973
19974 // Passing a `void*' pointer shouldn't trigger a warning.
19975 if (IsPointerAttr && ArgumentType->isVoidPointerType())
19976 return;
19977
19978 if (TypeInfo.MustBeNull) {
19979 // Type tag with matching void type requires a null pointer.
19980 if (!ArgumentExpr->isNullPointerConstant(Ctx&: Context,
19981 NPC: Expr::NPC_ValueDependentIsNotNull)) {
19982 Diag(ArgumentExpr->getExprLoc(),
19983 diag::warn_type_safety_null_pointer_required)
19984 << ArgumentKind->getName()
19985 << ArgumentExpr->getSourceRange()
19986 << TypeTagExpr->getSourceRange();
19987 }
19988 return;
19989 }
19990
19991 QualType RequiredType = TypeInfo.Type;
19992 if (IsPointerAttr)
19993 RequiredType = Context.getPointerType(T: RequiredType);
19994
19995 bool mismatch = false;
19996 if (!TypeInfo.LayoutCompatible) {
19997 mismatch = !Context.hasSameType(T1: ArgumentType, T2: RequiredType);
19998
19999 // C++11 [basic.fundamental] p1:
20000 // Plain char, signed char, and unsigned char are three distinct types.
20001 //
20002 // But we treat plain `char' as equivalent to `signed char' or `unsigned
20003 // char' depending on the current char signedness mode.
20004 if (mismatch)
20005 if ((IsPointerAttr && IsSameCharType(T1: ArgumentType->getPointeeType(),
20006 T2: RequiredType->getPointeeType())) ||
20007 (!IsPointerAttr && IsSameCharType(T1: ArgumentType, T2: RequiredType)))
20008 mismatch = false;
20009 } else
20010 if (IsPointerAttr)
20011 mismatch = !isLayoutCompatible(C&: Context,
20012 T1: ArgumentType->getPointeeType(),
20013 T2: RequiredType->getPointeeType());
20014 else
20015 mismatch = !isLayoutCompatible(C&: Context, T1: ArgumentType, T2: RequiredType);
20016
20017 if (mismatch)
20018 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
20019 << ArgumentType << ArgumentKind
20020 << TypeInfo.LayoutCompatible << RequiredType
20021 << ArgumentExpr->getSourceRange()
20022 << TypeTagExpr->getSourceRange();
20023}
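
// Illustrative sketch (hypothetical user code, modeled on the type-safety
// attribute documentation): a magic value registered with
// type_tag_for_datatype is matched against the pointee type of the pointer
// argument at each call site, e.g.:
//
//   typedef void *MPI_Datatype;
//   extern struct mpi_datatype mpi_datatype_int
//       __attribute__((type_tag_for_datatype(mpi, int)));
//   #define MPI_INT ((MPI_Datatype)&mpi_datatype_int)
//   int MPI_Send(void *buf, int count, MPI_Datatype type)
//       __attribute__((pointer_with_type_tag(mpi, 1, 3)));
//
//   float f;
//   MPI_Send(&f, 1, MPI_INT); // warning: argument type 'float *' doesn't
//                             // match specified 'mpi' type tag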
20024
20025void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
20026 CharUnits Alignment) {
20027 MisalignedMembers.emplace_back(Args&: E, Args&: RD, Args&: MD, Args&: Alignment);
20028}
20029
20030void Sema::DiagnoseMisalignedMembers() {
20031 for (MisalignedMember &m : MisalignedMembers) {
20032 const NamedDecl *ND = m.RD;
20033 if (ND->getName().empty()) {
20034 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
20035 ND = TD;
20036 }
20037 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
20038 << m.MD << ND << m.E->getSourceRange();
20039 }
20040 MisalignedMembers.clear();
20041}
20042
20043void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
20044 E = E->IgnoreParens();
20045 if (!T->isPointerType() && !T->isIntegerType() && !T->isDependentType())
20046 return;
20047 if (isa<UnaryOperator>(Val: E) &&
20048 cast<UnaryOperator>(Val: E)->getOpcode() == UO_AddrOf) {
20049 auto *Op = cast<UnaryOperator>(Val: E)->getSubExpr()->IgnoreParens();
20050 if (isa<MemberExpr>(Val: Op)) {
20051 auto *MA = llvm::find(Range&: MisalignedMembers, Val: MisalignedMember(Op));
20052 if (MA != MisalignedMembers.end() &&
20053 (T->isDependentType() || T->isIntegerType() ||
20054 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
20055 Context.getTypeAlignInChars(
20056 T: T->getPointeeType()) <= MA->Alignment))))
20057 MisalignedMembers.erase(CI: MA);
20058 }
20059 }
20060}
20061
20062void Sema::RefersToMemberWithReducedAlignment(
20063 Expr *E,
20064 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
20065 Action) {
20066 const auto *ME = dyn_cast<MemberExpr>(Val: E);
20067 if (!ME)
20068 return;
20069
20070 // No need to check expressions with an __unaligned-qualified type.
20071 if (E->getType().getQualifiers().hasUnaligned())
20072 return;
20073
20074 // For a chain of MemberExpr like "a.b.c.d" this list
20075 // will keep FieldDecl's like [d, c, b].
20076 SmallVector<FieldDecl *, 4> ReverseMemberChain;
20077 const MemberExpr *TopME = nullptr;
20078 bool AnyIsPacked = false;
20079 do {
20080 QualType BaseType = ME->getBase()->getType();
20081 if (BaseType->isDependentType())
20082 return;
20083 if (ME->isArrow())
20084 BaseType = BaseType->getPointeeType();
20085 RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl();
20086 if (RD->isInvalidDecl())
20087 return;
20088
20089 ValueDecl *MD = ME->getMemberDecl();
20090 auto *FD = dyn_cast<FieldDecl>(Val: MD);
20091 // We do not care about non-data members.
20092 if (!FD || FD->isInvalidDecl())
20093 return;
20094
20095 AnyIsPacked =
20096 AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
20097 ReverseMemberChain.push_back(Elt: FD);
20098
20099 TopME = ME;
20100 ME = dyn_cast<MemberExpr>(Val: ME->getBase()->IgnoreParens());
20101 } while (ME);
20102 assert(TopME && "We did not compute a topmost MemberExpr!");
20103
20104 // Not the scope of this diagnostic.
20105 if (!AnyIsPacked)
20106 return;
20107
20108 const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
20109 const auto *DRE = dyn_cast<DeclRefExpr>(Val: TopBase);
20110 // TODO: The innermost base of the member expression may be too complicated.
20111 // For now, just disregard these cases. This is left for future
20112 // improvement.
20113 if (!DRE && !isa<CXXThisExpr>(Val: TopBase))
20114 return;
20115
20116 // Alignment expected by the whole expression.
20117 CharUnits ExpectedAlignment = Context.getTypeAlignInChars(T: E->getType());
20118
20119 // No need to do anything else with this case.
20120 if (ExpectedAlignment.isOne())
20121 return;
20122
20123 // Synthesize offset of the whole access.
20124 CharUnits Offset;
20125 for (const FieldDecl *FD : llvm::reverse(C&: ReverseMemberChain))
20126 Offset += Context.toCharUnitsFromBits(BitSize: Context.getFieldOffset(FD));
20127
20128 // Compute the CompleteObjectAlignment as the alignment of the whole chain.
20129 CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
20130 ReverseMemberChain.back()->getParent()->getTypeForDecl());
20131
20132 // The base expression of the innermost MemberExpr may give
20133 // stronger guarantees than the class containing the member.
20134 if (DRE && !TopME->isArrow()) {
20135 const ValueDecl *VD = DRE->getDecl();
20136 if (!VD->getType()->isReferenceType())
20137 CompleteObjectAlignment =
20138 std::max(a: CompleteObjectAlignment, b: Context.getDeclAlign(VD));
20139 }
20140
20141 // Check if the synthesized offset fulfills the alignment.
  if (Offset % ExpectedAlignment != 0 ||
      // It may fulfill the offset, but the effective alignment may still be
      // lower than the expected expression alignment.
      CompleteObjectAlignment < ExpectedAlignment) {
    // If this happens, we want to determine a sensible culprit for it.
    // Intuitively, walking the chain of member expressions from right to
    // left, we start with the required alignment (as required by the field
    // type), but some packed attribute in that chain has reduced the
    // alignment. Another packed structure may increase it again, but if we
    // got here, any such increase was not enough. So pointing at the first
    // FieldDecl that either is packed itself or whose RecordDecl is packed
    // seems reasonable.
20154 FieldDecl *FD = nullptr;
20155 CharUnits Alignment;
20156 for (FieldDecl *FDI : ReverseMemberChain) {
20157 if (FDI->hasAttr<PackedAttr>() ||
20158 FDI->getParent()->hasAttr<PackedAttr>()) {
20159 FD = FDI;
20160 Alignment = std::min(
20161 Context.getTypeAlignInChars(FD->getType()),
20162 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
20163 break;
20164 }
20165 }
20166 assert(FD && "We did not find a packed FieldDecl!");
20167 Action(E, FD->getParent(), FD, Alignment);
20168 }
20169}
20170
20171void Sema::CheckAddressOfPackedMember(Expr *rhs) {
20172 using namespace std::placeholders;
20173
20174 RefersToMemberWithReducedAlignment(
20175 rhs, std::bind(f: &Sema::AddPotentialMisalignedMembers, args: std::ref(t&: *this), args: _1,
20176 args: _2, args: _3, args: _4));
20177}
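
// Illustrative sketch (hypothetical user code): taking the address of a
// member whose effective alignment was reduced by a packed attribute is
// collected above and later diagnosed, e.g.:
//
//   struct __attribute__((packed)) S { char c; int i; };
//   int *get(struct S *s) { return &s->i; }
//   // warning: taking address of packed member 'i' of class or structure
//   // 'S' may result in an unaligned pointer value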
20178
20179bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) {
20180 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 1))
20181 return true;
20182
20183 ExprResult A = UsualUnaryConversions(E: TheCall->getArg(Arg: 0));
20184 if (A.isInvalid())
20185 return true;
20186
20187 TheCall->setArg(Arg: 0, ArgExpr: A.get());
20188 QualType TyA = A.get()->getType();
20189
20190 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA, 1))
20191 return true;
20192
20193 TheCall->setType(TyA);
20194 return false;
20195}
20196
20197bool Sema::BuiltinElementwiseMath(CallExpr *TheCall) {
20198 QualType Res;
20199 if (BuiltinVectorMath(TheCall, Res))
20200 return true;
20201 TheCall->setType(Res);
20202 return false;
20203}
20204
20205bool Sema::BuiltinVectorToScalarMath(CallExpr *TheCall) {
20206 QualType Res;
20207 if (BuiltinVectorMath(TheCall, Res))
20208 return true;
20209
20210 if (auto *VecTy0 = Res->getAs<VectorType>())
20211 TheCall->setType(VecTy0->getElementType());
20212 else
20213 TheCall->setType(Res);
20214
20215 return false;
20216}
20217
20218bool Sema::BuiltinVectorMath(CallExpr *TheCall, QualType &Res) {
20219 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 2))
20220 return true;
20221
20222 ExprResult A = TheCall->getArg(Arg: 0);
20223 ExprResult B = TheCall->getArg(Arg: 1);
20224 // Do standard promotions between the two arguments, returning their common
20225 // type.
20226 Res = UsualArithmeticConversions(LHS&: A, RHS&: B, Loc: TheCall->getExprLoc(), ACK: ACK_Comparison);
20227 if (A.isInvalid() || B.isInvalid())
20228 return true;
20229
20230 QualType TyA = A.get()->getType();
20231 QualType TyB = B.get()->getType();
20232
20233 if (Res.isNull() || TyA.getCanonicalType() != TyB.getCanonicalType())
20234 return Diag(A.get()->getBeginLoc(),
20235 diag::err_typecheck_call_different_arg_types)
20236 << TyA << TyB;
20237
20238 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA, 1))
20239 return true;
20240
20241 TheCall->setArg(Arg: 0, ArgExpr: A.get());
20242 TheCall->setArg(Arg: 1, ArgExpr: B.get());
20243 return false;
20244}
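
// Illustrative sketch (hypothetical user code): after the usual arithmetic
// conversions, both operands of a binary elementwise builtin such as
// __builtin_elementwise_max must end up with the same scalar or vector type:
//
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   float4 emax(float4 a, float4 b) {
//     return __builtin_elementwise_max(a, b);
//   }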
20245
20246bool Sema::BuiltinElementwiseTernaryMath(CallExpr *TheCall,
20247 bool CheckForFloatArgs) {
20248 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 3))
20249 return true;
20250
20251 Expr *Args[3];
20252 for (int I = 0; I < 3; ++I) {
20253 ExprResult Converted = UsualUnaryConversions(E: TheCall->getArg(Arg: I));
20254 if (Converted.isInvalid())
20255 return true;
20256 Args[I] = Converted.get();
20257 }
20258
20259 if (CheckForFloatArgs) {
20260 int ArgOrdinal = 1;
20261 for (Expr *Arg : Args) {
20262 if (checkFPMathBuiltinElementType(*this, Arg->getBeginLoc(),
20263 Arg->getType(), ArgOrdinal++))
20264 return true;
20265 }
20266 } else {
20267 int ArgOrdinal = 1;
20268 for (Expr *Arg : Args) {
20269 if (checkMathBuiltinElementType(*this, Arg->getBeginLoc(), Arg->getType(),
20270 ArgOrdinal++))
20271 return true;
20272 }
20273 }
20274
20275 for (int I = 1; I < 3; ++I) {
20276 if (Args[0]->getType().getCanonicalType() !=
20277 Args[I]->getType().getCanonicalType()) {
20278 return Diag(Args[0]->getBeginLoc(),
20279 diag::err_typecheck_call_different_arg_types)
20280 << Args[0]->getType() << Args[I]->getType();
20281 }
20282
20283 TheCall->setArg(Arg: I, ArgExpr: Args[I]);
20284 }
20285
20286 TheCall->setType(Args[0]->getType());
20287 return false;
20288}
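
// Illustrative sketch (hypothetical user code; assumes the ternary
// elementwise builtin __builtin_elementwise_fma): all three arguments must
// have the same floating-point (or floating-point vector) type:
//
//   float fma1(float a, float b, float c) {
//     return __builtin_elementwise_fma(a, b, c);
//   }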
20289
20290bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) {
20291 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 1))
20292 return true;
20293
20294 ExprResult A = UsualUnaryConversions(E: TheCall->getArg(Arg: 0));
20295 if (A.isInvalid())
20296 return true;
20297
20298 TheCall->setArg(Arg: 0, ArgExpr: A.get());
20299 return false;
20300}
20301
20302bool Sema::BuiltinNonDeterministicValue(CallExpr *TheCall) {
20303 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 1))
20304 return true;
20305
20306 ExprResult Arg = TheCall->getArg(Arg: 0);
20307 QualType TyArg = Arg.get()->getType();
20308
20309 if (!TyArg->isBuiltinType() && !TyArg->isVectorType())
20310 return Diag(TheCall->getArg(0)->getBeginLoc(), diag::err_builtin_invalid_arg_type)
20311 << 1 << /*vector, integer or floating point ty*/ 0 << TyArg;
20312
20313 TheCall->setType(TyArg);
20314 return false;
20315}
20316
20317ExprResult Sema::BuiltinMatrixTranspose(CallExpr *TheCall,
20318 ExprResult CallResult) {
20319 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 1))
20320 return ExprError();
20321
20322 ExprResult MatrixArg = DefaultLvalueConversion(E: TheCall->getArg(Arg: 0));
20323 if (MatrixArg.isInvalid())
20324 return MatrixArg;
20325 Expr *Matrix = MatrixArg.get();
20326
20327 auto *MType = Matrix->getType()->getAs<ConstantMatrixType>();
20328 if (!MType) {
20329 Diag(Matrix->getBeginLoc(), diag::err_builtin_invalid_arg_type)
20330 << 1 << /* matrix ty*/ 1 << Matrix->getType();
20331 return ExprError();
20332 }
20333
20334 // Create returned matrix type by swapping rows and columns of the argument
20335 // matrix type.
20336 QualType ResultType = Context.getConstantMatrixType(
20337 ElementType: MType->getElementType(), NumRows: MType->getNumColumns(), NumColumns: MType->getNumRows());
20338
20339 // Change the return type to the type of the returned matrix.
20340 TheCall->setType(ResultType);
20341
20342 // Update call argument to use the possibly converted matrix argument.
20343 TheCall->setArg(Arg: 0, ArgExpr: Matrix);
20344 return CallResult;
20345}
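
// Illustrative sketch (hypothetical user code, requires -fenable-matrix): the
// result type swaps the row and column counts of the argument matrix:
//
//   typedef float m3x4_t __attribute__((matrix_type(3, 4)));
//   typedef float m4x3_t __attribute__((matrix_type(4, 3)));
//   m4x3_t t(m3x4_t m) { return __builtin_matrix_transpose(m); }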
20346
20347// Get and verify the matrix dimensions.
20348static std::optional<unsigned>
20349getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) {
20350 SourceLocation ErrorPos;
20351 std::optional<llvm::APSInt> Value =
20352 Expr->getIntegerConstantExpr(Ctx: S.Context, Loc: &ErrorPos);
20353 if (!Value) {
20354 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg)
20355 << Name;
20356 return {};
20357 }
20358 uint64_t Dim = Value->getZExtValue();
20359 if (!ConstantMatrixType::isDimensionValid(NumElements: Dim)) {
20360 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension)
20361 << Name << ConstantMatrixType::getMaxElementsPerDimension();
20362 return {};
20363 }
20364 return Dim;
20365}
20366
20367ExprResult Sema::BuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
20368 ExprResult CallResult) {
20369 if (!getLangOpts().MatrixTypes) {
20370 Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled);
20371 return ExprError();
20372 }
20373
20374 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 4))
20375 return ExprError();
20376
20377 unsigned PtrArgIdx = 0;
20378 Expr *PtrExpr = TheCall->getArg(Arg: PtrArgIdx);
20379 Expr *RowsExpr = TheCall->getArg(Arg: 1);
20380 Expr *ColumnsExpr = TheCall->getArg(Arg: 2);
20381 Expr *StrideExpr = TheCall->getArg(Arg: 3);
20382
20383 bool ArgError = false;
20384
20385 // Check pointer argument.
20386 {
20387 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(E: PtrExpr);
20388 if (PtrConv.isInvalid())
20389 return PtrConv;
20390 PtrExpr = PtrConv.get();
20391 TheCall->setArg(Arg: 0, ArgExpr: PtrExpr);
20392 if (PtrExpr->isTypeDependent()) {
20393 TheCall->setType(Context.DependentTy);
20394 return TheCall;
20395 }
20396 }
20397
20398 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
20399 QualType ElementTy;
20400 if (!PtrTy) {
20401 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
20402 << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
20403 ArgError = true;
20404 } else {
20405 ElementTy = PtrTy->getPointeeType().getUnqualifiedType();
20406
20407 if (!ConstantMatrixType::isValidElementType(T: ElementTy)) {
20408 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
20409 << PtrArgIdx + 1 << /* pointer to element ty*/ 2
20410 << PtrExpr->getType();
20411 ArgError = true;
20412 }
20413 }
20414
20415 // Apply default Lvalue conversions and convert the expression to size_t.
20416 auto ApplyArgumentConversions = [this](Expr *E) {
20417 ExprResult Conv = DefaultLvalueConversion(E);
20418 if (Conv.isInvalid())
20419 return Conv;
20420
20421 return tryConvertExprToType(E: Conv.get(), Ty: Context.getSizeType());
20422 };
20423
20424 // Apply conversion to row and column expressions.
20425 ExprResult RowsConv = ApplyArgumentConversions(RowsExpr);
20426 if (!RowsConv.isInvalid()) {
20427 RowsExpr = RowsConv.get();
20428 TheCall->setArg(Arg: 1, ArgExpr: RowsExpr);
20429 } else
20430 RowsExpr = nullptr;
20431
20432 ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr);
20433 if (!ColumnsConv.isInvalid()) {
20434 ColumnsExpr = ColumnsConv.get();
20435 TheCall->setArg(Arg: 2, ArgExpr: ColumnsExpr);
20436 } else
20437 ColumnsExpr = nullptr;
20438
20439 // If any part of the result matrix type is still pending, just use
20440 // Context.DependentTy, until all parts are resolved.
20441 if ((RowsExpr && RowsExpr->isTypeDependent()) ||
20442 (ColumnsExpr && ColumnsExpr->isTypeDependent())) {
20443 TheCall->setType(Context.DependentTy);
20444 return CallResult;
20445 }
20446
20447 // Check row and column dimensions.
20448 std::optional<unsigned> MaybeRows;
20449 if (RowsExpr)
20450 MaybeRows = getAndVerifyMatrixDimension(Expr: RowsExpr, Name: "row", S&: *this);
20451
20452 std::optional<unsigned> MaybeColumns;
20453 if (ColumnsExpr)
20454 MaybeColumns = getAndVerifyMatrixDimension(Expr: ColumnsExpr, Name: "column", S&: *this);
20455
20456 // Check stride argument.
20457 ExprResult StrideConv = ApplyArgumentConversions(StrideExpr);
20458 if (StrideConv.isInvalid())
20459 return ExprError();
20460 StrideExpr = StrideConv.get();
20461 TheCall->setArg(Arg: 3, ArgExpr: StrideExpr);
20462
20463 if (MaybeRows) {
20464 if (std::optional<llvm::APSInt> Value =
20465 StrideExpr->getIntegerConstantExpr(Ctx: Context)) {
20466 uint64_t Stride = Value->getZExtValue();
20467 if (Stride < *MaybeRows) {
20468 Diag(StrideExpr->getBeginLoc(),
20469 diag::err_builtin_matrix_stride_too_small);
20470 ArgError = true;
20471 }
20472 }
20473 }
20474
20475 if (ArgError || !MaybeRows || !MaybeColumns)
20476 return ExprError();
20477
20478 TheCall->setType(
20479 Context.getConstantMatrixType(ElementType: ElementTy, NumRows: *MaybeRows, NumColumns: *MaybeColumns));
20480 return CallResult;
20481}
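
// Illustrative sketch (hypothetical user code, requires -fenable-matrix): the
// four arguments checked above are the data pointer, the row count, the
// column count and the stride, which must be at least the row count:
//
//   typedef double m2x2_t __attribute__((matrix_type(2, 2)));
//   m2x2_t load2x2(double *p) {
//     return __builtin_matrix_column_major_load(p, 2, 2, 2);
//   }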
20482
20483ExprResult Sema::BuiltinMatrixColumnMajorStore(CallExpr *TheCall,
20484 ExprResult CallResult) {
20485 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 3))
20486 return ExprError();
20487
20488 unsigned PtrArgIdx = 1;
20489 Expr *MatrixExpr = TheCall->getArg(Arg: 0);
20490 Expr *PtrExpr = TheCall->getArg(Arg: PtrArgIdx);
20491 Expr *StrideExpr = TheCall->getArg(Arg: 2);
20492
20493 bool ArgError = false;
20494
20495 {
20496 ExprResult MatrixConv = DefaultLvalueConversion(E: MatrixExpr);
20497 if (MatrixConv.isInvalid())
20498 return MatrixConv;
20499 MatrixExpr = MatrixConv.get();
20500 TheCall->setArg(Arg: 0, ArgExpr: MatrixExpr);
20501 }
20502 if (MatrixExpr->isTypeDependent()) {
20503 TheCall->setType(Context.DependentTy);
20504 return TheCall;
20505 }
20506
20507 auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>();
20508 if (!MatrixTy) {
20509 Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
20510 << 1 << /*matrix ty */ 1 << MatrixExpr->getType();
20511 ArgError = true;
20512 }
20513
20514 {
20515 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(E: PtrExpr);
20516 if (PtrConv.isInvalid())
20517 return PtrConv;
20518 PtrExpr = PtrConv.get();
20519 TheCall->setArg(Arg: 1, ArgExpr: PtrExpr);
20520 if (PtrExpr->isTypeDependent()) {
20521 TheCall->setType(Context.DependentTy);
20522 return TheCall;
20523 }
20524 }
20525
20526 // Check pointer argument.
20527 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
20528 if (!PtrTy) {
20529 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
20530 << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
20531 ArgError = true;
20532 } else {
20533 QualType ElementTy = PtrTy->getPointeeType();
20534 if (ElementTy.isConstQualified()) {
20535 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const);
20536 ArgError = true;
20537 }
20538 ElementTy = ElementTy.getUnqualifiedType().getCanonicalType();
20539 if (MatrixTy &&
20540 !Context.hasSameType(ElementTy, MatrixTy->getElementType())) {
20541 Diag(PtrExpr->getBeginLoc(),
20542 diag::err_builtin_matrix_pointer_arg_mismatch)
20543 << ElementTy << MatrixTy->getElementType();
20544 ArgError = true;
20545 }
20546 }
20547
20548 // Apply default Lvalue conversions and convert the stride expression to
20549 // size_t.
20550 {
20551 ExprResult StrideConv = DefaultLvalueConversion(E: StrideExpr);
20552 if (StrideConv.isInvalid())
20553 return StrideConv;
20554
20555 StrideConv = tryConvertExprToType(E: StrideConv.get(), Ty: Context.getSizeType());
20556 if (StrideConv.isInvalid())
20557 return StrideConv;
20558 StrideExpr = StrideConv.get();
20559 TheCall->setArg(Arg: 2, ArgExpr: StrideExpr);
20560 }
20561
20562 // Check stride argument.
20563 if (MatrixTy) {
20564 if (std::optional<llvm::APSInt> Value =
20565 StrideExpr->getIntegerConstantExpr(Ctx: Context)) {
20566 uint64_t Stride = Value->getZExtValue();
20567 if (Stride < MatrixTy->getNumRows()) {
20568 Diag(StrideExpr->getBeginLoc(),
20569 diag::err_builtin_matrix_stride_too_small);
20570 ArgError = true;
20571 }
20572 }
20573 }
20574
20575 if (ArgError)
20576 return ExprError();
20577
20578 return CallResult;
20579}
20580
/// Checks that the argument at the given index is a WebAssembly table and,
/// if it is, sets ElTy to the element type.
20583static bool CheckWasmBuiltinArgIsTable(Sema &S, CallExpr *E, unsigned ArgIndex,
20584 QualType &ElTy) {
20585 Expr *ArgExpr = E->getArg(Arg: ArgIndex);
20586 const auto *ATy = dyn_cast<ArrayType>(Val: ArgExpr->getType());
20587 if (!ATy || !ATy->getElementType().isWebAssemblyReferenceType()) {
20588 return S.Diag(ArgExpr->getBeginLoc(),
20589 diag::err_wasm_builtin_arg_must_be_table_type)
20590 << ArgIndex + 1 << ArgExpr->getSourceRange();
20591 }
20592 ElTy = ATy->getElementType();
20593 return false;
20594}
20595
/// Checks that the argument at the given index is an integer.
20597static bool CheckWasmBuiltinArgIsInteger(Sema &S, CallExpr *E,
20598 unsigned ArgIndex) {
20599 Expr *ArgExpr = E->getArg(Arg: ArgIndex);
20600 if (!ArgExpr->getType()->isIntegerType()) {
20601 return S.Diag(ArgExpr->getBeginLoc(),
20602 diag::err_wasm_builtin_arg_must_be_integer_type)
20603 << ArgIndex + 1 << ArgExpr->getSourceRange();
20604 }
20605 return false;
20606}
20607
20608/// Check that the first argument is a WebAssembly table, and the second
20609/// is an index to use as index into the table.
20610bool Sema::BuiltinWasmTableGet(CallExpr *TheCall) {
20611 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 2))
20612 return true;
20613
20614 QualType ElTy;
20615 if (CheckWasmBuiltinArgIsTable(S&: *this, E: TheCall, ArgIndex: 0, ElTy))
20616 return true;
20617
20618 if (CheckWasmBuiltinArgIsInteger(S&: *this, E: TheCall, ArgIndex: 1))
20619 return true;
20620
20621 // If all is well, we set the type of TheCall to be the type of the
20622 // element of the table.
20623 // i.e. a table.get on an externref table has type externref,
20624 // or whatever the type of the table element is.
20625 TheCall->setType(ElTy);
20626
20627 return false;
20628}
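
// Illustrative sketch (hypothetical user code; assumes WebAssembly
// reference-types support, where a table is written as a zero-length array of
// a reference type such as __externref_t):
//
//   static __externref_t table[0];
//   __externref_t get(int i) { return __builtin_wasm_table_get(table, i); }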
20629
/// Check that the first argument is a WebAssembly table, the second is an
/// index into the table, and the third is the reference-typed value to set
/// at that index.
20633bool Sema::BuiltinWasmTableSet(CallExpr *TheCall) {
20634 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 3))
20635 return true;
20636
20637 QualType ElTy;
20638 if (CheckWasmBuiltinArgIsTable(S&: *this, E: TheCall, ArgIndex: 0, ElTy))
20639 return true;
20640
20641 if (CheckWasmBuiltinArgIsInteger(S&: *this, E: TheCall, ArgIndex: 1))
20642 return true;
20643
20644 if (!Context.hasSameType(T1: ElTy, T2: TheCall->getArg(Arg: 2)->getType()))
20645 return true;
20646
20647 return false;
20648}
20649
20650/// Check that the argument is a WebAssembly table.
20651bool Sema::BuiltinWasmTableSize(CallExpr *TheCall) {
20652 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 1))
20653 return true;
20654
20655 QualType ElTy;
20656 if (CheckWasmBuiltinArgIsTable(S&: *this, E: TheCall, ArgIndex: 0, ElTy))
20657 return true;
20658
20659 return false;
20660}
20661
20662/// Check that the first argument is a WebAssembly table, the second is the
20663/// value to use for new elements (of a type matching the table type), the
20664/// third value is an integer.
20665bool Sema::BuiltinWasmTableGrow(CallExpr *TheCall) {
20666 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 3))
20667 return true;
20668
20669 QualType ElTy;
20670 if (CheckWasmBuiltinArgIsTable(S&: *this, E: TheCall, ArgIndex: 0, ElTy))
20671 return true;
20672
20673 Expr *NewElemArg = TheCall->getArg(Arg: 1);
20674 if (!Context.hasSameType(T1: ElTy, T2: NewElemArg->getType())) {
20675 return Diag(NewElemArg->getBeginLoc(),
20676 diag::err_wasm_builtin_arg_must_match_table_element_type)
20677 << 2 << 1 << NewElemArg->getSourceRange();
20678 }
20679
20680 if (CheckWasmBuiltinArgIsInteger(S&: *this, E: TheCall, ArgIndex: 2))
20681 return true;
20682
20683 return false;
20684}
20685
20686/// Check that the first argument is a WebAssembly table, the second is an
20687/// integer, the third is the value to use to fill the table (of a type
20688/// matching the table type), and the fourth is an integer.
20689bool Sema::BuiltinWasmTableFill(CallExpr *TheCall) {
20690 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 4))
20691 return true;
20692
20693 QualType ElTy;
20694 if (CheckWasmBuiltinArgIsTable(S&: *this, E: TheCall, ArgIndex: 0, ElTy))
20695 return true;
20696
20697 if (CheckWasmBuiltinArgIsInteger(S&: *this, E: TheCall, ArgIndex: 1))
20698 return true;
20699
20700 Expr *NewElemArg = TheCall->getArg(Arg: 2);
20701 if (!Context.hasSameType(T1: ElTy, T2: NewElemArg->getType())) {
20702 return Diag(NewElemArg->getBeginLoc(),
20703 diag::err_wasm_builtin_arg_must_match_table_element_type)
20704 << 3 << 1 << NewElemArg->getSourceRange();
20705 }
20706
20707 if (CheckWasmBuiltinArgIsInteger(S&: *this, E: TheCall, ArgIndex: 3))
20708 return true;
20709
20710 return false;
20711}
20712
20713/// Check that the first argument is a WebAssembly table, the second is also a
20714/// WebAssembly table (of the same element type), and the third to fifth
20715/// arguments are integers.
20716bool Sema::BuiltinWasmTableCopy(CallExpr *TheCall) {
20717 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 5))
20718 return true;
20719
20720 QualType XElTy;
20721 if (CheckWasmBuiltinArgIsTable(S&: *this, E: TheCall, ArgIndex: 0, ElTy&: XElTy))
20722 return true;
20723
20724 QualType YElTy;
20725 if (CheckWasmBuiltinArgIsTable(S&: *this, E: TheCall, ArgIndex: 1, ElTy&: YElTy))
20726 return true;
20727
20728 Expr *TableYArg = TheCall->getArg(Arg: 1);
20729 if (!Context.hasSameType(T1: XElTy, T2: YElTy)) {
20730 return Diag(TableYArg->getBeginLoc(),
20731 diag::err_wasm_builtin_arg_must_match_table_element_type)
20732 << 2 << 1 << TableYArg->getSourceRange();
20733 }
20734
20735 for (int I = 2; I <= 4; I++) {
20736 if (CheckWasmBuiltinArgIsInteger(S&: *this, E: TheCall, ArgIndex: I))
20737 return true;
20738 }
20739
20740 return false;
20741}
20742
20743/// \brief Enforce the bounds of a TCB
20744/// CheckTCBEnforcement - Enforces that every function in a named TCB only
20745/// directly calls other functions in the same TCB as marked by the enforce_tcb
20746/// and enforce_tcb_leaf attributes.
20747void Sema::CheckTCBEnforcement(const SourceLocation CallExprLoc,
20748 const NamedDecl *Callee) {
20749 // This warning does not make sense in code that has no runtime behavior.
20750 if (isUnevaluatedContext())
20751 return;
20752
20753 const NamedDecl *Caller = getCurFunctionOrMethodDecl();
20754
20755 if (!Caller || !Caller->hasAttr<EnforceTCBAttr>())
20756 return;
20757
20758 // Search through the enforce_tcb and enforce_tcb_leaf attributes to find
20759 // all TCBs the callee is a part of.
20760 llvm::StringSet<> CalleeTCBs;
20761 for (const auto *A : Callee->specific_attrs<EnforceTCBAttr>())
20762 CalleeTCBs.insert(A->getTCBName());
20763 for (const auto *A : Callee->specific_attrs<EnforceTCBLeafAttr>())
20764 CalleeTCBs.insert(A->getTCBName());
20765
20766 // Go through the TCBs the caller is a part of and emit warnings if Caller
20767 // is in a TCB that the Callee is not.
20768 for (const auto *A : Caller->specific_attrs<EnforceTCBAttr>()) {
20769 StringRef CallerTCB = A->getTCBName();
20770 if (CalleeTCBs.count(CallerTCB) == 0) {
20771 this->Diag(CallExprLoc, diag::warn_tcb_enforcement_violation)
20772 << Callee << CallerTCB;
20773 }
20774 }
20775}
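
// Illustrative sketch (hypothetical user code): a function in TCB "x" may
// only call functions that are also in TCB "x" (or marked enforce_tcb_leaf):
//
//   void helper(void);
//   __attribute__((enforce_tcb("x"))) void worker(void) {
//     helper(); // warning: calling 'helper' is a violation of trusted
//               // computing base 'x'
//   }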
20776
